/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
36 #include "namespace.h"
40 #include <sys/param.h>
41 #include <sys/queue.h>
43 #include <pthread_np.h>
44 #include "un-namespace.h"
46 #include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev == NULL))\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev != NULL || \
	    (m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	2000
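
/*
 * Illustrative usage (not part of this file's logic): an application
 * opts in to adaptive spinning by requesting the non-portable
 * PTHREAD_MUTEX_ADAPTIVE_NP type through the standard attribute
 * interface, e.g.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t lock;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 *	pthread_mutex_init(&lock, &attr);
 *	pthread_mutexattr_destroy(&attr);
 *
 * Such a mutex spins up to m_spinloops times before sleeping.
 */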

int	__pthread_mutex_init(pthread_mutex_t *mutex,
	    const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
	    const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
	    void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
		    const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
		    const struct timespec *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);
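
/*
 * Common initialization: allocate a struct pthread_mutex with the
 * caller-supplied calloc-like callback and set it up according to the
 * (possibly default) attributes.  The callback parameter exists so
 * that malloc can initialize its own locks through
 * _pthread_mutex_init_calloc_cb() below without recursing into
 * itself.
 */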
static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
	    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	}

	if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}
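
/*
 * First-use initialization for mutexes created with a static
 * initializer; _mutex_static_lock serializes concurrent first
 * lockers so the mutex is initialized exactly once.
 */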
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == THR_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
		ret = mutex_init(mutex,
		    &_pthread_mutexattr_adaptive_default, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
	return (ret);
}
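
/*
 * Recompute the ceiling the thread falls back to after releasing a
 * priority-protected mutex: the ceiling of the last (highest) entry
 * still on its pp_mutexq, or -1 when none remain.
 */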
static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0
	};

	return mutex_init(mutex, &attr, calloc_cb);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that a
	 * process-shared mutex should not be inherited, because its
	 * owner is the forking thread, which lives in the parent
	 * process; such mutexes would have to be removed from the
	 * owned-mutex list.  Process-shared mutexes are not currently
	 * supported, so that case does not arise yet.
	 */
	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	m = *mutex;
	if (m < THR_MUTEX_DESTROYED) {
		ret = 0;
	} else if (m == THR_MUTEX_DESTROYED) {
		ret = EINVAL;
	} else {
		id = TID(curthread);
		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&m->m_lock, id);
		if (ret)
			return (ret);
		/*
		 * Check the mutex's other fields to see if it is in
		 * use: mostly for priority-protocol mutex types, or
		 * condition variables referencing it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			*mutex = THR_MUTEX_DESTROYED;
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}
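
/*
 * Lock-path helpers: ENQUEUE_MUTEX records the new owner and links
 * the mutex onto the owning thread's queue (mutexq or pp_mutexq,
 * depending on the protocol); CHECK_AND_INIT_MUTEX rejects destroyed
 * mutexes and lazily initializes statically-initialized ones.
 */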
#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

#define CHECK_AND_INIT_MUTEX						\
	if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) {	\
		if (m == THR_MUTEX_DESTROYED)				\
			return (EINVAL);				\
		int ret;						\
		ret = init_static(_get_curthread(), mutex);		\
		if (ret)						\
			return (ret);					\
		m = *mutex;						\
	}

static int
mutex_trylock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m = *mutex;
	uint32_t id;
	int ret;

	id = TID(curthread);
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (__predict_true(ret == 0)) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	}
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	return (mutex_trylock_common(mutex));
}

static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
    const struct timespec *abstime)
{
	uint32_t id, owner;
	int count;
	int ret;

	if (m->m_owner == curthread)
		return mutex_self_lock(m, abstime);

	id = TID(curthread);
	/*
	 * For adaptive mutexes, spin for a bit in the expectation
	 * that if the application requests this mutex type then
	 * the lock is likely to be released quickly and it is
	 * faster than entering the kernel.
	 */
	if (__predict_false((m->m_lock.m_flags &
	    (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
		goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner,
			    owner, id | owner)) {
				ret = 0;
				goto done;
			}
		}
		CPU_SPINWAIT;
	}

yield_loop:
	count = m->m_yieldloops;
	while (count--) {
		_sched_yield();
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner,
			    owner, id | owner)) {
				ret = 0;
				goto done;
			}
		}
	}

sleep_in_kernel:
	if (abstime == NULL) {
		ret = __thr_umutex_lock(&m->m_lock, id);
	} else if (__predict_false(abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)) {
		ret = EINVAL;
	} else {
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
	}
done:
	if (ret == 0)
		ENQUEUE_MUTEX(curthread, m);

	return (ret);
}
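
/*
 * Common lock entry: try a cheap userland trylock first and fall back
 * to mutex_lock_sleep() (spin/yield/kernel) only on contention.
 */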
static inline int
mutex_lock_common(struct pthread_mutex *m,
    const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();

	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
		ENQUEUE_MUTEX(curthread, m);
		return (0);
	}
	return (mutex_lock_sleep(curthread, m, abstime));
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct pthread_mutex *m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}
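
/*
 * Used by the condition variable code to reacquire a mutex after a
 * wait: drops the reference taken by _mutex_cv_unlock() and restores
 * the saved recursion count.
 */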
int
_mutex_cv_lock(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	ret = mutex_lock_common(m, NULL);
	if (ret == 0) {
		m->m_refcount--;
		m->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(struct pthread_mutex *m)
{
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	m = *mutex;
	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
	    m->m_type == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if (__predict_true((m->m_lock.m_flags &
		    UMUTEX_PRIO_PROTECT) == 0))
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}
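
/*
 * Counterpart of _mutex_cv_lock(): saves the recursion count, takes a
 * reference so the mutex is not destroyed while threads are blocked
 * on the condition variable, and releases the lock.
 */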
int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	m = *mutex;
	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = m->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}
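
/*
 * Changing the ceiling of an owned PP mutex can break the ceiling
 * ordering of pp_mutexq that set_inherited_priority() depends on, so
 * the mutex is re-inserted at its sorted position below.
 */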
int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}
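
/*
 * Non-portable per-mutex tunables for the spin and yield loop counts
 * consumed by mutex_lock_sleep().
 */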
int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_spinloops;
	return (0);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	m->m_spinloops = count;
	return (0);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_yieldloops;
	return (0);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	m->m_yieldloops = count;
	return (0);
}

int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	m = *mutex;
	if (m <= THR_MUTEX_DESTROYED)
		return (0);
	return (m->m_owner == _get_curthread());
}