/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev == NULL))\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev != NULL ||	\
	    (m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define	MUTEX_ADAPTIVE_SPINS	2000

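/*
 * Note: the value above is only a fallback.  As a sketch of the assumed
 * startup path (see thr_init.c), the LIBPTHREAD_SPINLOOPS and
 * LIBPTHREAD_YIELDLOOPS environment variables seed the globals
 * _thr_spinloops and _thr_yieldloops, which mutex_init() consults when it
 * creates a PTHREAD_MUTEX_ADAPTIVE_NP mutex.
 */
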
/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
		void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(struct pthread_mutex *, int, int *);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
			const struct timespec *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

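/*
 * The reference pairs above follow the usual libc pattern: the
 * implementation lives in the double-underscore symbol, the POSIX name is
 * exported as a weak alias (so it can be interposed), and the
 * single-underscore name is a strong alias for internal libc/libthr use.
 */
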
static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
	    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_flags = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_count = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	}

	if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}

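/*
 * init_static() backs the static initializers: a mutex created with
 * PTHREAD_MUTEX_INITIALIZER (or the adaptive variant) holds a sentinel
 * value until first use, and the real allocation happens here under
 * _mutex_static_lock so that two threads racing on first use cannot
 * both initialize it.
 */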
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == THR_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
		ret = mutex_init(mutex,
		    &_pthread_mutexattr_adaptive_default, calloc);
	else
		ret = 0;
	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc));
}

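/*
 * Illustrative caller's view (application code, not part of this file):
 *
 *	pthread_mutex_t m;
 *	int error = pthread_mutex_init(&m, NULL);
 *
 * resolves through the weak pthread_mutex_init alias to the function
 * above; a NULL attribute selects _pthread_mutexattr_default and the
 * mutex is allocated with the regular libc calloc().
 */
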
/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0
	};
	int ret;

	ret = mutex_init(mutex, &attr, calloc_cb);
	if (ret == 0)
		(*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
	return (ret);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited: their owner
	 * is the forking thread in the parent process, so they would
	 * have to be removed from the owned-mutex list.  Process-shared
	 * mutexes are not currently supported, so this is not yet a
	 * problem.
	 */
	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

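/*
 * The rewrite above is needed because the child's sole surviving thread
 * gets a new TID, while each owned umutex still records the parent
 * thread's TID in its owner word; without the fix-up, the owner check in
 * a later unlock would fail.  (This is a reading of the code; the TID
 * reassignment itself happens in the fork handling elsewhere in libthr.)
 */
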
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	pthread_mutex_t m;
	int ret;

	m = *mutex;
	if (m < THR_MUTEX_DESTROYED)
		ret = 0;
	else if (m == THR_MUTEX_DESTROYED)
		ret = EINVAL;
	else if (m->m_owner != NULL)
		ret = EBUSY;
	else {
		*mutex = THR_MUTEX_DESTROYED;
		MUTEX_ASSERT_NOT_OWNED(m);
		free(m);
		ret = 0;
	}

	return (ret);
}

#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

#define DEQUEUE_MUTEX(curthread, m)					\
	(m)->m_owner = NULL;						\
	MUTEX_ASSERT_IS_OWNED(m);					\
	if (__predict_true(((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) \
		TAILQ_REMOVE(&curthread->mutexq, (m), m_qe);		\
	else {								\
		TAILQ_REMOVE(&curthread->pp_mutexq, (m), m_qe);		\
		set_inherited_priority(curthread, m);			\
	}								\
	MUTEX_INIT_LINK(m);

#define CHECK_AND_INIT_MUTEX						\
	if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) {	\
		int ret;						\
		if (m == THR_MUTEX_DESTROYED)				\
			return (EINVAL);				\
		ret = init_static(_get_curthread(), mutex);		\
		if (ret)						\
			return (ret);					\
		m = *mutex;						\
	}

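/*
 * THR_MUTEX_INITIALIZER, THR_ADAPTIVE_MUTEX_INITIALIZER and
 * THR_MUTEX_DESTROYED are small constant pseudo-pointers (defined in
 * thr_private.h) that order below any real allocation, which is why a
 * single "m <= THR_MUTEX_DESTROYED" comparison can separate the
 * not-yet-initialized and already-destroyed cases from a live mutex.
 */
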
static int
mutex_trylock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m = *mutex;
	uint32_t id;
	int ret;

	id = TID(curthread);
	if (m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (__predict_true(ret == 0)) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	}
	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	return (mutex_trylock_common(mutex));
}

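/*
 * For mutexes marked PMUTEX_FLAG_PRIVATE, the thread enters a critical
 * region (THR_CRITICAL_ENTER) for as long as it holds the lock; the
 * matching THR_CRITICAL_LEAVE is in mutex_unlock_common().  The intent,
 * as read from the code, is to defer signal handling and suspension
 * while an implementation-internal lock is held.
 */
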
static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
	const struct timespec *abstime)
{
	uint32_t id, owner;
	int count;
	int ret;

	if (m->m_owner == curthread)
		return (mutex_self_lock(m, abstime));

	id = TID(curthread);
	/*
	 * For adaptive mutexes, spin for a bit in the expectation
	 * that if the application requests this mutex type then
	 * the lock is likely to be released quickly and it is
	 * faster than entering the kernel.
	 */
	if (__predict_false(
	    (m->m_lock.m_flags &
	     (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
		goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner,
			    owner, id | owner)) {
				ret = 0;
				goto done;
			}
		}
		CPU_SPINWAIT;
	}

yield_loop:
	count = m->m_yieldloops;
	while (count--) {
		_sched_yield();
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner,
			    owner, id | owner)) {
				ret = 0;
				goto done;
			}
		}
	}

sleep_in_kernel:
	if (abstime == NULL) {
		ret = __thr_umutex_lock(&m->m_lock, id);
	} else if (__predict_false(
	    abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)) {
		ret = EINVAL;
	} else {
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
	}
done:
	if (ret == 0)
		ENQUEUE_MUTEX(curthread, m);

	return (ret);
}

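/*
 * Note that both spin phases are skipped for priority-inherit and
 * priority-protect mutexes: those protocols need the kernel to observe
 * the contention so it can adjust priorities, so presumably spinning in
 * userland would only delay that.
 */
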
static inline int
mutex_lock_common(struct pthread_mutex *m,
	const struct timespec *abstime, int cvattach)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
		ENQUEUE_MUTEX(curthread, m);
		ret = 0;
	} else {
		ret = mutex_lock_sleep(curthread, m, abstime);
	}
	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, NULL, 0));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
{
	struct pthread_mutex *m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, abstime, 0));
}

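/*
 * Illustrative caller's view (application code, not part of this file):
 * the timeout is an absolute CLOCK_REALTIME time, e.g.
 *
 *	struct timespec abs;
 *	clock_gettime(CLOCK_REALTIME, &abs);
 *	abs.tv_sec += 1;
 *	error = pthread_mutex_timedlock(&m, &abs);
 *
 * which fails with ETIMEDOUT roughly one second later if the mutex
 * cannot be acquired.
 */
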
int
_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *mp;

	mp = *mutex;
	return (mutex_unlock_common(mp, 0, NULL));
}

int
_mutex_cv_lock(struct pthread_mutex *m, int count)
{
	int error;

	error = mutex_lock_common(m, NULL, 1);
	if (error == 0)
		m->m_count = count;
	return (error);
}

int
_mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)
{
	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_count = 0;
	(void)mutex_unlock_common(m, 1, defer);
	return (0);
}

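/*
 * The _mutex_cv_*() helpers are the condition-variable side of the mutex
 * code: a pthread_cond_wait() releases the mutex with _mutex_cv_unlock()
 * (saving the recursion count) and re-acquires it with _mutex_cv_lock()
 * (restoring that count), so a recursive mutex survives the wait intact.
 */
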
int
_mutex_cv_attach(struct pthread_mutex *m, int count)
{
	struct pthread *curthread = _get_curthread();

	ENQUEUE_MUTEX(curthread, m);
	m->m_count = count;
	return (0);
}

int
_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
{
	struct pthread *curthread = _get_curthread();
	int defered;
	int error;

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*recurse = mp->m_count;
	mp->m_count = 0;
	DEQUEUE_MUTEX(curthread, mp);

	/* Will this happen in the real world? */
	if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
		defered = 1;
		mp->m_flags &= ~PMUTEX_FLAG_DEFERED;
	} else
		defered = 0;

	if (defered) {
		_thr_wake_all(curthread->defer_waiters,
		    curthread->nwaiter_defer);
		curthread->nwaiter_defer = 0;
	}
	return (0);
}

static int
mutex_self_trylock(struct pthread_mutex *m)
{
	int ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 define as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
{
	struct pthread *curthread = _get_curthread();
	uint32_t id;
	int defered, error;

	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	error = 0;
	id = TID(curthread);
	if (__predict_false(
	    PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
			defered = 1;
			m->m_flags &= ~PMUTEX_FLAG_DEFERED;
		} else
			defered = 0;

		DEQUEUE_MUTEX(curthread, m);
		error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);

		if (mtx_defer == NULL && defered) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
	}
	if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_LEAVE(curthread);
	return (error);
}

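/*
 * PMUTEX_FLAG_DEFERED marks a mutex whose condition-variable signals were
 * queued rather than delivered immediately: the waiters sit in
 * curthread->defer_waiters until the mutex is released and are then woken
 * in one batch with _thr_wake_all().  Waking after the unlock (rather
 * than at signal time) avoids having the woken thread immediately block
 * on the still-held mutex.
 */
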
int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
			      int *prioceiling)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = m->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}

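/*
 * The re-insertion above keeps pp_mutexq sorted by ceiling in ascending
 * order, which is what lets set_inherited_priority() use TAILQ_LAST to
 * find the highest ceiling among the priority-protect mutexes the thread
 * still owns.
 */
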
int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_spinloops;
	return (0);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	m->m_spinloops = count;
	return (0);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_yieldloops;
	return (0);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	m->m_yieldloops = count;
	return (0);
}

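/*
 * Illustrative caller's view (application code, not part of this file):
 *
 *	pthread_mutex_t m = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
 *	pthread_mutex_setspinloops_np(&m, 4000);
 *	pthread_mutex_setyieldloops_np(&m, 100);
 *
 * tunes the userland spin and yield phases used in mutex_lock_sleep()
 * before the thread falls back to sleeping in the kernel.
 */
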
int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	m = *mutex;
	if (m <= THR_MUTEX_DESTROYED)
		return (0);
	return (m->m_owner == _get_curthread());
}

int
_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
{
	if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
		if (mp == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}
	if (mp->m_owner != curthread)
		return (EPERM);
	return (0);
}