/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"
#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev == NULL))\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev != NULL ||	\
	    (m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif
/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	2000
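
/*
 * Illustrative usage sketch (not part of this file): an application
 * opts in to adaptive spinning by requesting the FreeBSD-specific
 * PTHREAD_MUTEX_ADAPTIVE_NP type; mutex_init() below then seeds
 * m_spinloops from _thr_spinloops or MUTEX_ADAPTIVE_SPINS.
 *
 *	#include <pthread.h>
 *	#include <pthread_np.h>
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t mtx;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 *	pthread_mutex_init(&mtx, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */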
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
		void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
			const struct timespec *);
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);
static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
	switch(attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}

	if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}
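
/*
 * Illustrative usage sketch (not part of this file): requesting the
 * PTHREAD_PRIO_PROTECT protocol takes the UMUTEX_PRIO_PROTECT branch
 * above and records the caller's ceiling in m_ceilings[0].  The
 * ceiling value here is an arbitrary example.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t mtx;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
 *	pthread_mutexattr_setprioceiling(&attr, 100);
 *	pthread_mutex_init(&mtx, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */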
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}
int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, calloc);
}
/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0
	};
	static const struct pthread_mutex_attr *pattr = &attr;
	int ret;

	ret = mutex_init(mutex, (pthread_mutexattr_t *)&pattr, calloc_cb);
	if (ret == 0)
		(*mutex)->m_private = 1;
	return (ret);
}
void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited: their owner
	 * is the forking thread, which lives in the parent process,
	 * so they would have to be removed from the owned-mutex list.
	 * Process-shared mutexes are not currently supported, so this
	 * is not yet a concern.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}
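
/*
 * Illustrative usage sketch (not part of this file): applications
 * that mix fork() with threads usually serialize around fork with
 * pthread_atfork() so the child never inherits a mutex held
 * mid-update; the ownership fixup above complements that for the
 * mutexes the forking thread itself holds.
 *
 *	static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void prep(void)   { pthread_mutex_lock(&lk); }
 *	static void parent(void) { pthread_mutex_unlock(&lk); }
 *	static void child(void)  { pthread_mutex_unlock(&lk); }
 *
 *	pthread_atfork(prep, parent, child);
 */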
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the mutex's other fields to see if it is in
		 * use, mostly for priority mutex types, or whether
		 * condition variables are referencing it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be free'd
			 * and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}
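
/*
 * Illustrative usage sketch (not part of this file): per the checks
 * above, destroying a mutex that is still locked (or referenced by a
 * condition variable) fails with EBUSY.
 *
 *	#include <assert.h>
 *
 *	pthread_mutex_lock(&mtx);
 *	assert(pthread_mutex_destroy(&mtx) == EBUSY);
 *	pthread_mutex_unlock(&mtx);
 *	assert(pthread_mutex_destroy(&mtx) == 0);
 */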
#define ENQUEUE_MUTEX(curthread, m)				\
	do {							\
		(m)->m_owner = curthread;			\
		/* Add to the list of owned mutexes: */		\
		MUTEX_ASSERT_NOT_OWNED((m));			\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else						\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	if (m->m_private)
		THR_CRITICAL_ENTER(curthread);
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	}
	if (ret && m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}
int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}
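
/*
 * Illustrative usage sketch (not part of this file): a statically
 * initialized mutex (*mutex == NULL) is materialized on first use by
 * init_static() above, so no explicit pthread_mutex_init() call is
 * needed.
 *
 *	static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
 *
 *	if (pthread_mutex_trylock(&mtx) == 0) {
 *		...critical section...
 *		pthread_mutex_unlock(&mtx);
 *	}
 */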
static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
	const struct timespec *abstime)
{
	uint32_t id, owner;
	int count;
	int ret;

	if (m->m_owner == curthread)
		return mutex_self_lock(m, abstime);

	id = TID(curthread);
	/*
	 * For adaptive mutexes, spin for a bit in the expectation
	 * that if the application requests this mutex type then
	 * the lock is likely to be released quickly and it is
	 * faster than entering the kernel.
	 */
	if (m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT))
		goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
				ret = 0;
				goto done;
			}
		}
		CPU_SPINWAIT;
	}

yield_loop:
	count = m->m_yieldloops;
	while (count--) {
		_sched_yield();
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
				ret = 0;
				goto done;
			}
		}
	}

sleep_in_kernel:
	if (abstime == NULL) {
		ret = __thr_umutex_lock(&m->m_lock, id);
	} else if (__predict_false(
		   abstime->tv_nsec < 0 ||
		   abstime->tv_nsec >= 1000000000)) {
		ret = EINVAL;
	} else {
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
	}
done:
	if (ret == 0)
		ENQUEUE_MUTEX(curthread, m);

	return (ret);
}
static inline int
mutex_lock_common(struct pthread *curthread, struct pthread_mutex *m,
	const struct timespec *abstime)
{
	int ret;

	if (m->m_private)
		THR_CRITICAL_ENTER(curthread);
	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
		ENQUEUE_MUTEX(curthread, m);
		ret = 0;
	} else {
		ret = mutex_lock_sleep(curthread, m, abstime);
	}
	if (ret && m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}
int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread *curthread;
	struct pthread_mutex *m;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false((m = *mutex) == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
		m = *mutex;
	}

	return (mutex_lock_common(curthread, m, NULL));
}
int
__pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
{
	struct pthread *curthread;
	struct pthread_mutex *m;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false((m = *mutex) == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
		m = *mutex;
	}

	return (mutex_lock_common(curthread, m, abstime));
}
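
/*
 * Illustrative usage sketch (not part of this file): abstime is an
 * absolute CLOCK_REALTIME deadline, matching the tv_nsec range check
 * in mutex_lock_sleep().  The two-second offset is an arbitrary
 * example.
 *
 *	#include <time.h>
 *
 *	struct timespec abst;
 *
 *	clock_gettime(CLOCK_REALTIME, &abst);
 *	abst.tv_sec += 2;
 *	if (pthread_mutex_timedlock(&mtx, &abst) == ETIMEDOUT)
 *		...give up...
 */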
int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}
int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int ret;

	ret = mutex_lock_common(_get_curthread(), *m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}
static int
mutex_self_trylock(pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}
static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}
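
/*
 * Illustrative summary (not part of this file) of the relock behavior
 * implemented above, as seen from the application:
 *
 *	ERRORCHECK/ADAPTIVE:	second lock fails with EDEADLK
 *				(or ETIMEDOUT when a deadline is given)
 *	NORMAL:			second lock deadlocks by design,
 *				sleeping forever
 *	RECURSIVE:		second lock succeeds and bumps m_count;
 *				unlock must be called once per lock
 */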
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	if (m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (0);
}
int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));

	if (m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (0);
}
int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
			      int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}
int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}
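
/*
 * Illustrative usage sketch (not part of this file): changing the
 * ceiling of a PTHREAD_PRIO_PROTECT mutex; the previous ceiling is
 * returned through the third argument.  The value 120 is an arbitrary
 * example.
 *
 *	int old;
 *
 *	if (pthread_mutex_setprioceiling(&mtx, 120, &old) == 0)
 *		...old now holds the previous ceiling...
 */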
int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	if (*mutex == NULL)
		return (EINVAL);
	*count = (*mutex)->m_spinloops;
	return (0);
}
int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_spinloops = count;
	return (0);
}
int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	if (*mutex == NULL)
		return (EINVAL);
	*count = (*mutex)->m_yieldloops;
	return (0);
}
int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_yieldloops = count;
	return (0);
}
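
/*
 * Illustrative usage sketch (not part of this file): tuning the
 * adaptive spin and yield counts per mutex with these FreeBSD-specific
 * interfaces from <pthread_np.h>.  The counts are arbitrary examples.
 *
 *	pthread_mutex_setspinloops_np(&mtx, 1000);
 *	pthread_mutex_setyieldloops_np(&mtx, 10);
 */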
int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (0);
	}
	return ((*mutex)->m_owner == curthread);
}
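
/*
 * Illustrative usage sketch (not part of this file): asserting a
 * locking protocol in debug builds; struct obj is hypothetical.
 *
 *	#include <assert.h>
 *
 *	void
 *	update_state(struct obj *o)
 *	{
 *		assert(pthread_mutex_isowned_np(&o->lock));
 *		o->state++;
 *	}
 */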