/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS    2000

int __pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr);
int __pthread_mutex_trylock(pthread_mutex_t *mutex);
int __pthread_mutex_lock(pthread_mutex_t *mutex);
int __pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abstime);
int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t));
int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int _pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int _pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int  mutex_self_trylock(pthread_mutex_t);
static int  mutex_self_lock(pthread_mutex_t,
    const struct timespec *abstime);
static int  mutex_unlock_common(struct pthread_mutex *, int, int *);
static int  mutex_lock_sleep(struct pthread *, pthread_mutex_t,
    const struct timespec *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

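/*
 * The helpers below maintain the per-thread lists of owned mutexes.
 * The link initialization and ownership assertions compile to no-ops
 * unless _PTHREADS_INVARIANTS is defined.
 */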
static void
mutex_init_link(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
    m->m_qe.tqe_prev = NULL;
    m->m_qe.tqe_next = NULL;
    m->m_pqe.tqe_prev = NULL;
    m->m_pqe.tqe_next = NULL;
#endif
}

static void
mutex_assert_is_owned(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
    if (__predict_false(m->m_qe.tqe_prev == NULL))
        PANIC("mutex is not on list");
#endif
}

static void
mutex_assert_not_owned(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
    if (__predict_false(m->m_qe.tqe_prev != NULL ||
        m->m_qe.tqe_next != NULL))
        PANIC("mutex is on list");
#endif
}

static int
is_pshared_mutex(struct pthread_mutex *m)
{

    return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);
}

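/*
 * Validate a mutex attribute: the type must be one of the known mutex
 * types and the protocol one of PTHREAD_PRIO_{NONE,INHERIT,PROTECT}.
 */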
static int
mutex_check_attr(const struct pthread_mutex_attr *attr)
{

    if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
        attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
        return (EINVAL);
    if (attr->m_protocol < PTHREAD_PRIO_NONE ||
        attr->m_protocol > PTHREAD_PRIO_PROTECT)
        return (EINVAL);
    return (0);
}

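/*
 * Initialize a mutex object from validated attributes: the library
 * flags and spin/yield counters, plus the kernel umutex word whose
 * flags and initial owner depend on the requested protocol.
 */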
static void
mutex_init_body(struct pthread_mutex *pmutex,
    const struct pthread_mutex_attr *attr)
{

    pmutex->m_flags = attr->m_type;
    pmutex->m_owner = 0;
    pmutex->m_count = 0;
    pmutex->m_spinloops = 0;
    pmutex->m_yieldloops = 0;
    mutex_init_link(pmutex);
    switch (attr->m_protocol) {
    case PTHREAD_PRIO_NONE:
        pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
        pmutex->m_lock.m_flags = 0;
        break;
    case PTHREAD_PRIO_INHERIT:
        pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
        pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
        break;
    case PTHREAD_PRIO_PROTECT:
        pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
        pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
        pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
        break;
    }
    if (attr->m_pshared == PTHREAD_PROCESS_SHARED)
        pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED;

    if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
        pmutex->m_spinloops =
            _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
        pmutex->m_yieldloops = _thr_yieldloops;
    }
}

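/*
 * Allocate a mutex with the supplied calloc-compatible allocator and
 * initialize it from the given attributes (or the defaults), storing
 * the result in *mutex.
 */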
static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
    const struct pthread_mutex_attr *attr;
    struct pthread_mutex *pmutex;
    int error;

    if (mutex_attr == NULL) {
        attr = &_pthread_mutexattr_default;
    } else {
        attr = mutex_attr;
    }
    error = mutex_check_attr(attr);
    if (error != 0)
        return (error);

    if ((pmutex = (pthread_mutex_t)
        calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
        return (ENOMEM);
    mutex_init_body(pmutex, attr);
    *mutex = pmutex;
    return (0);
}

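/*
 * Lazily initialize a statically initialized mutex
 * (THR_MUTEX_INITIALIZER or THR_ADAPTIVE_MUTEX_INITIALIZER) under
 * _mutex_static_lock, so only one thread performs the allocation.
 */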
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
    int ret;

    THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

    if (*mutex == THR_MUTEX_INITIALIZER)
        ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
    else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
        ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
            calloc);
    else
        ret = 0;
    THR_LOCK_RELEASE(thread, &_mutex_static_lock);

    return (ret);
}

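/*
 * After a priority-protected mutex is dropped, recompute the inherited
 * ceiling from the last PP mutex the thread still owns, or reset it to
 * -1 when none remain.
 */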
static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
    struct pthread_mutex *m2;

    m2 = TAILQ_LAST(&curthread->mq[TMQ_NORM_PP], mutex_queue);
    if (m2 != NULL)
        m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
    else
        m->m_lock.m_ceilings[1] = -1;
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
    struct pthread_mutex *pmtx;
    int ret;

    if (mutex_attr != NULL) {
        ret = mutex_check_attr(*mutex_attr);
        if (ret != 0)
            return (ret);
    }
    if (mutex_attr == NULL ||
        (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
        return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
            calloc));
    }
    pmtx = __thr_pshared_offpage(mutex, 1);
    if (pmtx == NULL)
        return (EFAULT);
    *mutex = THR_PSHARED_PTR;
    mutex_init_body(pmtx, *mutex_attr);
    return (0);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
    static const struct pthread_mutex_attr attr = {
        .m_type = PTHREAD_MUTEX_NORMAL,
        .m_protocol = PTHREAD_PRIO_NONE,
        .m_ceiling = 0,
        .m_pshared = PTHREAD_PROCESS_PRIVATE,
    };
    int ret;

    ret = mutex_init(mutex, &attr, calloc_cb);
    if (ret == 0)
        (*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
    return (ret);
}

/*
 * Fix mutex ownership for child process.
 *
 * Process-private mutex ownership is transmitted from the forking
 * thread to the child process.
 *
 * Process-shared mutexes should not be inherited because the owner is
 * the forking thread, which lives in the parent process; they are
 * removed from the owned mutex list.
 */
static void
queue_fork(struct pthread *curthread, struct mutex_queue *q,
    struct mutex_queue *qp, uint bit)
{
    struct pthread_mutex *m;

    TAILQ_INIT(q);
    TAILQ_FOREACH(m, qp, m_pqe) {
        TAILQ_INSERT_TAIL(q, m, m_qe);
        m->m_lock.m_owner = TID(curthread) | bit;
        m->m_owner = TID(curthread);
    }
}

void
_mutex_fork(struct pthread *curthread)
{

    queue_fork(curthread, &curthread->mq[TMQ_NORM],
        &curthread->mq[TMQ_NORM_PRIV], 0);
    queue_fork(curthread, &curthread->mq[TMQ_NORM_PP],
        &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED);
}

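/*
 * Destroy a mutex: a mutex that is still owned returns EBUSY and one
 * already destroyed returns EINVAL; process-shared mutexes release
 * their off-page object, private ones are freed.
 */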
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
    pthread_mutex_t m, m1;
    int ret;

    m = *mutex;
    if (m < THR_MUTEX_DESTROYED) {
        ret = 0;
    } else if (m == THR_MUTEX_DESTROYED) {
        ret = EINVAL;
    } else {
        if (m == THR_PSHARED_PTR) {
            m1 = __thr_pshared_offpage(mutex, 0);
            if (m1 != NULL) {
                mutex_assert_not_owned(m1);
                __thr_pshared_destroy(mutex);
            }
            *mutex = THR_MUTEX_DESTROYED;
            return (0);
        }
        if (m->m_owner != 0) {
            ret = EBUSY;
        } else {
            *mutex = THR_MUTEX_DESTROYED;
            mutex_assert_not_owned(m);
            free(m);
            ret = 0;
        }
    }

    return (ret);
}

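/*
 * Owned mutexes are tracked on two pairs of queues: TMQ_NORM for
 * ordinary mutexes and TMQ_NORM_PP for priority-protected ones, each
 * paired with a *_PRIV queue that holds only process-private mutexes.
 */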
static int
mutex_qidx(struct pthread_mutex *m)
{

    if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
        return (TMQ_NORM);
    return (TMQ_NORM_PP);
}

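/*
 * Record or drop ownership of a mutex on the calling thread's queues.
 */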
static void
enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m)
{
    int qidx;

    m->m_owner = TID(curthread);
    /* Add to the list of owned mutexes: */
    mutex_assert_not_owned(m);
    qidx = mutex_qidx(m);
    TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
    if (!is_pshared_mutex(m))
        TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
}

static void
dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
{
    int qidx;

    m->m_owner = 0;
    mutex_assert_is_owned(m);
    qidx = mutex_qidx(m);
    TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
    if (!is_pshared_mutex(m))
        TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
    if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
        set_inherited_priority(curthread, m);
    mutex_init_link(m);
}

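/*
 * Resolve *mutex to its backing object: translate the off-page pointer
 * for process-shared mutexes and perform first-use initialization of
 * statically initialized mutexes.
 */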
static int
check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)
{
    int ret;

    ret = 0;
    *m = *mutex;
    if (*m == THR_PSHARED_PTR) {
        *m = __thr_pshared_offpage(mutex, 0);
        if (*m == NULL)
            ret = EINVAL;
    } else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) {
        if (*m == THR_MUTEX_DESTROYED) {
            ret = EINVAL;
        } else {
            ret = init_static(_get_curthread(), mutex);
            if (ret == 0)
                *m = *mutex;
        }
    }
    return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    struct pthread *curthread;
    struct pthread_mutex *m;
    uint32_t id;
    int ret;

    ret = check_and_init_mutex(mutex, &m);
    if (ret != 0)
        return (ret);
    curthread = _get_curthread();
    id = TID(curthread);
    if (m->m_flags & PMUTEX_FLAG_PRIVATE)
        THR_CRITICAL_ENTER(curthread);
    ret = _thr_umutex_trylock(&m->m_lock, id);
    if (__predict_true(ret == 0)) {
        enqueue_mutex(curthread, m);
    } else if (m->m_owner == id) {
        ret = mutex_self_trylock(m);
    }
    if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
        THR_CRITICAL_LEAVE(curthread);
    return (ret);
}

static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
    const struct timespec *abstime)
{
    uint32_t id, owner;
    int count, ret;

    id = TID(curthread);
    if (m->m_owner == id)
        return (mutex_self_lock(m, abstime));

    /*
     * For adaptive mutexes, spin for a bit in the expectation
     * that if the application requests this mutex type then
     * the lock is likely to be released quickly and it is
     * faster than entering the kernel.
     */
    if (__predict_false((m->m_lock.m_flags &
        (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
        goto sleep_in_kernel;

    if (!_thr_is_smp)
        goto yield_loop;

    count = m->m_spinloops;
    while (count--) {
        owner = m->m_lock.m_owner;
        if ((owner & ~UMUTEX_CONTESTED) == 0) {
            if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
                ret = 0;
                goto done;
            }
        }
        CPU_SPINWAIT;
    }

yield_loop:
    count = m->m_yieldloops;
    while (count--) {
        _sched_yield();
        owner = m->m_lock.m_owner;
        if ((owner & ~UMUTEX_CONTESTED) == 0) {
            if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
                ret = 0;
                goto done;
            }
        }
    }

sleep_in_kernel:
    if (abstime == NULL) {
        ret = __thr_umutex_lock(&m->m_lock, id);
    } else if (__predict_false(abstime->tv_nsec < 0 ||
        abstime->tv_nsec >= 1000000000)) {
        ret = EINVAL;
    } else {
        ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
    }
done:
    if (ret == 0)
        enqueue_mutex(curthread, m);

    return (ret);
}

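/*
 * Common lock path: try the cheap userspace acquire first and fall
 * back to mutex_lock_sleep() on contention.  cvattach is non-zero when
 * a condition variable re-acquires the mutex, in which case the
 * THR_CRITICAL_ENTER/LEAVE bookkeeping for private mutexes is skipped.
 */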
static int
mutex_lock_common(struct pthread_mutex *m,
    const struct timespec *abstime, int cvattach)
{
    struct pthread *curthread = _get_curthread();
    int ret;

    if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
        THR_CRITICAL_ENTER(curthread);
    if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
        enqueue_mutex(curthread, m);
        ret = 0;
    } else {
        ret = mutex_lock_sleep(curthread, m, abstime);
    }
    if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach)
        THR_CRITICAL_LEAVE(curthread);
    return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
    struct pthread_mutex *m;
    int ret;

    _thr_check_init();
    ret = check_and_init_mutex(mutex, &m);
    if (ret == 0)
        ret = mutex_lock_common(m, NULL, 0);
    return (ret);
}

int
__pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
    struct pthread_mutex *m;
    int ret;

    _thr_check_init();
    ret = check_and_init_mutex(mutex, &m);
    if (ret == 0)
        ret = mutex_lock_common(m, abstime, 0);
    return (ret);
}

int
_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
    struct pthread_mutex *mp;

    if (*mutex == THR_PSHARED_PTR) {
        mp = __thr_pshared_offpage(mutex, 0);
        if (mp == NULL)
            return (EINVAL);
    } else {
        mp = *mutex;
    }
    return (mutex_unlock_common(mp, 0, NULL));
}

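/*
 * Internal helpers used by the condition variable code to lock,
 * unlock, attach and detach a mutex while preserving its recursion
 * count.
 */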
int
_mutex_cv_lock(struct pthread_mutex *m, int count)
{
    int error;

    error = mutex_lock_common(m, NULL, 1);
    if (error == 0)
        m->m_count = count;
    return (error);
}

int
_mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)
{

    /*
     * Clear the count in case this is a recursive mutex.
     */
    *count = m->m_count;
    m->m_count = 0;
    (void)mutex_unlock_common(m, 1, defer);
    return (0);
}

int
_mutex_cv_attach(struct pthread_mutex *m, int count)
{
    struct pthread *curthread = _get_curthread();

    enqueue_mutex(curthread, m);
    m->m_count = count;
    return (0);
}

int
_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
{
    struct pthread *curthread = _get_curthread();
    int defered, error;

    if ((error = _mutex_owned(curthread, mp)) != 0)
        return (error);

    /*
     * Clear the count in case this is a recursive mutex.
     */
    *recurse = mp->m_count;
    mp->m_count = 0;
    dequeue_mutex(curthread, mp);

    /* Will this happen in the real world? */
    if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
        defered = 1;
        mp->m_flags &= ~PMUTEX_FLAG_DEFERED;
    } else
        defered = 0;

    if (defered) {
        _thr_wake_all(curthread->defer_waiters,
            curthread->nwaiter_defer);
        curthread->nwaiter_defer = 0;
    }
    return (error);
}

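/*
 * Handle lock attempts on a mutex the calling thread already owns;
 * the result depends on the mutex type.
 */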
static int
mutex_self_trylock(struct pthread_mutex *m)
{
    int ret;

    switch (PMUTEX_TYPE(m->m_flags)) {
    case PTHREAD_MUTEX_ERRORCHECK:
    case PTHREAD_MUTEX_NORMAL:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
        ret = EBUSY;
        break;

    case PTHREAD_MUTEX_RECURSIVE:
        /* Increment the lock count: */
        if (m->m_count + 1 > 0) {
            m->m_count++;
            ret = 0;
        } else
            ret = EAGAIN;
        break;

    default:
        /* Trap invalid mutex types. */
        ret = EINVAL;
    }

    return (ret);
}

static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
    struct timespec ts1, ts2;
    int ret;

    switch (PMUTEX_TYPE(m->m_flags)) {
    case PTHREAD_MUTEX_ERRORCHECK:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
        if (abstime) {
            if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
                abstime->tv_nsec >= 1000000000) {
                ret = EINVAL;
            } else {
                clock_gettime(CLOCK_REALTIME, &ts1);
                TIMESPEC_SUB(&ts2, abstime, &ts1);
                __sys_nanosleep(&ts2, NULL);
                ret = ETIMEDOUT;
            }
        } else {
            /*
             * POSIX specifies that mutexes should return
             * EDEADLK if a recursive lock is detected.
             */
            ret = EDEADLK;
        }
        break;

    case PTHREAD_MUTEX_NORMAL:
        /*
         * What SS2 defines as a 'normal' mutex.  Intentionally
         * deadlock on attempts to get a lock you already own.
         */
        ret = 0;
        if (abstime) {
            if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
                abstime->tv_nsec >= 1000000000) {
                ret = EINVAL;
            } else {
                clock_gettime(CLOCK_REALTIME, &ts1);
                TIMESPEC_SUB(&ts2, abstime, &ts1);
                __sys_nanosleep(&ts2, NULL);
                ret = ETIMEDOUT;
            }
        } else {
            ts1.tv_sec = 30;
            ts1.tv_nsec = 0;
            for (;;)
                __sys_nanosleep(&ts1, NULL);
        }
        break;

    case PTHREAD_MUTEX_RECURSIVE:
        /* Increment the lock count: */
        if (m->m_count + 1 > 0) {
            m->m_count++;
            ret = 0;
        } else
            ret = EAGAIN;
        break;

    default:
        /* Trap invalid mutex types. */
        ret = EINVAL;
    }

    return (ret);
}

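/*
 * Common unlock path: handle the recursion count, release the lock
 * word via _thr_umutex_unlock2(), and wake any waiters whose wakeups
 * were deferred while the mutex was held.
 */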
static int
mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
{
    struct pthread *curthread = _get_curthread();
    uint32_t id;
    int defered, error;

    if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
        if (m == THR_MUTEX_DESTROYED)
            return (EINVAL);
        return (EPERM);
    }

    id = TID(curthread);

    /*
     * Check if the running thread is not the owner of the mutex.
     */
    if (__predict_false(m->m_owner != id))
        return (EPERM);

    error = 0;
    if (__predict_false(
        PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE &&
        m->m_count > 0)) {
        m->m_count--;
    } else {
        if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
            defered = 1;
            m->m_flags &= ~PMUTEX_FLAG_DEFERED;
        } else
            defered = 0;

        dequeue_mutex(curthread, m);
        error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);

        if (mtx_defer == NULL && defered) {
            _thr_wake_all(curthread->defer_waiters,
                curthread->nwaiter_defer);
            curthread->nwaiter_defer = 0;
        }
    }
    if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE)
        THR_CRITICAL_LEAVE(curthread);
    return (error);
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
    struct pthread_mutex *m;

    if (*mutex == THR_PSHARED_PTR) {
        m = __thr_pshared_offpage(mutex, 0);
        if (m == NULL)
            return (EINVAL);
    } else {
        m = *mutex;
        if (m <= THR_MUTEX_DESTROYED)
            return (EINVAL);
    }
    if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
        return (EINVAL);
    *prioceiling = m->m_lock.m_ceilings[0];
    return (0);
}

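/*
 * Change the priority ceiling of a PTHREAD_PRIO_PROTECT mutex.  If the
 * calling thread owns it, the mutex is repositioned in the owner's
 * priority-protected queue so that queue stays sorted by ceiling.
 */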
int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
    struct pthread *curthread;
    struct pthread_mutex *m, *m1, *m2;
    struct mutex_queue *q, *qp;
    int ret;

    if (*mutex == THR_PSHARED_PTR) {
        m = __thr_pshared_offpage(mutex, 0);
        if (m == NULL)
            return (EINVAL);
    } else {
        m = *mutex;
        if (m <= THR_MUTEX_DESTROYED)
            return (EINVAL);
    }
    if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
        return (EINVAL);

    ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
    if (ret != 0)
        return (ret);

    curthread = _get_curthread();
    if (m->m_owner == TID(curthread)) {
        mutex_assert_is_owned(m);
        m1 = TAILQ_PREV(m, mutex_queue, m_qe);
        m2 = TAILQ_NEXT(m, m_qe);
        if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
            (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
            q = &curthread->mq[TMQ_NORM_PP];
            qp = &curthread->mq[TMQ_NORM_PP_PRIV];
            TAILQ_REMOVE(q, m, m_qe);
            if (!is_pshared_mutex(m))
                TAILQ_REMOVE(qp, m, m_pqe);
            TAILQ_FOREACH(m2, q, m_qe) {
                if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
                    TAILQ_INSERT_BEFORE(m2, m, m_qe);
                    if (!is_pshared_mutex(m)) {
                        while (m2 != NULL &&
                            is_pshared_mutex(m2)) {
                            m2 = TAILQ_PREV(m2,
                                mutex_queue, m_qe);
                        }
                        if (m2 == NULL) {
                            TAILQ_INSERT_HEAD(qp,
                                m, m_pqe);
                        } else {
                            TAILQ_INSERT_BEFORE(m2,
                                m, m_pqe);
                        }
                    }
                    return (0);
                }
            }
            TAILQ_INSERT_TAIL(q, m, m_qe);
            if (!is_pshared_mutex(m))
                TAILQ_INSERT_TAIL(qp, m, m_pqe);
        }
    }
    return (0);
}

int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
    struct pthread_mutex *m;
    int ret;

    ret = check_and_init_mutex(mutex, &m);
    if (ret == 0)
        *count = m->m_spinloops;
    return (ret);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
    struct pthread_mutex *m;
    int ret;

    ret = check_and_init_mutex(mutex, &m);
    if (ret == 0)
        m->m_spinloops = count;
    return (ret);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
    struct pthread_mutex *m;
    int ret;

    ret = check_and_init_mutex(mutex, &m);
    if (ret == 0)
        *count = m->m_yieldloops;
    return (ret);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
    struct pthread_mutex *m;
    int ret;

    ret = check_and_init_mutex(mutex, &m);
    if (ret == 0)
        m->m_yieldloops = count;
    return (ret);
}

int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
    struct pthread_mutex *m;

    if (*mutex == THR_PSHARED_PTR) {
        m = __thr_pshared_offpage(mutex, 0);
        if (m == NULL)
            return (0);
    } else {
        m = *mutex;
        if (m <= THR_MUTEX_DESTROYED)
            return (0);
    }
    return (m->m_owner == TID(_get_curthread()));
}

int
_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
{

    if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
        if (mp == THR_MUTEX_DESTROYED)
            return (EINVAL);
        return (EPERM);
    }
    if (mp->m_owner != TID(curthread))
        return (EPERM);
    return (0);
}