/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"
/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define	MUTEX_ADAPTIVE_SPINS	2000
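/*
 * Illustrative usage sketch (editor's addition, not part of libthr):
 * a caller opts into the adaptive spin path by requesting the
 * FreeBSD-specific PTHREAD_MUTEX_ADAPTIVE_NP type.  Error checking is
 * omitted for brevity.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */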
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
		void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(struct pthread_mutex *, int, int *);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
			const struct timespec *);
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);
static void
mutex_init_link(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
	m->m_qe.tqe_prev = NULL;
	m->m_qe.tqe_next = NULL;
	m->m_pqe.tqe_prev = NULL;
	m->m_pqe.tqe_next = NULL;
#endif
}

static void
mutex_assert_is_owned(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
	if (__predict_false(m->m_qe.tqe_prev == NULL)) {
		char msg[128];
		snprintf(msg, sizeof(msg),
		    "mutex %p own %#x %#x is not on list %p %p",
		    m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev,
		    m->m_qe.tqe_next);
		PANIC(msg);
	}
#endif
}

static void
mutex_assert_not_owned(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
	if (__predict_false(m->m_qe.tqe_prev != NULL ||
	    m->m_qe.tqe_next != NULL)) {
		char msg[128];
		snprintf(msg, sizeof(msg),
		    "mutex %p own %#x %#x is on list %p %p",
		    m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev,
		    m->m_qe.tqe_next);
		PANIC(msg);
	}
#endif
}

static int
is_pshared_mutex(struct pthread_mutex *m)
{

	return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);
}
static int
mutex_check_attr(const struct pthread_mutex_attr *attr)
{

	if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
	    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
		return (EINVAL);
	if (attr->m_protocol < PTHREAD_PRIO_NONE ||
	    attr->m_protocol > PTHREAD_PRIO_PROTECT)
		return (EINVAL);
	return (0);
}
static void
mutex_init_body(struct pthread_mutex *pmutex,
    const struct pthread_mutex_attr *attr)
{

	pmutex->m_flags = attr->m_type;
	pmutex->m_owner = 0;
	pmutex->m_count = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	mutex_init_link(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	}
	if (attr->m_pshared == PTHREAD_PROCESS_SHARED)
		pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED;

	if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}
}
static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;
	int error;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = mutex_attr;
	}
	error = mutex_check_attr(attr);
	if (error != 0)
		return (error);

	if ((pmutex = (pthread_mutex_t)
	    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);
	mutex_init_body(pmutex, attr);
	*mutex = pmutex;
	return (0);
}
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == THR_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
		    calloc);
	else
		ret = 0;
	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
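/*
 * Illustrative usage sketch (editor's addition, not part of libthr): a
 * statically initialized mutex reaches init_static() on its first use,
 * which replaces the initializer token with a freshly allocated mutex.
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	void
 *	f(void)
 *	{
 *		pthread_mutex_lock(&lock);	// first use runs init_static()
 *		// ... critical section ...
 *		pthread_mutex_unlock(&lock);
 *	}
 */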
static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->mq[TMQ_NORM_PP], mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}
static void
shared_mutex_init(struct pthread_mutex *pmtx, const struct
    pthread_mutex_attr *mutex_attr)
{
	static const struct pthread_mutex_attr foobar_mutex_attr = {
		.m_type = PTHREAD_MUTEX_DEFAULT,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_pshared = PTHREAD_PROCESS_SHARED
	};
	bool done;

	/*
	 * Hack to allow multiple pthread_mutex_init() calls on the
	 * same process-shared mutex.  We rely on the kernel allocating
	 * a zeroed offpage for the mutex, i.e. the
	 * PMUTEX_INITSTAGE_ALLOC value must be zero.
	 */
	for (done = false; !done;) {
		switch (pmtx->m_ps) {
		case PMUTEX_INITSTAGE_DONE:
			atomic_thread_fence_acq();
			done = true;
			break;
		case PMUTEX_INITSTAGE_ALLOC:
			if (atomic_cmpset_int(&pmtx->m_ps,
			    PMUTEX_INITSTAGE_ALLOC, PMUTEX_INITSTAGE_BUSY)) {
				if (mutex_attr == NULL)
					mutex_attr = &foobar_mutex_attr;
				mutex_init_body(pmtx, mutex_attr);
				atomic_store_rel_int(&pmtx->m_ps,
				    PMUTEX_INITSTAGE_DONE);
				done = true;
			}
			break;
		case PMUTEX_INITSTAGE_BUSY:
			_pthread_yield();
			break;
		default:
			PANIC("corrupted offpage");
			break;
		}
	}
}
int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	struct pthread_mutex *pmtx;
	int ret;

	if (mutex_attr != NULL) {
		ret = mutex_check_attr(*mutex_attr);
		if (ret != 0)
			return (ret);
	}
	if (mutex_attr == NULL ||
	    (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
		return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
		    calloc));
	}
	pmtx = __thr_pshared_offpage(mutex, 1);
	if (pmtx == NULL)
		return (EFAULT);
	*mutex = THR_PSHARED_PTR;
	shared_mutex_init(pmtx, *mutex_attr);
	return (0);
}
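/*
 * Illustrative usage sketch (editor's addition, not part of libthr):
 * initializing a process-shared mutex in memory visible to several
 * processes, e.g. a MAP_SHARED mapping.  Requires <sys/mman.h> and
 * <pthread.h>; error checking is omitted.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t *mp;
 *
 *	mp = mmap(NULL, sizeof(*mp), PROT_READ | PROT_WRITE,
 *	    MAP_SHARED | MAP_ANON, -1, 0);
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_mutex_init(mp, &attr);
 *	pthread_mutexattr_destroy(&attr);
 *
 * After fork(), both the parent and the child may lock and unlock *mp;
 * libthr keeps the real lock on a shared "offpage" and stores only the
 * THR_PSHARED_PTR token in *mp.
 */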
/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_pshared = PTHREAD_PROCESS_PRIVATE,
	};
	int ret;

	ret = mutex_init(mutex, &attr, calloc_cb);
	if (ret == 0)
		(*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
	return (ret);
}
/*
 * Fix mutex ownership for the child process.
 *
 * Process-private mutex ownership is transmitted from the forking
 * thread to the child process.
 *
 * Process-shared mutexes should not be inherited because their owner
 * is the forking thread, which lives in the parent process; they are
 * removed from the owned mutex list.
 */
static void
queue_fork(struct pthread *curthread, struct mutex_queue *q,
    struct mutex_queue *qp, uint bit)
{
	struct pthread_mutex *m;

	TAILQ_INIT(q);
	TAILQ_FOREACH(m, qp, m_pqe) {
		TAILQ_INSERT_TAIL(q, m, m_qe);
		m->m_lock.m_owner = TID(curthread) | bit;
		m->m_owner = TID(curthread);
	}
}
void
_mutex_fork(struct pthread *curthread)
{

	queue_fork(curthread, &curthread->mq[TMQ_NORM],
	    &curthread->mq[TMQ_NORM_PRIV], 0);
	queue_fork(curthread, &curthread->mq[TMQ_NORM_PP],
	    &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED);
}
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	pthread_mutex_t m, m1;
	int ret;

	m = *mutex;
	if (m < THR_MUTEX_DESTROYED) {
		ret = 0;
	} else if (m == THR_MUTEX_DESTROYED) {
		ret = EINVAL;
	} else {
		if (m == THR_PSHARED_PTR) {
			m1 = __thr_pshared_offpage(mutex, 0);
			if (m1 != NULL) {
				mutex_assert_not_owned(m1);
				__thr_pshared_destroy(mutex);
			}
			*mutex = THR_MUTEX_DESTROYED;
			return (0);
		}
		if (m->m_owner != 0) {
			ret = EBUSY;
		} else {
			*mutex = THR_MUTEX_DESTROYED;
			mutex_assert_not_owned(m);
			free(m);
			ret = 0;
		}
	}

	return (ret);
}
static int
mutex_qidx(struct pthread_mutex *m)
{

	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (TMQ_NORM);
	return (TMQ_NORM_PP);
}
static void
enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m)
{
	int qidx;

	m->m_owner = TID(curthread);
	/* Add to the list of owned mutexes: */
	mutex_assert_not_owned(m);
	qidx = mutex_qidx(m);
	TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
	if (!is_pshared_mutex(m))
		TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
}
static void
dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
{
	int qidx;

	m->m_owner = 0;
	mutex_assert_is_owned(m);
	qidx = mutex_qidx(m);
	TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
	if (!is_pshared_mutex(m))
		TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
		set_inherited_priority(curthread, m);
	mutex_init_link(m);
}
static int
check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)
{
	int ret;

	ret = 0;
	*m = *mutex;
	if (*m == THR_PSHARED_PTR) {
		*m = __thr_pshared_offpage(mutex, 0);
		if (*m == NULL)
			ret = EINVAL;
		else
			shared_mutex_init(*m, NULL);
	} else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) {
		if (*m == THR_MUTEX_DESTROYED) {
			ret = EINVAL;
		} else {
			ret = init_static(_get_curthread(), mutex);
			if (ret == 0)
				*m = *mutex;
		}
	}
	return (ret);
}
int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread;
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret != 0)
		return (ret);
	curthread = _get_curthread();
	id = TID(curthread);
	if (m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (__predict_true(ret == 0)) {
		enqueue_mutex(curthread, m);
	} else if (m->m_owner == id) {
		ret = mutex_self_trylock(m);
	}
	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}
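/*
 * Illustrative usage sketch (editor's addition, not part of libthr):
 * pthread_mutex_trylock() never blocks; it returns 0 on success and an
 * error such as EBUSY when the lock cannot be taken immediately.
 *
 *	if (pthread_mutex_trylock(&m) == 0) {
 *		// ... critical section ...
 *		pthread_mutex_unlock(&m);
 *	} else {
 *		// lock is busy; do something else instead of blocking
 *	}
 */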
static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
    const struct timespec *abstime)
{
	uint32_t id, owner;
	int count;
	int ret;

	id = TID(curthread);
	if (m->m_owner == id)
		return (mutex_self_lock(m, abstime));

	/*
	 * For adaptive mutexes, spin for a bit in the expectation
	 * that if the application requests this mutex type then
	 * the lock is likely to be released quickly and it is
	 * faster than entering the kernel.
	 */
	if (__predict_false(
	    (m->m_lock.m_flags &
	    (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
		goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
				ret = 0;
				goto done;
			}
		}
		CPU_SPINWAIT;
	}

yield_loop:
	count = m->m_yieldloops;
	while (count--) {
		_sched_yield();
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
				ret = 0;
				goto done;
			}
		}
	}

sleep_in_kernel:
	if (abstime == NULL) {
		ret = __thr_umutex_lock(&m->m_lock, id);
	} else if (__predict_false(
	    abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)) {
		ret = EINVAL;
	} else {
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
	}
done:
	if (ret == 0)
		enqueue_mutex(curthread, m);

	return (ret);
}
static int
mutex_lock_common(struct pthread_mutex *m,
    const struct timespec *abstime, int cvattach)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
		enqueue_mutex(curthread, m);
		ret = 0;
	} else {
		ret = mutex_lock_sleep(curthread, m, abstime);
	}
	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}
int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	int ret;

	_thr_check_init();
	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		ret = mutex_lock_common(m, NULL, 0);
	return (ret);
}
int
__pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct pthread_mutex *m;
	int ret;

	_thr_check_init();
	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		ret = mutex_lock_common(m, abstime, 0);
	return (ret);
}
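/*
 * Illustrative usage sketch (editor's addition, not part of libthr): the
 * abstime argument is an absolute CLOCK_REALTIME deadline with tv_nsec
 * in [0, 1000000000); otherwise mutex_lock_sleep() reports EINVAL.
 * Requires <time.h>.
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 2;		// give up roughly two seconds from now
 *	if (pthread_mutex_timedlock(&m, &deadline) == ETIMEDOUT) {
 *		// could not take the lock before the deadline
 *	}
 */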
int
_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *mp;

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
		shared_mutex_init(mp, NULL);
	} else {
		mp = *mutex;
	}
	return (mutex_unlock_common(mp, 0, NULL));
}
int
_mutex_cv_lock(struct pthread_mutex *m, int count)
{
	int error;

	error = mutex_lock_common(m, NULL, 1);
	if (error == 0)
		m->m_count = count;
	return (error);
}

int
_mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)
{

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_count = 0;
	(void)mutex_unlock_common(m, 1, defer);
	return (0);
}

int
_mutex_cv_attach(struct pthread_mutex *m, int count)
{
	struct pthread *curthread = _get_curthread();

	enqueue_mutex(curthread, m);
	m->m_count = count;
	return (0);
}
int
_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
{
	struct pthread *curthread = _get_curthread();
	int defered;
	int error;

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*recurse = mp->m_count;
	mp->m_count = 0;
	dequeue_mutex(curthread, mp);

	/* Will this happen in the real world? */
	if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
		defered = 1;
		mp->m_flags &= ~PMUTEX_FLAG_DEFERED;
	} else
		defered = 0;

	if (defered) {
		_thr_wake_all(curthread->defer_waiters,
		    curthread->nwaiter_defer);
		curthread->nwaiter_defer = 0;
	}
	return (0);
}
static int
mutex_self_trylock(struct pthread_mutex *m)
{
	int ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}
static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}
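/*
 * Illustrative usage sketch (editor's addition, not part of libthr): the
 * self-lock handling above is what makes the standard mutex types differ
 * when a thread relocks a mutex it already owns.  Requires <assert.h>
 * and <errno.h>; error checking is omitted.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutex_lock(&m);
 *	assert(pthread_mutex_lock(&m) == EDEADLK);	// detected, not a hang
 *
 * With PTHREAD_MUTEX_RECURSIVE the second lock succeeds and merely
 * increments m_count; with PTHREAD_MUTEX_NORMAL it deadlocks by design.
 */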
static int
mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
{
	struct pthread *curthread = _get_curthread();
	uint32_t id;
	int defered, error;

	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}

	id = TID(curthread);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != id))
		return (EPERM);

	error = 0;
	if (__predict_false(
	    PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
			defered = 1;
			m->m_flags &= ~PMUTEX_FLAG_DEFERED;
		} else
			defered = 0;

		dequeue_mutex(curthread, m);
		error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);

		if (mtx_defer == NULL && defered) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
	}
	if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_LEAVE(curthread);
	return (error);
}
int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	struct pthread_mutex *m;

	if (*mutex == THR_PSHARED_PTR) {
		m = __thr_pshared_offpage(mutex, 0);
		if (m == NULL)
			return (EINVAL);
		shared_mutex_init(m, NULL);
	} else {
		m = *mutex;
		if (m <= THR_MUTEX_DESTROYED)
			return (EINVAL);
	}
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);
	*prioceiling = m->m_lock.m_ceilings[0];
	return (0);
}
int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread;
	struct pthread_mutex *m, *m1, *m2;
	struct mutex_queue *q, *qp;
	int ret;

	if (*mutex == THR_PSHARED_PTR) {
		m = __thr_pshared_offpage(mutex, 0);
		if (m == NULL)
			return (EINVAL);
		shared_mutex_init(m, NULL);
	} else {
		m = *mutex;
		if (m <= THR_MUTEX_DESTROYED)
			return (EINVAL);
	}
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	curthread = _get_curthread();
	if (m->m_owner == TID(curthread)) {
		mutex_assert_is_owned(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			q = &curthread->mq[TMQ_NORM_PP];
			qp = &curthread->mq[TMQ_NORM_PP_PRIV];
			TAILQ_REMOVE(q, m, m_qe);
			if (!is_pshared_mutex(m))
				TAILQ_REMOVE(qp, m, m_pqe);
			TAILQ_FOREACH(m2, q, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					if (!is_pshared_mutex(m)) {
						while (m2 != NULL &&
						    is_pshared_mutex(m2)) {
							m2 = TAILQ_PREV(m2,
							    mutex_queue, m_qe);
						}
						if (m2 == NULL) {
							TAILQ_INSERT_HEAD(qp,
							    m, m_pqe);
						} else {
							TAILQ_INSERT_BEFORE(m2,
							    m, m_pqe);
						}
					}
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(q, m, m_qe);
			if (!is_pshared_mutex(m))
				TAILQ_INSERT_TAIL(qp, m, m_pqe);
		}
	}
	return (0);
}
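/*
 * Illustrative usage sketch (editor's addition, not part of libthr):
 * priority ceilings only apply to PTHREAD_PRIO_PROTECT mutexes; the
 * ceiling values below are arbitrary scheduling priorities.  Error
 * checking is omitted.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *	int old;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
 *	pthread_mutexattr_setprioceiling(&attr, 20);
 *	pthread_mutex_init(&m, &attr);
 *	// ...
 *	pthread_mutex_setprioceiling(&m, 25, &old);	// old gets the previous ceiling
 */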
int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		*count = m->m_spinloops;
	return (ret);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		m->m_spinloops = count;
	return (ret);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		*count = m->m_yieldloops;
	return (ret);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		m->m_yieldloops = count;
	return (ret);
}
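/*
 * Illustrative usage sketch (editor's addition, not part of libthr): the
 * spin and yield loop counts consumed by mutex_lock_sleep() can be tuned
 * per mutex through these FreeBSD-specific interfaces, declared in
 * <pthread_np.h>.  The counts below are arbitrary.
 *
 *	int spins;
 *
 *	pthread_mutex_getspinloops_np(&m, &spins);
 *	pthread_mutex_setspinloops_np(&m, 200);
 *	pthread_mutex_setyieldloops_np(&m, 10);
 */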
int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	if (*mutex == THR_PSHARED_PTR) {
		m = __thr_pshared_offpage(mutex, 0);
		if (m == NULL)
			return (0);
		shared_mutex_init(m, NULL);
	} else {
		m = *mutex;
		if (m <= THR_MUTEX_DESTROYED)
			return (0);
	}
	return (m->m_owner == TID(_get_curthread()));
}
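/*
 * Illustrative usage sketch (editor's addition, not part of libthr):
 * pthread_mutex_isowned_np() is handy for lock-ownership assertions,
 * e.g. at the top of a function that must only run with a lock held.
 * Requires <assert.h> and <pthread_np.h>; state_lock is a hypothetical
 * application mutex.
 *
 *	void
 *	update_state_locked(void)
 *	{
 *		assert(pthread_mutex_isowned_np(&state_lock));
 *		// ... mutate state protected by state_lock ...
 *	}
 */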
int
_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
{

	if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
		if (mp == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}
	if (mp->m_owner != TID(curthread))
		return (EPERM);
	return (0);
}