/*-
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * Copyright (c) 2015, 2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"
_Static_assert(sizeof(struct pthread_mutex) <= PAGE_SIZE,
    "pthread_mutex is too large for off-page");
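/*
 * Process-shared mutexes do not live inside the application's
 * pthread_mutex_t; they are placed on a separate shared page (the
 * "off-page") obtained through __thr_pshared_offpage(), with the
 * user-visible handle set to THR_PSHARED_PTR.  The assertion above
 * guarantees that a struct pthread_mutex always fits on such a page.
 */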
/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define	MUTEX_ADAPTIVE_SPINS	2000
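/*
 * Example (illustrative only, not part of the library): an application
 * selects the adaptive spinning path by creating a mutex of type
 * PTHREAD_MUTEX_ADAPTIVE_NP, e.g.:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t mtx;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 *	pthread_mutex_init(&mtx, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */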
int	__pthread_mutex_consistent(pthread_mutex_t *mutex);
int	__pthread_mutex_init(pthread_mutex_t * __restrict mutex,
	    const pthread_mutexattr_t * __restrict mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t * __restrict mutex,
	    const struct timespec * __restrict abstime);
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
		    const struct timespec *abstime);
static int	mutex_unlock_common(struct pthread_mutex *, bool, int *);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
		    const struct timespec *);
static void	mutex_init_robust(struct pthread *curthread);
static int	mutex_qidx(struct pthread_mutex *m);
static bool	is_robust_mutex(struct pthread_mutex *m);
static bool	is_pshared_mutex(struct pthread_mutex *m);
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);
__weak_reference(_pthread_mutex_consistent, pthread_mutex_consistent);
__strong_reference(_pthread_mutex_consistent, __pthread_mutex_consistent);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);
mutex_init_link(struct pthread_mutex *m)

#if defined(_PTHREADS_INVARIANTS)
	m->m_qe.tqe_prev = NULL;
	m->m_qe.tqe_next = NULL;
	m->m_pqe.tqe_prev = NULL;
	m->m_pqe.tqe_next = NULL;
#endif

mutex_assert_is_owned(struct pthread_mutex *m __unused)

#if defined(_PTHREADS_INVARIANTS)
	if (__predict_false(m->m_qe.tqe_prev == NULL))
		PANIC("mutex %p own %#x is not on list %p %p",
		    m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next);
#endif

mutex_assert_not_owned(struct pthread *curthread __unused,
    struct pthread_mutex *m __unused)

#if defined(_PTHREADS_INVARIANTS)
	if (__predict_false(m->m_qe.tqe_prev != NULL ||
	    m->m_qe.tqe_next != NULL))
		PANIC("mutex %p own %#x is on list %p %p",
		    m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next);
	if (__predict_false(is_robust_mutex(m) &&
	    (m->m_lock.m_rb_lnk != 0 || m->m_rb_prev != NULL ||
	    (is_pshared_mutex(m) && curthread->robust_list ==
	    (uintptr_t)&m->m_lock) ||
	    (!is_pshared_mutex(m) && curthread->priv_robust_list ==
	    (uintptr_t)&m->m_lock))))
		PANIC(
    "mutex %p own %#x is on robust linkage %p %p head %p phead %p",
		    m, m->m_lock.m_owner, (void *)m->m_lock.m_rb_lnk,
		    m->m_rb_prev, (void *)curthread->robust_list,
		    (void *)curthread->priv_robust_list);
#endif
is_pshared_mutex(struct pthread_mutex *m)

	return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);

is_robust_mutex(struct pthread_mutex *m)

	return ((m->m_lock.m_flags & UMUTEX_ROBUST) != 0);

_mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m)

#if defined(_PTHREADS_INVARIANTS)
	if (__predict_false(curthread->inact_mtx != 0))
		PANIC("inact_mtx enter");
#endif
	if (!is_robust_mutex(m))
		return;

	mutex_init_robust(curthread);
	curthread->inact_mtx = (uintptr_t)&m->m_lock;

_mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m __unused)

#if defined(_PTHREADS_INVARIANTS)
	if (__predict_false(curthread->inact_mtx != (uintptr_t)&m->m_lock))
		PANIC("inact_mtx leave");
#endif
	curthread->inact_mtx = 0;

mutex_check_attr(const struct pthread_mutex_attr *attr)

	if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
	    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
		return (EINVAL);
	if (attr->m_protocol < PTHREAD_PRIO_NONE ||
	    attr->m_protocol > PTHREAD_PRIO_PROTECT)
		return (EINVAL);
	return (0);

mutex_init_robust(struct pthread *curthread)

	struct umtx_robust_lists_params rb;

	if (curthread == NULL)
		curthread = _get_curthread();
	if (curthread->robust_inited)
		return;
	rb.robust_list_offset = (uintptr_t)&curthread->robust_list;
	rb.robust_priv_list_offset = (uintptr_t)&curthread->priv_robust_list;
	rb.robust_inact_offset = (uintptr_t)&curthread->inact_mtx;
	_umtx_op(NULL, UMTX_OP_ROBUST_LISTS, sizeof(rb), &rb, NULL);
	curthread->robust_inited = 1;
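/*
 * The _umtx_op() call above registers, once per thread, the locations of
 * the thread's robust mutex list heads (shared, private, and the
 * "inactive" slot used while a lock or unlock is in flight) with the
 * kernel via UMTX_OP_ROBUST_LISTS.  On thread exit the kernel walks these
 * lists so that a later locker of an abandoned robust mutex is told about
 * the dead owner through EOWNERDEAD.
 */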
mutex_init_body(struct pthread_mutex *pmutex,
    const struct pthread_mutex_attr *attr)

	pmutex->m_flags = attr->m_type;

	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	mutex_init_link(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	}
	if (attr->m_pshared == PTHREAD_PROCESS_SHARED)
		pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED;
	if (attr->m_robust == PTHREAD_MUTEX_ROBUST) {
		mutex_init_robust(NULL);
		pmutex->m_lock.m_flags |= UMUTEX_ROBUST;
	}
	if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))

	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = mutex_attr;
	}
	error = mutex_check_attr(attr);
	if (error != 0)
		return (error);
	if ((pmutex = (pthread_mutex_t)
	    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);
	mutex_init_body(pmutex, attr);
	*mutex = pmutex;

init_static(struct pthread *thread, pthread_mutex_t *mutex)

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == THR_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
		    calloc);

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)

	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->mq[mutex_qidx(m)], mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;

shared_mutex_init(struct pthread_mutex *pmtx, const struct
    pthread_mutex_attr *mutex_attr)

	static const struct pthread_mutex_attr foobar_mutex_attr = {
		.m_type = PTHREAD_MUTEX_DEFAULT,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_pshared = PTHREAD_PROCESS_SHARED,
		.m_robust = PTHREAD_MUTEX_STALLED,
	};
	/*
	 * Hack to allow multiple pthread_mutex_init() calls on the
	 * same process-shared mutex.  We rely on the kernel allocating
	 * a zeroed offpage for the mutex, i.e. the
	 * PMUTEX_INITSTAGE_ALLOC value must be zero.
	 */
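	/*
	 * The m_ps word acts as a small state machine: the offpage starts
	 * zeroed (PMUTEX_INITSTAGE_ALLOC), the first initializer claims it
	 * with a compare-and-set to PMUTEX_INITSTAGE_BUSY, fills in the
	 * mutex body, and publishes it with a store-release to
	 * PMUTEX_INITSTAGE_DONE; other initializers wait while it is BUSY
	 * and observe the completed state through an acquire fence.
	 */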
	for (done = false; !done;) {
		switch (pmtx->m_ps) {
		case PMUTEX_INITSTAGE_DONE:
			atomic_thread_fence_acq();
			done = true;
			break;
		case PMUTEX_INITSTAGE_ALLOC:
			if (atomic_cmpset_int(&pmtx->m_ps,
			    PMUTEX_INITSTAGE_ALLOC, PMUTEX_INITSTAGE_BUSY)) {
				if (mutex_attr == NULL)
					mutex_attr = &foobar_mutex_attr;
				mutex_init_body(pmtx, mutex_attr);
				atomic_store_rel_int(&pmtx->m_ps,
				    PMUTEX_INITSTAGE_DONE);
			}
			break;
		case PMUTEX_INITSTAGE_BUSY:
			break;
		default:
			PANIC("corrupted offpage");
		}
	}

__pthread_mutex_init(pthread_mutex_t * __restrict mutex,
    const pthread_mutexattr_t * __restrict mutex_attr)

	struct pthread_mutex *pmtx;

	if (mutex_attr != NULL) {
		ret = mutex_check_attr(*mutex_attr);
		if (ret != 0)
			return (ret);
	}
	if (mutex_attr == NULL ||
	    (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
		return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
		    calloc));
	}
	pmtx = __thr_pshared_offpage(__DECONST(void *, mutex), 1);

	*mutex = THR_PSHARED_PTR;
	shared_mutex_init(pmtx, *mutex_attr);
	return (0);
/* This function is used internally by malloc. */
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))

	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_pshared = PTHREAD_PROCESS_PRIVATE,
		.m_robust = PTHREAD_MUTEX_STALLED,
	};

	ret = mutex_init(mutex, &attr, calloc_cb);
	if (ret == 0)
		(*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
	return (ret);
/*
 * Fix mutex ownership for child process.
 *
 * Process-private mutex ownership is transmitted from the forking
 * thread to the child process.
 *
 * Process-shared mutexes should not be inherited because their owner
 * is the forking thread, which lives in the parent process; they are
 * removed from the owned mutex list.
 */
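/*
 * Example of the pattern this enables (illustrative only; the handler
 * names are hypothetical application functions): a library may take its
 * process-private locks in a pthread_atfork() prepare handler and release
 * them in both the parent and child handlers, since the child is
 * re-stamped as the owner of process-private mutexes by _mutex_fork():
 *
 *	pthread_atfork(lock_all_locks, unlock_all_locks, unlock_all_locks);
 */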
queue_fork(struct pthread *curthread, struct mutex_queue *q,
    struct mutex_queue *qp, uint bit)

	struct pthread_mutex *m;

	TAILQ_FOREACH(m, qp, m_pqe) {
		TAILQ_INSERT_TAIL(q, m, m_qe);
		m->m_lock.m_owner = TID(curthread) | bit;
	}

_mutex_fork(struct pthread *curthread)

	queue_fork(curthread, &curthread->mq[TMQ_NORM],
	    &curthread->mq[TMQ_NORM_PRIV], 0);
	queue_fork(curthread, &curthread->mq[TMQ_NORM_PP],
	    &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED);
	queue_fork(curthread, &curthread->mq[TMQ_ROBUST_PP],
	    &curthread->mq[TMQ_ROBUST_PP_PRIV], UMUTEX_CONTESTED);
	curthread->robust_list = 0;
_pthread_mutex_destroy(pthread_mutex_t *mutex)

	pthread_mutex_t m, m1;

	m = *mutex;
	if (m < THR_MUTEX_DESTROYED) {
		ret = 0;
	} else if (m == THR_MUTEX_DESTROYED) {
		ret = EINVAL;
	} else {
		if (m == THR_PSHARED_PTR) {
			m1 = __thr_pshared_offpage(mutex, 0);

			mutex_assert_not_owned(_get_curthread(), m1);
			__thr_pshared_destroy(mutex);

			*mutex = THR_MUTEX_DESTROYED;
			return (0);
		}
		if (PMUTEX_OWNER_ID(m) != 0 &&
		    (uint32_t)m->m_lock.m_owner != UMUTEX_RB_NOTRECOV) {
			ret = EBUSY;
		} else {
			*mutex = THR_MUTEX_DESTROYED;
			mutex_assert_not_owned(_get_curthread(), m);
		}
	}

mutex_qidx(struct pthread_mutex *m)

	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (TMQ_NORM);
	return (is_robust_mutex(m) ? TMQ_ROBUST_PP : TMQ_NORM_PP);
/*
 * Both enqueue_mutex() and dequeue_mutex() operate on the
 * thread-private linkage of the locked mutexes and on the robust
 * list as seen by the kernel.
 *
 * The robust list, as seen by the kernel, must remain consistent even if
 * the thread terminates at an arbitrary moment.  Since either enqueue or
 * dequeue on the list walked by the kernel consists of rewriting a single
 * forward pointer, this is safe.  On the other hand, the rewrite of the
 * back pointer is not atomic WRT the forward one, but the kernel does not
 * care.
 */
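/*
 * Concretely, the kernel-visible robust list is a singly-linked list
 * threaded through the m_rb_lnk word of each owned robust umutex and
 * headed by curthread->robust_list (curthread->priv_robust_list for
 * process-private mutexes).  The m_rb_prev back pointer exists only for
 * userspace, so that dequeue_mutex() can unlink in constant time.
 */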
enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m,
    int error)

	struct pthread_mutex *m1;
	uintptr_t *rl;

	/* Add to the list of owned mutexes: */
	if (error != EOWNERDEAD)
		mutex_assert_not_owned(curthread, m);
	qidx = mutex_qidx(m);
	TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
	if (!is_pshared_mutex(m))
		TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
	if (is_robust_mutex(m)) {
		rl = is_pshared_mutex(m) ? &curthread->robust_list :
		    &curthread->priv_robust_list;
		if (*rl != 0) {
			m1 = __containerof((void *)*rl,
			    struct pthread_mutex, m_lock);
			m->m_lock.m_rb_lnk = (uintptr_t)&m1->m_lock;
			m1->m_rb_prev = m;
		} else {
			m->m_lock.m_rb_lnk = 0;
		}
		*rl = (uintptr_t)&m->m_lock;
	}

dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)

	struct pthread_mutex *mp, *mn;

	mutex_assert_is_owned(m);
	qidx = mutex_qidx(m);
	if (is_robust_mutex(m)) {
		mp = m->m_rb_prev;
		if (mp == NULL) {
			if (is_pshared_mutex(m)) {
				curthread->robust_list = m->m_lock.m_rb_lnk;
			} else {
				curthread->priv_robust_list =
				    m->m_lock.m_rb_lnk;
			}
		} else {
			mp->m_lock.m_rb_lnk = m->m_lock.m_rb_lnk;
		}
		if (m->m_lock.m_rb_lnk != 0) {
			mn = __containerof((void *)m->m_lock.m_rb_lnk,
			    struct pthread_mutex, m_lock);
			mn->m_rb_prev = m->m_rb_prev;
		}
		m->m_lock.m_rb_lnk = 0;
		m->m_rb_prev = NULL;
	}
	TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
	if (!is_pshared_mutex(m))
		TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
		set_inherited_priority(curthread, m);
check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)

	if (*m == THR_PSHARED_PTR) {
		*m = __thr_pshared_offpage(mutex, 0);
		if (*m == NULL)
			ret = EINVAL;
		else
			shared_mutex_init(*m, NULL);
	} else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) {
		if (*m == THR_MUTEX_DESTROYED) {
			ret = EINVAL;
		} else {
			ret = init_static(_get_curthread(), mutex);
		}
	}

__pthread_mutex_trylock(pthread_mutex_t *mutex)

	struct pthread *curthread;
	struct pthread_mutex *m;

	ret = check_and_init_mutex(mutex, &m);
	if (ret != 0)
		return (ret);
	curthread = _get_curthread();
	id = TID(curthread);
	if (m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	robust = _mutex_enter_robust(curthread, m);
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (__predict_true(ret == 0) || ret == EOWNERDEAD) {
		enqueue_mutex(curthread, m, ret);
		if (ret == EOWNERDEAD)
			m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
	} else if (PMUTEX_OWNER_ID(m) == id) {
		ret = mutex_self_trylock(m);
	}
	if (robust)
		_mutex_leave_robust(curthread, m);
	if (ret != 0 && ret != EOWNERDEAD &&
	    (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0)
		THR_CRITICAL_LEAVE(curthread);
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
    const struct timespec *abstime)

	if (PMUTEX_OWNER_ID(m) == id)
		return (mutex_self_lock(m, abstime));

	/*
	 * For adaptive mutexes, spin for a bit in the expectation
	 * that if the application requests this mutex type then
	 * the lock is likely to be released quickly and it is
	 * faster than entering the kernel.
	 */
	if (__predict_false((m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT |
	    UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) != 0))
		goto sleep_in_kernel;
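	/*
	 * The spin below runs in two phases, bounded by m_spinloops and
	 * m_yieldloops: first busy-wait on the owner word and retry the
	 * acquisition with a compare-and-set whenever the lock appears
	 * unowned, then fall back to yielding the processor between
	 * retries, and only after that sleep in the kernel.
	 */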
	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,

	count = m->m_yieldloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,

sleep_in_kernel:
	if (abstime == NULL)
		ret = __thr_umutex_lock(&m->m_lock, id);
	else if (__predict_false(abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000))
		ret = EINVAL;
	else
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);

	if (ret == 0 || ret == EOWNERDEAD) {
		enqueue_mutex(curthread, m, ret);
		if (ret == EOWNERDEAD)
			m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
	}
mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime,
    bool cvattach, bool rb_onlist)

	struct pthread *curthread;

	robust = 0;  /* pacify gcc */
	curthread = _get_curthread();
	if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	if (!rb_onlist)
		robust = _mutex_enter_robust(curthread, m);
	ret = _thr_umutex_trylock2(&m->m_lock, TID(curthread));
	if (ret == 0 || ret == EOWNERDEAD) {
		enqueue_mutex(curthread, m, ret);
		if (ret == EOWNERDEAD)
			m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
	} else {
		ret = mutex_lock_sleep(curthread, m, abstime);
	}
	if (!rb_onlist && robust)
		_mutex_leave_robust(curthread, m);
	if (ret != 0 && ret != EOWNERDEAD &&
	    (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0 && !cvattach)
		THR_CRITICAL_LEAVE(curthread);
__pthread_mutex_lock(pthread_mutex_t *mutex)

	struct pthread_mutex *m;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		ret = mutex_lock_common(m, NULL, false, false);
	return (ret);

__pthread_mutex_timedlock(pthread_mutex_t * __restrict mutex,
    const struct timespec * __restrict abstime)

	struct pthread_mutex *m;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		ret = mutex_lock_common(m, abstime, false, false);
	return (ret);

_pthread_mutex_unlock(pthread_mutex_t *mutex)

	struct pthread_mutex *mp;

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);

		shared_mutex_init(mp, NULL);
	} else {
		mp = *mutex;
	}
	return (mutex_unlock_common(mp, false, NULL));
_mutex_cv_lock(struct pthread_mutex *m, int count, bool rb_onlist)

	error = mutex_lock_common(m, NULL, true, rb_onlist);
	if (error == 0 || error == EOWNERDEAD)
		m->m_count = count;
	return (error);

_mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_count = 0;
	(void)mutex_unlock_common(m, true, defer);
	return (0);

_mutex_cv_attach(struct pthread_mutex *m, int count)

	struct pthread *curthread;

	curthread = _get_curthread();
	enqueue_mutex(curthread, m, 0);
	m->m_count = count;

_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)

	struct pthread *curthread;

	curthread = _get_curthread();
	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*recurse = mp->m_count;
	mp->m_count = 0;
	dequeue_mutex(curthread, mp);

	/* Will this happen in the real world? */
	if ((mp->m_flags & PMUTEX_FLAG_DEFERRED) != 0) {
		deferred = 1;
		mp->m_flags &= ~PMUTEX_FLAG_DEFERRED;
	} else
		deferred = 0;

	if (deferred) {
		_thr_wake_all(curthread->defer_waiters,
		    curthread->nwaiter_defer);
		curthread->nwaiter_defer = 0;
	}
mutex_self_trylock(struct pthread_mutex *m)

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)

	struct timespec ts1, ts2;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
			}
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
			}
		} else {
			__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}
mutex_unlock_common(struct pthread_mutex *m, bool cv, int *mtx_defer)

	struct pthread *curthread;
	uint32_t id;
	int deferred, error, private, robust;

	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}
	curthread = _get_curthread();
	id = TID(curthread);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(PMUTEX_OWNER_ID(m) != id))
		return (EPERM);

	private = (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0;
	if (__predict_false(PMUTEX_TYPE(m->m_flags) ==
	    PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) {
		m->m_count--;
	} else {
		if ((m->m_flags & PMUTEX_FLAG_DEFERRED) != 0) {
			deferred = 1;
			m->m_flags &= ~PMUTEX_FLAG_DEFERRED;
		} else
			deferred = 0;

		robust = _mutex_enter_robust(curthread, m);
		dequeue_mutex(curthread, m);
		error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);
		if (deferred) {
			if (mtx_defer == NULL) {
				_thr_wake_all(curthread->defer_waiters,
				    curthread->nwaiter_defer);
				curthread->nwaiter_defer = 0;
			}
		}
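		/*
		 * The deferred wakeup above batches waiters that the
		 * condition variable code moved onto this mutex: their wake
		 * addresses were stashed in curthread->defer_waiters and
		 * PMUTEX_FLAG_DEFERRED was set, so they can be woken in one
		 * _thr_wake_all() call after the lock word has been
		 * released, instead of one by one while the mutex was still
		 * held.
		 */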
		if (robust)
			_mutex_leave_robust(curthread, m);
	}
	if (!cv && private)
		THR_CRITICAL_LEAVE(curthread);
	return (error);

_pthread_mutex_getprioceiling(const pthread_mutex_t * __restrict mutex,
    int * __restrict prioceiling)

	struct pthread_mutex *m;

	if (*mutex == THR_PSHARED_PTR) {
		m = __thr_pshared_offpage(__DECONST(void *, mutex), 0);

		shared_mutex_init(m, NULL);
	} else {
		m = *mutex;
		if (m <= THR_MUTEX_DESTROYED)
			return (EINVAL);
	}
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);
	*prioceiling = m->m_lock.m_ceilings[0];
	return (0);

_pthread_mutex_setprioceiling(pthread_mutex_t * __restrict mutex,
    int ceiling, int * __restrict old_ceiling)

	struct pthread *curthread;
	struct pthread_mutex *m, *m1, *m2;
	struct mutex_queue *q, *qp;

	if (*mutex == THR_PSHARED_PTR) {
		m = __thr_pshared_offpage(mutex, 0);

		shared_mutex_init(m, NULL);
	} else {
		m = *mutex;
		if (m <= THR_MUTEX_DESTROYED)
			return (EINVAL);
	}
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	curthread = _get_curthread();
	if (PMUTEX_OWNER_ID(m) == TID(curthread)) {
		mutex_assert_is_owned(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			qidx = mutex_qidx(m);
			q = &curthread->mq[qidx];
			qp = &curthread->mq[qidx + 1];
			TAILQ_REMOVE(q, m, m_qe);
			if (!is_pshared_mutex(m))
				TAILQ_REMOVE(qp, m, m_pqe);
			TAILQ_FOREACH(m2, q, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					if (!is_pshared_mutex(m)) {
						while (m2 != NULL &&
						    is_pshared_mutex(m2)) {

							TAILQ_INSERT_HEAD(qp,

							TAILQ_INSERT_BEFORE(m2,

			TAILQ_INSERT_TAIL(q, m, m_qe);
			if (!is_pshared_mutex(m))
				TAILQ_INSERT_TAIL(qp, m, m_pqe);
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)

	struct pthread_mutex *m;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		*count = m->m_spinloops;
	return (ret);

__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)

	struct pthread_mutex *m;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		m->m_spinloops = count;
	return (ret);

_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)

	struct pthread_mutex *m;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		*count = m->m_yieldloops;
	return (ret);

__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)

	struct pthread_mutex *m;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		m->m_yieldloops = count;
	return (ret);

_pthread_mutex_isowned_np(pthread_mutex_t *mutex)

	struct pthread_mutex *m;

	if (*mutex == THR_PSHARED_PTR) {
		m = __thr_pshared_offpage(mutex, 0);

		shared_mutex_init(m, NULL);
	} else {
		m = *mutex;
		if (m <= THR_MUTEX_DESTROYED)
			return (0);
	}
	return (PMUTEX_OWNER_ID(m) == TID(_get_curthread()));
_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)

	if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
		if (mp == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}
	if (PMUTEX_OWNER_ID(mp) != TID(curthread))
		return (EPERM);
	return (0);

_pthread_mutex_consistent(pthread_mutex_t *mutex)

	struct pthread_mutex *m;
	struct pthread *curthread;

	if (*mutex == THR_PSHARED_PTR) {
		m = __thr_pshared_offpage(mutex, 0);

		shared_mutex_init(m, NULL);
	} else {
		m = *mutex;
		if (m <= THR_MUTEX_DESTROYED)
			return (EINVAL);
	}
	curthread = _get_curthread();
	if ((m->m_lock.m_flags & (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) !=
	    (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT))
		return (EINVAL);
	if (PMUTEX_OWNER_ID(m) != TID(curthread))

	m->m_lock.m_flags &= ~UMUTEX_NONCONSISTENT;
	return (0);