/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * Copyright (c) 2015, 2016 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <sys/param.h>
#include <sys/queue.h>

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"
_Static_assert(sizeof(struct pthread_mutex) <= PAGE_SIZE,
    "pthread_mutex is too large for off-page");

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	2000
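/*
 * Note: this is only the built-in default.  mutex_init_body() below
 * prefers the process-wide _thr_spinloops tunable when it is non-zero,
 * and the per-mutex counts can be changed afterwards with
 * pthread_mutex_setspinloops_np() and pthread_mutex_setyieldloops_np().
 */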
int __pthread_mutex_consistent(pthread_mutex_t *mutex);
int __pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr);
int __pthread_mutex_trylock(pthread_mutex_t *mutex);
int __pthread_mutex_lock(pthread_mutex_t *mutex);
int __pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abstime);
int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int _pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int _pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int mutex_self_trylock(pthread_mutex_t);
static int mutex_self_lock(pthread_mutex_t,
    const struct timespec *abstime);
static int mutex_unlock_common(struct pthread_mutex *, bool, int *);
static int mutex_lock_sleep(struct pthread *, pthread_mutex_t,
    const struct timespec *);
static void mutex_init_robust(struct pthread *curthread);
static int mutex_qidx(struct pthread_mutex *m);
static bool is_robust_mutex(struct pthread_mutex *m);
static bool is_pshared_mutex(struct pthread_mutex *m);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);
__weak_reference(_pthread_mutex_consistent, pthread_mutex_consistent);
__strong_reference(_pthread_mutex_consistent, __pthread_mutex_consistent);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);
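/*
 * Note on the aliases above: the unprefixed pthread_mutex_* names are
 * exported weak so that applications may interpose them, while the
 * single- and double-underscore variants are the namespace-protected
 * entry points used internally by libc and libthr (see namespace.h and
 * un-namespace.h).
 */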
mutex_init_link(struct pthread_mutex *m)
#if defined(_PTHREADS_INVARIANTS)
        m->m_qe.tqe_prev = NULL;
        m->m_qe.tqe_next = NULL;
        m->m_pqe.tqe_prev = NULL;
        m->m_pqe.tqe_next = NULL;

mutex_assert_is_owned(struct pthread_mutex *m __unused)
#if defined(_PTHREADS_INVARIANTS)
        if (__predict_false(m->m_qe.tqe_prev == NULL))
                PANIC("mutex %p own %#x is not on list %p %p",
                    m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next);

mutex_assert_not_owned(struct pthread *curthread __unused,
    struct pthread_mutex *m __unused)
#if defined(_PTHREADS_INVARIANTS)
        if (__predict_false(m->m_qe.tqe_prev != NULL ||
            m->m_qe.tqe_next != NULL))
                PANIC("mutex %p own %#x is on list %p %p",
                    m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next);
        if (__predict_false(is_robust_mutex(m) &&
            (m->m_lock.m_rb_lnk != 0 || m->m_rb_prev != NULL ||
            (is_pshared_mutex(m) && curthread->robust_list ==
            (uintptr_t)&m->m_lock) ||
            (!is_pshared_mutex(m) && curthread->priv_robust_list ==
            (uintptr_t)&m->m_lock))))
                PANIC(
    "mutex %p own %#x is on robust linkage %p %p head %p phead %p",
                    m, m->m_lock.m_owner, (void *)m->m_lock.m_rb_lnk,
                    m->m_rb_prev, (void *)curthread->robust_list,
                    (void *)curthread->priv_robust_list);

is_pshared_mutex(struct pthread_mutex *m)
        return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);

is_robust_mutex(struct pthread_mutex *m)
        return ((m->m_lock.m_flags & UMUTEX_ROBUST) != 0);
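/*
 * Bracket a lock or unlock operation on a robust mutex: inact_mtx
 * records the umutex this thread is currently operating on but has not
 * yet linked into (or unlinked from) its robust list, so the kernel can
 * still recover the lock if the thread terminates inside that window.
 */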
_mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m)
#if defined(_PTHREADS_INVARIANTS)
        if (__predict_false(curthread->inact_mtx != 0))
                PANIC("inact_mtx enter");
        if (!is_robust_mutex(m))
        mutex_init_robust(curthread);
        curthread->inact_mtx = (uintptr_t)&m->m_lock;

_mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m __unused)
#if defined(_PTHREADS_INVARIANTS)
        if (__predict_false(curthread->inact_mtx != (uintptr_t)&m->m_lock))
                PANIC("inact_mtx leave");
        curthread->inact_mtx = 0;

mutex_check_attr(const struct pthread_mutex_attr *attr)
        if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
            attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
        if (attr->m_protocol < PTHREAD_PRIO_NONE ||
            attr->m_protocol > PTHREAD_PRIO_PROTECT)
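/*
 * Lazily tell the kernel where this thread keeps its robust-mutex
 * state: UMTX_OP_ROBUST_LISTS registers the locations of the shared and
 * private robust list heads and of inact_mtx, which the kernel consults
 * on thread exit to mark still-held robust mutexes as owner-dead.
 */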
mutex_init_robust(struct pthread *curthread)
        struct umtx_robust_lists_params rb;

        if (curthread == NULL)
                curthread = _get_curthread();
        if (curthread->robust_inited)
        rb.robust_list_offset = (uintptr_t)&curthread->robust_list;
        rb.robust_priv_list_offset = (uintptr_t)&curthread->priv_robust_list;
        rb.robust_inact_offset = (uintptr_t)&curthread->inact_mtx;
        _umtx_op(NULL, UMTX_OP_ROBUST_LISTS, sizeof(rb), &rb, NULL);
        curthread->robust_inited = 1;
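/*
 * Translate the pthread mutex attributes into the in-memory mutex:
 * the PMUTEX type and flag word, the kernel umutex flags and initial
 * owner word, and the adaptive spin/yield counts.
 */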
mutex_init_body(struct pthread_mutex *pmutex,
    const struct pthread_mutex_attr *attr)

        pmutex->m_flags = attr->m_type;
        pmutex->m_spinloops = 0;
        pmutex->m_yieldloops = 0;
        mutex_init_link(pmutex);
        switch (attr->m_protocol) {
        case PTHREAD_PRIO_NONE:
                pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
                pmutex->m_lock.m_flags = 0;
        case PTHREAD_PRIO_INHERIT:
                pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
                pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
        case PTHREAD_PRIO_PROTECT:
                pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
                pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
                pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
        if (attr->m_pshared == PTHREAD_PROCESS_SHARED)
                pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED;
        if (attr->m_robust == PTHREAD_MUTEX_ROBUST) {
                mutex_init_robust(NULL);
                pmutex->m_lock.m_flags |= UMUTEX_ROBUST;
        if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
                pmutex->m_spinloops =
                    _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
                pmutex->m_yieldloops = _thr_yieldloops;
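/*
 * Allocate and initialize a process-private mutex.  The allocator is
 * passed in as calloc_cb so that malloc(3) can create its own mutexes
 * before the regular allocator is usable; see
 * _pthread_mutex_init_calloc_cb() below.
 */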
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))

        const struct pthread_mutex_attr *attr;
        struct pthread_mutex *pmutex;

        if (mutex_attr == NULL) {
                attr = &_pthread_mutexattr_default;
        error = mutex_check_attr(attr);
        if ((pmutex = (pthread_mutex_t)
            calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
        mutex_init_body(pmutex, attr);

init_static(struct pthread *thread, pthread_mutex_t *mutex)

        THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

        if (*mutex == THR_MUTEX_INITIALIZER)
                ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
        else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
                ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
                    calloc);

        THR_LOCK_RELEASE(thread, &_mutex_static_lock);

set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)

        struct pthread_mutex *m2;

        m2 = TAILQ_LAST(&curthread->mq[mutex_qidx(m)], mutex_queue);
        if (m2 != NULL)
                m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
        else
                m->m_lock.m_ceilings[1] = -1;
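/*
 * Initialize the pthread_mutex that lives in the shared offpage of a
 * process-shared mutex.  Several processes (or threads) may race to do
 * this, so the m_ps word serves as a small initialization state machine
 * (ALLOC -> BUSY -> DONE); see the comment inside.
 */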
shared_mutex_init(struct pthread_mutex *pmtx, const struct
    pthread_mutex_attr *mutex_attr)

        static const struct pthread_mutex_attr foobar_mutex_attr = {
                .m_type = PTHREAD_MUTEX_DEFAULT,
                .m_protocol = PTHREAD_PRIO_NONE,
                .m_pshared = PTHREAD_PROCESS_SHARED,
                .m_robust = PTHREAD_MUTEX_STALLED,

        /*
         * Hack to allow multiple pthread_mutex_init() calls on the
         * same process-shared mutex.  We rely on the kernel allocating
         * a zeroed offpage for the mutex, i.e. the
         * PMUTEX_INITSTAGE_ALLOC value must be zero.
         */
        for (done = false; !done;) {
                switch (pmtx->m_ps) {
                case PMUTEX_INITSTAGE_DONE:
                        atomic_thread_fence_acq();
                case PMUTEX_INITSTAGE_ALLOC:
                        if (atomic_cmpset_int(&pmtx->m_ps,
                            PMUTEX_INITSTAGE_ALLOC, PMUTEX_INITSTAGE_BUSY)) {
                                if (mutex_attr == NULL)
                                        mutex_attr = &foobar_mutex_attr;
                                mutex_init_body(pmtx, mutex_attr);
                                atomic_store_rel_int(&pmtx->m_ps,
                                    PMUTEX_INITSTAGE_DONE);
                case PMUTEX_INITSTAGE_BUSY:
                default:
                        PANIC("corrupted offpage");
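/*
 * Usage sketch (application side, not part of this file): a
 * process-shared mutex is placed in memory visible to every process
 * that will use it, for example an anonymous shared mapping inherited
 * across fork(), and is initialized with the PTHREAD_PROCESS_SHARED
 * attribute:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t *mp;
 *
 *	mp = mmap(NULL, sizeof(*mp), PROT_READ | PROT_WRITE,
 *	    MAP_SHARED | MAP_ANON, -1, 0);
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_mutex_init(mp, &attr);
 *
 * For such a mutex, *mp holds THR_PSHARED_PTR and the actual lock state
 * lives in the shared offpage returned by __thr_pshared_offpage().
 */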
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)

        struct pthread_mutex *pmtx;

        if (mutex_attr != NULL) {
                ret = mutex_check_attr(*mutex_attr);
        if (mutex_attr == NULL ||
            (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
                return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
                    calloc));
        pmtx = __thr_pshared_offpage(mutex, 1);
        *mutex = THR_PSHARED_PTR;
        shared_mutex_init(pmtx, *mutex_attr);

/* This function is used internally by malloc. */
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))

        static const struct pthread_mutex_attr attr = {
                .m_type = PTHREAD_MUTEX_NORMAL,
                .m_protocol = PTHREAD_PRIO_NONE,
                .m_pshared = PTHREAD_PROCESS_PRIVATE,
                .m_robust = PTHREAD_MUTEX_STALLED,

        ret = mutex_init(mutex, &attr, calloc_cb);
        if (ret == 0)
                (*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
/*
 * Fix mutex ownership for the child process.
 *
 * Ownership of process-private mutexes is transmitted from the forking
 * thread to the child process.
 *
 * Process-shared mutexes should not be inherited because their owner
 * is the forking thread, which lives in the parent process; they are
 * removed from the owned mutex list.
 */
queue_fork(struct pthread *curthread, struct mutex_queue *q,
    struct mutex_queue *qp, uint bit)

        struct pthread_mutex *m;

        TAILQ_FOREACH(m, qp, m_pqe) {
                TAILQ_INSERT_TAIL(q, m, m_qe);
                m->m_lock.m_owner = TID(curthread) | bit;

_mutex_fork(struct pthread *curthread)

        queue_fork(curthread, &curthread->mq[TMQ_NORM],
            &curthread->mq[TMQ_NORM_PRIV], 0);
        queue_fork(curthread, &curthread->mq[TMQ_NORM_PP],
            &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED);
        queue_fork(curthread, &curthread->mq[TMQ_ROBUST_PP],
            &curthread->mq[TMQ_ROBUST_PP_PRIV], UMUTEX_CONTESTED);
        curthread->robust_list = 0;
_pthread_mutex_destroy(pthread_mutex_t *mutex)

        pthread_mutex_t m, m1;

        if (m < THR_MUTEX_DESTROYED) {
        } else if (m == THR_MUTEX_DESTROYED) {
                if (m == THR_PSHARED_PTR) {
                        m1 = __thr_pshared_offpage(mutex, 0);
                                mutex_assert_not_owned(_get_curthread(), m1);
                                __thr_pshared_destroy(mutex);
                        *mutex = THR_MUTEX_DESTROYED;
                if (PMUTEX_OWNER_ID(m) != 0 &&
                    (uint32_t)m->m_lock.m_owner != UMUTEX_RB_NOTRECOV) {
                        *mutex = THR_MUTEX_DESTROYED;
                        mutex_assert_not_owned(_get_curthread(), m);

mutex_qidx(struct pthread_mutex *m)

        if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
                return (TMQ_NORM);
        return (is_robust_mutex(m) ? TMQ_ROBUST_PP : TMQ_NORM_PP);
/*
 * Both enqueue_mutex() and dequeue_mutex() operate on the
 * thread-private linkage of the locked mutexes and on the robust
 * linkage.
 *
 * The robust list, as seen by the kernel, must be consistent even in
 * the case of thread termination at an arbitrary moment.  Since either
 * enqueue or dequeue for the list walked by the kernel consists of
 * rewriting a single forward pointer, it is safe.  On the other hand,
 * rewrite of the back pointer is not atomic with respect to the forward
 * one, but the kernel does not care.
 */
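/*
 * Concretely, the kernel-visible robust list is singly linked through
 * m_lock.m_rb_lnk, headed by the per-thread robust_list (shared) or
 * priv_robust_list (private) pointer.  m_rb_prev exists only so that
 * dequeue_mutex() can unlink in O(1); the kernel never follows it.
 */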
enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m,
    int error)

        struct pthread_mutex *m1;

        /* Add to the list of owned mutexes: */
        if (error != EOWNERDEAD)
                mutex_assert_not_owned(curthread, m);
        qidx = mutex_qidx(m);
        TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
        if (!is_pshared_mutex(m))
                TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
        if (is_robust_mutex(m)) {
                rl = is_pshared_mutex(m) ? &curthread->robust_list :
                    &curthread->priv_robust_list;
                        m1 = __containerof((void *)*rl,
                            struct pthread_mutex, m_lock);
                        m->m_lock.m_rb_lnk = (uintptr_t)&m1->m_lock;
                        m->m_lock.m_rb_lnk = 0;
                *rl = (uintptr_t)&m->m_lock;
dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)

        struct pthread_mutex *mp, *mn;

        mutex_assert_is_owned(m);
        qidx = mutex_qidx(m);
        if (is_robust_mutex(m)) {
                        if (is_pshared_mutex(m)) {
                                curthread->robust_list = m->m_lock.m_rb_lnk;
                        } else {
                                curthread->priv_robust_list =
                                    m->m_lock.m_rb_lnk;
                        mp->m_lock.m_rb_lnk = m->m_lock.m_rb_lnk;
                if (m->m_lock.m_rb_lnk != 0) {
                        mn = __containerof((void *)m->m_lock.m_rb_lnk,
                            struct pthread_mutex, m_lock);
                        mn->m_rb_prev = m->m_rb_prev;
                m->m_lock.m_rb_lnk = 0;
        TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
        if (!is_pshared_mutex(m))
                TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
        if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
                set_inherited_priority(curthread, m);
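/*
 * Resolve a pthread_mutex_t word to the underlying struct pthread_mutex,
 * performing lazy initialization when needed: THR_PSHARED_PTR redirects
 * to the process-shared offpage, and the static initializer values are
 * turned into a real mutex by init_static().
 */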
check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)

        if (*m == THR_PSHARED_PTR) {
                *m = __thr_pshared_offpage(mutex, 0);
                        shared_mutex_init(*m, NULL);
        } else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) {
                if (*m == THR_MUTEX_DESTROYED) {
                        ret = init_static(_get_curthread(), mutex);
__pthread_mutex_trylock(pthread_mutex_t *mutex)

        struct pthread *curthread;
        struct pthread_mutex *m;

        ret = check_and_init_mutex(mutex, &m);
        curthread = _get_curthread();
        if (m->m_flags & PMUTEX_FLAG_PRIVATE)
                THR_CRITICAL_ENTER(curthread);
        robust = _mutex_enter_robust(curthread, m);
        ret = _thr_umutex_trylock(&m->m_lock, id);
        if (__predict_true(ret == 0) || ret == EOWNERDEAD) {
                enqueue_mutex(curthread, m, ret);
                if (ret == EOWNERDEAD)
                        m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
        } else if (PMUTEX_OWNER_ID(m) == id) {
                ret = mutex_self_trylock(m);
        _mutex_leave_robust(curthread, m);
        if (ret != 0 && ret != EOWNERDEAD &&
            (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0)
                THR_CRITICAL_LEAVE(curthread);
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
    const struct timespec *abstime)

        if (PMUTEX_OWNER_ID(m) == id)
                return (mutex_self_lock(m, abstime));

        /*
         * For adaptive mutexes, spin for a bit in the expectation
         * that if the application requests this mutex type then
         * the lock is likely to be released quickly and it is
         * faster than entering the kernel.
         */
        if (__predict_false((m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT |
            UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) != 0))
                goto sleep_in_kernel;

        count = m->m_spinloops;
                owner = m->m_lock.m_owner;
                if ((owner & ~UMUTEX_CONTESTED) == 0) {
                        if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,

        count = m->m_yieldloops;
                owner = m->m_lock.m_owner;
                if ((owner & ~UMUTEX_CONTESTED) == 0) {
                        if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,

                ret = __thr_umutex_lock(&m->m_lock, id);
        else if (__predict_false(abstime->tv_nsec < 0 ||
            abstime->tv_nsec >= 1000000000))
                ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);

        if (ret == 0 || ret == EOWNERDEAD) {
                enqueue_mutex(curthread, m, ret);
                if (ret == EOWNERDEAD)
                        m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime,
    bool cvattach, bool rb_onlist)

        struct pthread *curthread;

        robust = 0; /* pacify gcc */
        curthread = _get_curthread();
        if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
                THR_CRITICAL_ENTER(curthread);
        robust = _mutex_enter_robust(curthread, m);
        ret = _thr_umutex_trylock2(&m->m_lock, TID(curthread));
        if (ret == 0 || ret == EOWNERDEAD) {
                enqueue_mutex(curthread, m, ret);
                if (ret == EOWNERDEAD)
                        m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
                ret = mutex_lock_sleep(curthread, m, abstime);
        if (!rb_onlist && robust)
                _mutex_leave_robust(curthread, m);
        if (ret != 0 && ret != EOWNERDEAD &&
            (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0 && !cvattach)
                THR_CRITICAL_LEAVE(curthread);
__pthread_mutex_lock(pthread_mutex_t *mutex)

        struct pthread_mutex *m;

        ret = check_and_init_mutex(mutex, &m);
                ret = mutex_lock_common(m, NULL, false, false);

__pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abstime)

        struct pthread_mutex *m;

        ret = check_and_init_mutex(mutex, &m);
                ret = mutex_lock_common(m, abstime, false, false);

_pthread_mutex_unlock(pthread_mutex_t *mutex)

        struct pthread_mutex *mp;

        if (*mutex == THR_PSHARED_PTR) {
                mp = __thr_pshared_offpage(mutex, 0);
                        shared_mutex_init(mp, NULL);
        return (mutex_unlock_common(mp, false, NULL));
_mutex_cv_lock(struct pthread_mutex *m, int count, bool rb_onlist)

        error = mutex_lock_common(m, NULL, true, rb_onlist);
        if (error == 0 || error == EOWNERDEAD)

_mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)

        /*
         * Clear the count in case this is a recursive mutex.
         */
        (void)mutex_unlock_common(m, true, defer);

_mutex_cv_attach(struct pthread_mutex *m, int count)

        struct pthread *curthread;

        curthread = _get_curthread();
        enqueue_mutex(curthread, m, 0);

_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)

        struct pthread *curthread;

        curthread = _get_curthread();
        if ((error = _mutex_owned(curthread, mp)) != 0)

        /*
         * Clear the count in case this is a recursive mutex.
         */
        *recurse = mp->m_count;
        dequeue_mutex(curthread, mp);

        /* Will this happen in the real world? */
        if ((mp->m_flags & PMUTEX_FLAG_DEFERRED) != 0) {
                mp->m_flags &= ~PMUTEX_FLAG_DEFERRED;

                _thr_wake_all(curthread->defer_waiters,
                    curthread->nwaiter_defer);
                curthread->nwaiter_defer = 0;
mutex_self_trylock(struct pthread_mutex *m)

        switch (PMUTEX_TYPE(m->m_flags)) {
        case PTHREAD_MUTEX_ERRORCHECK:
        case PTHREAD_MUTEX_NORMAL:
        case PTHREAD_MUTEX_ADAPTIVE_NP:
        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                if (m->m_count + 1 > 0) {
        default:
                /* Trap invalid mutex types. */

mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)

        struct timespec ts1, ts2;

        switch (PMUTEX_TYPE(m->m_flags)) {
        case PTHREAD_MUTEX_ERRORCHECK:
        case PTHREAD_MUTEX_ADAPTIVE_NP:
                if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
                    abstime->tv_nsec >= 1000000000) {
                        clock_gettime(CLOCK_REALTIME, &ts1);
                        TIMESPEC_SUB(&ts2, abstime, &ts1);
                        __sys_nanosleep(&ts2, NULL);
                /*
                 * POSIX specifies that mutexes should return
                 * EDEADLK if a recursive lock is detected.
                 */

        case PTHREAD_MUTEX_NORMAL:
                /*
                 * What SS2 defines as a 'normal' mutex.  Intentionally
                 * deadlock on attempts to get a lock you already own.
                 */
                if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
                    abstime->tv_nsec >= 1000000000) {
                        clock_gettime(CLOCK_REALTIME, &ts1);
                        TIMESPEC_SUB(&ts2, abstime, &ts1);
                        __sys_nanosleep(&ts2, NULL);
                        __sys_nanosleep(&ts1, NULL);

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                if (m->m_count + 1 > 0) {
        default:
                /* Trap invalid mutex types. */
mutex_unlock_common(struct pthread_mutex *m, bool cv, int *mtx_defer)

        struct pthread *curthread;
        int deferred, error, robust;

        if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
                if (m == THR_MUTEX_DESTROYED)
        curthread = _get_curthread();

        /*
         * Check if the running thread is not the owner of the mutex.
         */
        if (__predict_false(PMUTEX_OWNER_ID(m) != id))

        if (__predict_false(PMUTEX_TYPE(m->m_flags) ==
            PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) {
        if ((m->m_flags & PMUTEX_FLAG_DEFERRED) != 0) {
                m->m_flags &= ~PMUTEX_FLAG_DEFERRED;
        robust = _mutex_enter_robust(curthread, m);
        dequeue_mutex(curthread, m);
        error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);
        if (mtx_defer == NULL) {
                _thr_wake_all(curthread->defer_waiters,
                    curthread->nwaiter_defer);
                curthread->nwaiter_defer = 0;
        _mutex_leave_robust(curthread, m);
        if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE)
                THR_CRITICAL_LEAVE(curthread);
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)

        struct pthread_mutex *m;

        if (*mutex == THR_PSHARED_PTR) {
                m = __thr_pshared_offpage(mutex, 0);
                        shared_mutex_init(m, NULL);
        if (m <= THR_MUTEX_DESTROYED)
        if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
        *prioceiling = m->m_lock.m_ceilings[0];
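/*
 * Change the priority ceiling of a PTHREAD_PRIO_PROTECT mutex.  If the
 * calling thread currently owns the mutex, the owned-mutex queue must
 * stay sorted by ceiling, so the mutex is re-inserted at its proper
 * position after the ceiling is updated.
 */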
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)

        struct pthread *curthread;
        struct pthread_mutex *m, *m1, *m2;
        struct mutex_queue *q, *qp;

        if (*mutex == THR_PSHARED_PTR) {
                m = __thr_pshared_offpage(mutex, 0);
                        shared_mutex_init(m, NULL);
        if (m <= THR_MUTEX_DESTROYED)
        if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)

        ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);

        curthread = _get_curthread();
        if (PMUTEX_OWNER_ID(m) == TID(curthread)) {
                mutex_assert_is_owned(m);
                m1 = TAILQ_PREV(m, mutex_queue, m_qe);
                m2 = TAILQ_NEXT(m, m_qe);
                if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
                    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
                        qidx = mutex_qidx(m);
                        q = &curthread->mq[qidx];
                        qp = &curthread->mq[qidx + 1];
                        TAILQ_REMOVE(q, m, m_qe);
                        if (!is_pshared_mutex(m))
                                TAILQ_REMOVE(qp, m, m_pqe);
                        TAILQ_FOREACH(m2, q, m_qe) {
                                if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
                                        TAILQ_INSERT_BEFORE(m2, m, m_qe);
                                        if (!is_pshared_mutex(m)) {
                                                while (m2 != NULL &&
                                                    is_pshared_mutex(m2)) {
                                                        TAILQ_INSERT_HEAD(qp,
                                                            m, m_pqe);
                                                        TAILQ_INSERT_BEFORE(m2,
                                                            m, m_pqe);
                        TAILQ_INSERT_TAIL(q, m, m_qe);
                        if (!is_pshared_mutex(m))
                                TAILQ_INSERT_TAIL(qp, m, m_pqe);
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)

        struct pthread_mutex *m;

        ret = check_and_init_mutex(mutex, &m);
                *count = m->m_spinloops;

__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)

        struct pthread_mutex *m;

        ret = check_and_init_mutex(mutex, &m);
                m->m_spinloops = count;

_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)

        struct pthread_mutex *m;

        ret = check_and_init_mutex(mutex, &m);
                *count = m->m_yieldloops;

__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)

        struct pthread_mutex *m;

        ret = check_and_init_mutex(mutex, &m);
                m->m_yieldloops = count;

_pthread_mutex_isowned_np(pthread_mutex_t *mutex)

        struct pthread_mutex *m;

        if (*mutex == THR_PSHARED_PTR) {
                m = __thr_pshared_offpage(mutex, 0);
                        shared_mutex_init(m, NULL);
        if (m <= THR_MUTEX_DESTROYED)
        return (PMUTEX_OWNER_ID(m) == TID(_get_curthread()));
_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)

        if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
                if (mp == THR_MUTEX_DESTROYED)
        if (PMUTEX_OWNER_ID(mp) != TID(curthread))

_pthread_mutex_consistent(pthread_mutex_t *mutex)

        struct pthread_mutex *m;
        struct pthread *curthread;

        if (*mutex == THR_PSHARED_PTR) {
                m = __thr_pshared_offpage(mutex, 0);
                        shared_mutex_init(m, NULL);
        if (m <= THR_MUTEX_DESTROYED)

        curthread = _get_curthread();
        if ((m->m_lock.m_flags & (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) !=
            (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT))
        if (PMUTEX_OWNER_ID(m) != TID(curthread))
        m->m_lock.m_flags &= ~UMUTEX_NONCONSISTENT;
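/*
 * Usage sketch for the robust-mutex path implemented above (application
 * side, not part of this file): a lock of a robust mutex whose previous
 * owner died returns EOWNERDEAD with the mutex held; the new owner
 * repairs the protected data, calls pthread_mutex_consistent() to clear
 * UMUTEX_NONCONSISTENT, and then unlocks as usual:
 *
 *	error = pthread_mutex_lock(&m);
 *	if (error == EOWNERDEAD) {
 *		repair_shared_state();		(application-specific)
 *		pthread_mutex_consistent(&m);
 *	} else if (error != 0)
 *		return (error);
 *	...critical section...
 *	pthread_mutex_unlock(&m);
 */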