/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"
#include "thr_private.h"
/*
 * Debug-build invariants: when _PTHREADS_INVARIANTS is defined, the
 * mutex/owner-queue linkage is sanity-checked and violations PANIC;
 * otherwise the checks compile away to nothing.
 */
#if defined(_PTHREADS_INVARIANTS)
/* Mark a mutex as not linked on any thread's owned-mutex queue. */
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
/* A mutex on a queue has a non-NULL tqe_prev back-pointer. */
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

/* True when the thread is queued waiting on a mutex. */
#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
/* Tear down the low-level lock and release the mutex storage. */
#define	MUTEX_DESTROY(m) do {		\
	_lock_destroy(&(m)->m_lock);	\
	free(m);			\
} while (0)
78 static struct kse_mailbox *mutex_handoff(struct pthread *,
79 struct pthread_mutex *);
80 static inline int mutex_self_trylock(pthread_mutex_t);
81 static inline int mutex_self_lock(struct pthread *, pthread_mutex_t);
82 static int mutex_unlock_common(pthread_mutex_t *, int);
83 static void mutex_priority_adjust(struct pthread *, pthread_mutex_t);
84 static void mutex_rescan_owned (struct pthread *, struct pthread *,
85 struct pthread_mutex *);
86 static inline pthread_t mutex_queue_deq(pthread_mutex_t);
87 static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
88 static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
89 static void mutex_lock_backout(void *arg);
91 int __pthread_mutex_init(pthread_mutex_t *mutex,
92 const pthread_mutexattr_t *mutex_attr);
93 int __pthread_mutex_trylock(pthread_mutex_t *mutex);
94 int __pthread_mutex_lock(pthread_mutex_t *m);
95 int __pthread_mutex_timedlock(pthread_mutex_t *m,
96 const struct timespec *abs_timeout);
97 int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
98 void *(calloc_cb)(size_t, size_t));
101 static struct pthread_mutex_attr static_mutex_attr =
102 PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
103 static pthread_mutexattr_t static_mattr = &static_mutex_attr;
105 /* Single underscore versions provided for libc internal usage: */
106 __weak_reference(__pthread_mutex_init, pthread_mutex_init);
107 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
108 __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
109 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
111 /* No difference between libc and application usage of these: */
112 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
113 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
114 __weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);
117 thr_mutex_init(pthread_mutex_t *mutex,
118 const pthread_mutexattr_t *mutex_attr, void *(calloc_cb)(size_t, size_t))
120 struct pthread_mutex *pmutex;
121 enum pthread_mutextype type;
130 /* Check if default mutex attributes: */
131 else if (mutex_attr == NULL || *mutex_attr == NULL) {
132 /* Default to a (error checking) POSIX mutex: */
133 type = PTHREAD_MUTEX_ERRORCHECK;
134 protocol = PTHREAD_PRIO_NONE;
135 ceiling = THR_MAX_PRIORITY;
139 /* Check mutex type: */
140 else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
141 ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
142 /* Return an invalid argument error: */
145 /* Check mutex protocol: */
146 else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
147 ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
148 /* Return an invalid argument error: */
152 /* Use the requested mutex type and protocol: */
153 type = (*mutex_attr)->m_type;
154 protocol = (*mutex_attr)->m_protocol;
155 ceiling = (*mutex_attr)->m_ceiling;
156 flags = (*mutex_attr)->m_flags;
159 /* Check no errors so far: */
161 if ((pmutex = (pthread_mutex_t)
162 calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
164 else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
165 _thr_lock_wait, _thr_lock_wakeup, calloc_cb) != 0) {
170 /* Set the mutex flags: */
171 pmutex->m_flags = flags;
173 /* Process according to mutex type: */
175 /* case PTHREAD_MUTEX_DEFAULT: */
176 case PTHREAD_MUTEX_ERRORCHECK:
177 case PTHREAD_MUTEX_NORMAL:
178 case PTHREAD_MUTEX_ADAPTIVE_NP:
179 /* Nothing to do here. */
182 /* Single UNIX Spec 2 recursive mutex: */
183 case PTHREAD_MUTEX_RECURSIVE:
184 /* Reset the mutex count: */
188 /* Trap invalid mutex types: */
190 /* Return an invalid argument error: */
195 /* Initialise the rest of the mutex: */
196 TAILQ_INIT(&pmutex->m_queue);
197 pmutex->m_flags |= MUTEX_FLAGS_INITED;
198 pmutex->m_owner = NULL;
199 pmutex->m_type = type;
200 pmutex->m_protocol = protocol;
201 pmutex->m_refcount = 0;
202 if (protocol == PTHREAD_PRIO_PROTECT)
203 pmutex->m_prio = ceiling;
206 pmutex->m_saved_prio = 0;
207 MUTEX_INIT_LINK(pmutex);
210 /* Free the mutex lock structure: */
211 MUTEX_DESTROY(pmutex);
216 /* Return the completion status: */
221 __pthread_mutex_init(pthread_mutex_t *mutex,
222 const pthread_mutexattr_t *mutex_attr)
225 return (thr_mutex_init(mutex, mutex_attr, calloc));
229 _pthread_mutex_init(pthread_mutex_t *mutex,
230 const pthread_mutexattr_t *mutex_attr)
232 struct pthread_mutex_attr mattr, *mattrp;
234 if ((mutex_attr == NULL) || (*mutex_attr == NULL))
235 return (__pthread_mutex_init(mutex, &static_mattr));
237 mattr = **mutex_attr;
238 mattr.m_flags |= MUTEX_FLAGS_PRIVATE;
240 return (__pthread_mutex_init(mutex, &mattrp));
244 /* This function is used internally by malloc. */
246 _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
247 void *(calloc_cb)(size_t, size_t))
249 static const struct pthread_mutex_attr attr = {
250 .m_type = PTHREAD_MUTEX_NORMAL,
251 .m_protocol = PTHREAD_PRIO_NONE,
255 static const struct pthread_mutex_attr *pattr = &attr;
257 return (thr_mutex_init(mutex, (pthread_mutexattr_t *)&pattr,
262 _thr_mutex_reinit(pthread_mutex_t *mutex)
264 _lock_reinit(&(*mutex)->m_lock, LCK_ADAPTIVE,
265 _thr_lock_wait, _thr_lock_wakeup);
266 TAILQ_INIT(&(*mutex)->m_queue);
267 (*mutex)->m_owner = NULL;
268 (*mutex)->m_count = 0;
269 (*mutex)->m_refcount = 0;
270 (*mutex)->m_prio = 0;
271 (*mutex)->m_saved_prio = 0;
275 _pthread_mutex_destroy(pthread_mutex_t *mutex)
277 struct pthread *curthread = _get_curthread();
281 if (mutex == NULL || *mutex == NULL)
284 /* Lock the mutex structure: */
285 THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
288 * Check to see if this mutex is in use:
290 if (((*mutex)->m_owner != NULL) ||
291 (!TAILQ_EMPTY(&(*mutex)->m_queue)) ||
292 ((*mutex)->m_refcount != 0)) {
295 /* Unlock the mutex structure: */
296 THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
299 * Save a pointer to the mutex so it can be free'd
300 * and set the caller's pointer to NULL:
305 /* Unlock the mutex structure: */
306 THR_LOCK_RELEASE(curthread, &m->m_lock);
309 * Free the memory allocated for the mutex
312 MUTEX_ASSERT_NOT_OWNED(m);
317 /* Return the completion status: */
322 init_static(struct pthread *thread, pthread_mutex_t *mutex)
326 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
329 ret = _pthread_mutex_init(mutex, NULL);
333 THR_LOCK_RELEASE(thread, &_mutex_static_lock);
339 init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
343 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
346 ret = _pthread_mutex_init(mutex, &static_mattr);
350 THR_LOCK_RELEASE(thread, &_mutex_static_lock);
356 mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
361 THR_ASSERT((mutex != NULL) && (*mutex != NULL),
362 "Uninitialized mutex in pthread_mutex_trylock_basic");
364 /* Lock the mutex structure: */
365 THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
366 private = (*mutex)->m_flags & MUTEX_FLAGS_PRIVATE;
369 * If the mutex was statically allocated, properly
370 * initialize the tail queue.
372 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
373 TAILQ_INIT(&(*mutex)->m_queue);
374 MUTEX_INIT_LINK(*mutex);
375 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
378 /* Process according to mutex type: */
379 switch ((*mutex)->m_protocol) {
380 /* Default POSIX mutex: */
381 case PTHREAD_PRIO_NONE:
382 /* Check if this mutex is not locked: */
383 if ((*mutex)->m_owner == NULL) {
384 /* Lock the mutex for the running thread: */
385 (*mutex)->m_owner = curthread;
387 /* Add to the list of owned mutexes: */
388 MUTEX_ASSERT_NOT_OWNED(*mutex);
389 TAILQ_INSERT_TAIL(&curthread->mutexq,
391 } else if ((*mutex)->m_owner == curthread)
392 ret = mutex_self_trylock(*mutex);
394 /* Return a busy error: */
398 /* POSIX priority inheritence mutex: */
399 case PTHREAD_PRIO_INHERIT:
400 /* Check if this mutex is not locked: */
401 if ((*mutex)->m_owner == NULL) {
402 /* Lock the mutex for the running thread: */
403 (*mutex)->m_owner = curthread;
405 THR_SCHED_LOCK(curthread, curthread);
406 /* Track number of priority mutexes owned: */
407 curthread->priority_mutex_count++;
410 * The mutex takes on the attributes of the
411 * running thread when there are no waiters.
413 (*mutex)->m_prio = curthread->active_priority;
414 (*mutex)->m_saved_prio =
415 curthread->inherited_priority;
416 curthread->inherited_priority = (*mutex)->m_prio;
417 THR_SCHED_UNLOCK(curthread, curthread);
419 /* Add to the list of owned mutexes: */
420 MUTEX_ASSERT_NOT_OWNED(*mutex);
421 TAILQ_INSERT_TAIL(&curthread->mutexq,
423 } else if ((*mutex)->m_owner == curthread)
424 ret = mutex_self_trylock(*mutex);
426 /* Return a busy error: */
430 /* POSIX priority protection mutex: */
431 case PTHREAD_PRIO_PROTECT:
432 /* Check for a priority ceiling violation: */
433 if (curthread->active_priority > (*mutex)->m_prio)
436 /* Check if this mutex is not locked: */
437 else if ((*mutex)->m_owner == NULL) {
438 /* Lock the mutex for the running thread: */
439 (*mutex)->m_owner = curthread;
441 THR_SCHED_LOCK(curthread, curthread);
442 /* Track number of priority mutexes owned: */
443 curthread->priority_mutex_count++;
446 * The running thread inherits the ceiling
447 * priority of the mutex and executes at that
450 curthread->active_priority = (*mutex)->m_prio;
451 (*mutex)->m_saved_prio =
452 curthread->inherited_priority;
453 curthread->inherited_priority =
455 THR_SCHED_UNLOCK(curthread, curthread);
456 /* Add to the list of owned mutexes: */
457 MUTEX_ASSERT_NOT_OWNED(*mutex);
458 TAILQ_INSERT_TAIL(&curthread->mutexq,
460 } else if ((*mutex)->m_owner == curthread)
461 ret = mutex_self_trylock(*mutex);
463 /* Return a busy error: */
467 /* Trap invalid mutex types: */
469 /* Return an invalid argument error: */
474 if (ret == 0 && private)
475 THR_CRITICAL_ENTER(curthread);
477 /* Unlock the mutex structure: */
478 THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
480 /* Return the completion status: */
485 __pthread_mutex_trylock(pthread_mutex_t *mutex)
487 struct pthread *curthread = _get_curthread();
494 * If the mutex is statically initialized, perform the dynamic
497 else if ((*mutex != NULL) ||
498 ((ret = init_static(curthread, mutex)) == 0))
499 ret = mutex_trylock_common(curthread, mutex);
505 _pthread_mutex_trylock(pthread_mutex_t *mutex)
507 struct pthread *curthread = _get_curthread();
514 * If the mutex is statically initialized, perform the dynamic
515 * initialization marking the mutex private (delete safe):
517 else if ((*mutex != NULL) ||
518 ((ret = init_static_private(curthread, mutex)) == 0))
519 ret = mutex_trylock_common(curthread, mutex);
525 mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
526 const struct timespec * abstime)
531 THR_ASSERT((m != NULL) && (*m != NULL),
532 "Uninitialized mutex in pthread_mutex_trylock_basic");
534 if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
535 abstime->tv_nsec >= 1000000000))
538 /* Reset the interrupted flag: */
539 curthread->interrupted = 0;
540 curthread->timeout = 0;
541 curthread->wakeup_time.tv_sec = -1;
543 private = (*m)->m_flags & MUTEX_FLAGS_PRIVATE;
546 * Enter a loop waiting to become the mutex owner. We need a
547 * loop in case the waiting thread is interrupted by a signal
548 * to execute a signal handler. It is not (currently) possible
549 * to remain in the waiting queue while running a handler.
550 * Instead, the thread is interrupted and backed out of the
551 * waiting queue prior to executing the signal handler.
554 /* Lock the mutex structure: */
555 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
558 * If the mutex was statically allocated, properly
559 * initialize the tail queue.
561 if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
562 TAILQ_INIT(&(*m)->m_queue);
563 (*m)->m_flags |= MUTEX_FLAGS_INITED;
567 /* Process according to mutex type: */
568 switch ((*m)->m_protocol) {
569 /* Default POSIX mutex: */
570 case PTHREAD_PRIO_NONE:
571 if ((*m)->m_owner == NULL) {
572 /* Lock the mutex for this thread: */
573 (*m)->m_owner = curthread;
575 /* Add to the list of owned mutexes: */
576 MUTEX_ASSERT_NOT_OWNED(*m);
577 TAILQ_INSERT_TAIL(&curthread->mutexq,
580 THR_CRITICAL_ENTER(curthread);
582 /* Unlock the mutex structure: */
583 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
584 } else if ((*m)->m_owner == curthread) {
585 ret = mutex_self_lock(curthread, *m);
587 /* Unlock the mutex structure: */
588 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
591 * Join the queue of threads waiting to lock
592 * the mutex and save a pointer to the mutex.
594 mutex_queue_enq(*m, curthread);
595 curthread->data.mutex = *m;
596 curthread->sigbackout = mutex_lock_backout;
598 * This thread is active and is in a critical
599 * region (holding the mutex lock); we should
600 * be able to safely set the state.
602 THR_SCHED_LOCK(curthread, curthread);
603 /* Set the wakeup time: */
605 curthread->wakeup_time.tv_sec =
607 curthread->wakeup_time.tv_nsec =
611 THR_SET_STATE(curthread, PS_MUTEX_WAIT);
612 THR_SCHED_UNLOCK(curthread, curthread);
614 /* Unlock the mutex structure: */
615 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
617 /* Schedule the next thread: */
618 _thr_sched_switch(curthread);
620 if (THR_IN_MUTEXQ(curthread)) {
621 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
622 mutex_queue_remove(*m, curthread);
623 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
626 * Only clear these after assuring the
627 * thread is dequeued.
629 curthread->data.mutex = NULL;
630 curthread->sigbackout = NULL;
634 /* POSIX priority inheritence mutex: */
635 case PTHREAD_PRIO_INHERIT:
636 /* Check if this mutex is not locked: */
637 if ((*m)->m_owner == NULL) {
638 /* Lock the mutex for this thread: */
639 (*m)->m_owner = curthread;
641 THR_SCHED_LOCK(curthread, curthread);
642 /* Track number of priority mutexes owned: */
643 curthread->priority_mutex_count++;
646 * The mutex takes on attributes of the
647 * running thread when there are no waiters.
648 * Make sure the thread's scheduling lock is
649 * held while priorities are adjusted.
651 (*m)->m_prio = curthread->active_priority;
653 curthread->inherited_priority;
654 curthread->inherited_priority = (*m)->m_prio;
655 THR_SCHED_UNLOCK(curthread, curthread);
657 /* Add to the list of owned mutexes: */
658 MUTEX_ASSERT_NOT_OWNED(*m);
659 TAILQ_INSERT_TAIL(&curthread->mutexq,
662 THR_CRITICAL_ENTER(curthread);
664 /* Unlock the mutex structure: */
665 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
666 } else if ((*m)->m_owner == curthread) {
667 ret = mutex_self_lock(curthread, *m);
669 /* Unlock the mutex structure: */
670 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
673 * Join the queue of threads waiting to lock
674 * the mutex and save a pointer to the mutex.
676 mutex_queue_enq(*m, curthread);
677 curthread->data.mutex = *m;
678 curthread->sigbackout = mutex_lock_backout;
681 * This thread is active and is in a critical
682 * region (holding the mutex lock); we should
683 * be able to safely set the state.
685 if (curthread->active_priority > (*m)->m_prio)
686 /* Adjust priorities: */
687 mutex_priority_adjust(curthread, *m);
689 THR_SCHED_LOCK(curthread, curthread);
690 /* Set the wakeup time: */
692 curthread->wakeup_time.tv_sec =
694 curthread->wakeup_time.tv_nsec =
697 THR_SET_STATE(curthread, PS_MUTEX_WAIT);
698 THR_SCHED_UNLOCK(curthread, curthread);
700 /* Unlock the mutex structure: */
701 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
703 /* Schedule the next thread: */
704 _thr_sched_switch(curthread);
706 if (THR_IN_MUTEXQ(curthread)) {
707 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
708 mutex_queue_remove(*m, curthread);
709 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
712 * Only clear these after assuring the
713 * thread is dequeued.
715 curthread->data.mutex = NULL;
716 curthread->sigbackout = NULL;
720 /* POSIX priority protection mutex: */
721 case PTHREAD_PRIO_PROTECT:
722 /* Check for a priority ceiling violation: */
723 if (curthread->active_priority > (*m)->m_prio) {
724 /* Unlock the mutex structure: */
725 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
728 /* Check if this mutex is not locked: */
729 else if ((*m)->m_owner == NULL) {
731 * Lock the mutex for the running
734 (*m)->m_owner = curthread;
736 THR_SCHED_LOCK(curthread, curthread);
737 /* Track number of priority mutexes owned: */
738 curthread->priority_mutex_count++;
741 * The running thread inherits the ceiling
742 * priority of the mutex and executes at that
743 * priority. Make sure the thread's
744 * scheduling lock is held while priorities
747 curthread->active_priority = (*m)->m_prio;
749 curthread->inherited_priority;
750 curthread->inherited_priority = (*m)->m_prio;
751 THR_SCHED_UNLOCK(curthread, curthread);
753 /* Add to the list of owned mutexes: */
754 MUTEX_ASSERT_NOT_OWNED(*m);
755 TAILQ_INSERT_TAIL(&curthread->mutexq,
758 THR_CRITICAL_ENTER(curthread);
760 /* Unlock the mutex structure: */
761 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
762 } else if ((*m)->m_owner == curthread) {
763 ret = mutex_self_lock(curthread, *m);
765 /* Unlock the mutex structure: */
766 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
769 * Join the queue of threads waiting to lock
770 * the mutex and save a pointer to the mutex.
772 mutex_queue_enq(*m, curthread);
773 curthread->data.mutex = *m;
774 curthread->sigbackout = mutex_lock_backout;
776 /* Clear any previous error: */
777 curthread->error = 0;
780 * This thread is active and is in a critical
781 * region (holding the mutex lock); we should
782 * be able to safely set the state.
785 THR_SCHED_LOCK(curthread, curthread);
786 /* Set the wakeup time: */
788 curthread->wakeup_time.tv_sec =
790 curthread->wakeup_time.tv_nsec =
793 THR_SET_STATE(curthread, PS_MUTEX_WAIT);
794 THR_SCHED_UNLOCK(curthread, curthread);
796 /* Unlock the mutex structure: */
797 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
799 /* Schedule the next thread: */
800 _thr_sched_switch(curthread);
802 if (THR_IN_MUTEXQ(curthread)) {
803 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
804 mutex_queue_remove(*m, curthread);
805 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
808 * Only clear these after assuring the
809 * thread is dequeued.
811 curthread->data.mutex = NULL;
812 curthread->sigbackout = NULL;
815 * The threads priority may have changed while
816 * waiting for the mutex causing a ceiling
819 ret = curthread->error;
820 curthread->error = 0;
824 /* Trap invalid mutex types: */
826 /* Unlock the mutex structure: */
827 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
829 /* Return an invalid argument error: */
834 } while (((*m)->m_owner != curthread) && (ret == 0) &&
835 (curthread->interrupted == 0) && (curthread->timeout == 0));
837 if (ret == 0 && (*m)->m_owner != curthread && curthread->timeout)
841 * Check to see if this thread was interrupted and
842 * is still in the mutex queue of waiting threads:
844 if (curthread->interrupted != 0) {
845 /* Remove this thread from the mutex queue. */
846 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
847 if (THR_IN_SYNCQ(curthread))
848 mutex_queue_remove(*m, curthread);
849 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
851 /* Check for asynchronous cancellation. */
852 if (curthread->continuation != NULL)
853 curthread->continuation((void *) curthread);
856 /* Return the completion status: */
861 __pthread_mutex_lock(pthread_mutex_t *m)
863 struct pthread *curthread;
866 if (_thr_initial == NULL)
867 _libpthread_init(NULL);
869 curthread = _get_curthread();
874 * If the mutex is statically initialized, perform the dynamic
877 else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
878 ret = mutex_lock_common(curthread, m, NULL);
883 __strong_reference(__pthread_mutex_lock, _thr_mutex_lock);
886 _pthread_mutex_lock(pthread_mutex_t *m)
888 struct pthread *curthread;
891 if (_thr_initial == NULL)
892 _libpthread_init(NULL);
893 curthread = _get_curthread();
899 * If the mutex is statically initialized, perform the dynamic
900 * initialization marking it private (delete safe):
902 else if ((*m != NULL) ||
903 ((ret = init_static_private(curthread, m)) == 0))
904 ret = mutex_lock_common(curthread, m, NULL);
910 __pthread_mutex_timedlock(pthread_mutex_t *m,
911 const struct timespec *abs_timeout)
913 struct pthread *curthread;
916 if (_thr_initial == NULL)
917 _libpthread_init(NULL);
919 curthread = _get_curthread();
924 * If the mutex is statically initialized, perform the dynamic
927 else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
928 ret = mutex_lock_common(curthread, m, abs_timeout);
934 _pthread_mutex_timedlock(pthread_mutex_t *m,
935 const struct timespec *abs_timeout)
937 struct pthread *curthread;
940 if (_thr_initial == NULL)
941 _libpthread_init(NULL);
942 curthread = _get_curthread();
948 * If the mutex is statically initialized, perform the dynamic
949 * initialization marking it private (delete safe):
951 else if ((*m != NULL) ||
952 ((ret = init_static_private(curthread, m)) == 0))
953 ret = mutex_lock_common(curthread, m, abs_timeout);
959 _pthread_mutex_unlock(pthread_mutex_t *m)
961 return (mutex_unlock_common(m, /* add reference */ 0));
964 __strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);
967 _mutex_cv_unlock(pthread_mutex_t *m)
969 return (mutex_unlock_common(m, /* add reference */ 1));
973 _mutex_cv_lock(pthread_mutex_t *m)
975 struct pthread *curthread;
978 curthread = _get_curthread();
979 if ((ret = _pthread_mutex_lock(m)) == 0) {
980 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
982 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
988 mutex_self_trylock(pthread_mutex_t m)
993 /* case PTHREAD_MUTEX_DEFAULT: */
994 case PTHREAD_MUTEX_ERRORCHECK:
995 case PTHREAD_MUTEX_NORMAL:
996 case PTHREAD_MUTEX_ADAPTIVE_NP:
1000 case PTHREAD_MUTEX_RECURSIVE:
1001 /* Increment the lock count: */
1006 /* Trap invalid mutex types; */
1014 mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
1019 * Don't allow evil recursive mutexes for private use
1020 * in libc and libpthread.
1022 if (m->m_flags & MUTEX_FLAGS_PRIVATE)
1023 PANIC("Recurse on a private mutex.");
1025 switch (m->m_type) {
1026 /* case PTHREAD_MUTEX_DEFAULT: */
1027 case PTHREAD_MUTEX_ERRORCHECK:
1028 case PTHREAD_MUTEX_ADAPTIVE_NP:
1030 * POSIX specifies that mutexes should return EDEADLK if a
1031 * recursive lock is detected.
1036 case PTHREAD_MUTEX_NORMAL:
1038 * What SS2 define as a 'normal' mutex. Intentionally
1039 * deadlock on attempts to get a lock you already own.
1042 THR_SCHED_LOCK(curthread, curthread);
1043 THR_SET_STATE(curthread, PS_DEADLOCK);
1044 THR_SCHED_UNLOCK(curthread, curthread);
1046 /* Unlock the mutex structure: */
1047 THR_LOCK_RELEASE(curthread, &m->m_lock);
1049 /* Schedule the next thread: */
1050 _thr_sched_switch(curthread);
1053 case PTHREAD_MUTEX_RECURSIVE:
1054 /* Increment the lock count: */
1059 /* Trap invalid mutex types; */
1067 mutex_unlock_common(pthread_mutex_t *m, int add_reference)
1069 struct pthread *curthread = _get_curthread();
1070 struct kse_mailbox *kmbx = NULL;
1073 if (m == NULL || *m == NULL)
1076 /* Lock the mutex structure: */
1077 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
1079 /* Process according to mutex type: */
1080 switch ((*m)->m_protocol) {
1081 /* Default POSIX mutex: */
1082 case PTHREAD_PRIO_NONE:
1084 * Check if the running thread is not the owner of the
1087 if ((*m)->m_owner != curthread)
1089 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1090 ((*m)->m_count > 0))
1091 /* Decrement the count: */
1095 * Clear the count in case this is a recursive
1100 /* Remove the mutex from the threads queue. */
1101 MUTEX_ASSERT_IS_OWNED(*m);
1102 TAILQ_REMOVE(&(*m)->m_owner->mutexq,
1104 MUTEX_INIT_LINK(*m);
1107 * Hand off the mutex to the next waiting
1110 kmbx = mutex_handoff(curthread, *m);
1114 /* POSIX priority inheritence mutex: */
1115 case PTHREAD_PRIO_INHERIT:
1117 * Check if the running thread is not the owner of the
1120 if ((*m)->m_owner != curthread)
1122 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1123 ((*m)->m_count > 0))
1124 /* Decrement the count: */
1128 * Clear the count in case this is recursive
1134 * Restore the threads inherited priority and
1135 * recompute the active priority (being careful
1136 * not to override changes in the threads base
1137 * priority subsequent to locking the mutex).
1139 THR_SCHED_LOCK(curthread, curthread);
1140 curthread->inherited_priority =
1142 curthread->active_priority =
1143 MAX(curthread->inherited_priority,
1144 curthread->base_priority);
1147 * This thread now owns one less priority mutex.
1149 curthread->priority_mutex_count--;
1150 THR_SCHED_UNLOCK(curthread, curthread);
1152 /* Remove the mutex from the threads queue. */
1153 MUTEX_ASSERT_IS_OWNED(*m);
1154 TAILQ_REMOVE(&(*m)->m_owner->mutexq,
1156 MUTEX_INIT_LINK(*m);
1159 * Hand off the mutex to the next waiting
1162 kmbx = mutex_handoff(curthread, *m);
1166 /* POSIX priority ceiling mutex: */
1167 case PTHREAD_PRIO_PROTECT:
1169 * Check if the running thread is not the owner of the
1172 if ((*m)->m_owner != curthread)
1174 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1175 ((*m)->m_count > 0))
1176 /* Decrement the count: */
1180 * Clear the count in case this is a recursive
1186 * Restore the threads inherited priority and
1187 * recompute the active priority (being careful
1188 * not to override changes in the threads base
1189 * priority subsequent to locking the mutex).
1191 THR_SCHED_LOCK(curthread, curthread);
1192 curthread->inherited_priority =
1194 curthread->active_priority =
1195 MAX(curthread->inherited_priority,
1196 curthread->base_priority);
1199 * This thread now owns one less priority mutex.
1201 curthread->priority_mutex_count--;
1202 THR_SCHED_UNLOCK(curthread, curthread);
1204 /* Remove the mutex from the threads queue. */
1205 MUTEX_ASSERT_IS_OWNED(*m);
1206 TAILQ_REMOVE(&(*m)->m_owner->mutexq,
1208 MUTEX_INIT_LINK(*m);
1211 * Hand off the mutex to the next waiting
1214 kmbx = mutex_handoff(curthread, *m);
1218 /* Trap invalid mutex types: */
1220 /* Return an invalid argument error: */
1225 if ((ret == 0) && (add_reference != 0))
1226 /* Increment the reference count: */
1229 /* Leave the critical region if this is a private mutex. */
1230 if ((ret == 0) && ((*m)->m_flags & MUTEX_FLAGS_PRIVATE))
1231 THR_CRITICAL_LEAVE(curthread);
1233 /* Unlock the mutex structure: */
1234 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
1240 /* Return the completion status: */
1246 * This function is called when a change in base priority occurs for
1247 * a thread that is holding or waiting for a priority protection or
1248 * inheritence mutex. A change in a threads base priority can effect
1249 * changes to active priorities of other threads and to the ordering
1250 * of mutex locking by waiting threads.
1252 * This must be called without the target thread's scheduling lock held.
1255 _mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
1258 struct pthread_mutex *m;
1260 /* Adjust the priorites of any owned priority mutexes: */
1261 if (pthread->priority_mutex_count > 0) {
1263 * Rescan the mutexes owned by this thread and correct
1264 * their priorities to account for this threads change
1265 * in priority. This has the side effect of changing
1266 * the threads active priority.
1268 * Be sure to lock the first mutex in the list of owned
1269 * mutexes. This acts as a barrier against another
1270 * simultaneous call to change the threads priority
1271 * and from the owning thread releasing the mutex.
1273 m = TAILQ_FIRST(&pthread->mutexq);
1275 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1277 * Make sure the thread still owns the lock.
1279 if (m == TAILQ_FIRST(&pthread->mutexq))
1280 mutex_rescan_owned(curthread, pthread,
1281 /* rescan all owned */ NULL);
1282 THR_LOCK_RELEASE(curthread, &m->m_lock);
1287 * If this thread is waiting on a priority inheritence mutex,
1288 * check for priority adjustments. A change in priority can
1289 * also cause a ceiling violation(*) for a thread waiting on
1290 * a priority protection mutex; we don't perform the check here
1291 * as it is done in pthread_mutex_unlock.
1293 * (*) It should be noted that a priority change to a thread
1294 * _after_ taking and owning a priority ceiling mutex
1295 * does not affect ownership of that mutex; the ceiling
1296 * priority is only checked before mutex ownership occurs.
1298 if (propagate_prio != 0) {
1300 * Lock the thread's scheduling queue. This is a bit
1301 * convoluted; the "in synchronization queue flag" can
1302 * only be cleared with both the thread's scheduling and
1303 * mutex locks held. The thread's pointer to the wanted
1304 * mutex is guaranteed to be valid during this time.
1306 THR_SCHED_LOCK(curthread, pthread);
1308 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
1309 ((m = pthread->data.mutex) == NULL))
1310 THR_SCHED_UNLOCK(curthread, pthread);
1313 * This thread is currently waiting on a mutex; unlock
1314 * the scheduling queue lock and lock the mutex. We
1315 * can't hold both at the same time because the locking
1316 * order could cause a deadlock.
1318 THR_SCHED_UNLOCK(curthread, pthread);
1319 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1322 * Check to make sure this thread is still in the
1323 * same state (the lock above can yield the CPU to
1324 * another thread or the thread may be running on
1327 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1328 (pthread->data.mutex == m)) {
1330 * Remove and reinsert this thread into
1331 * the list of waiting threads to preserve
1332 * decreasing priority order.
1334 mutex_queue_remove(m, pthread);
1335 mutex_queue_enq(m, pthread);
1337 if (m->m_protocol == PTHREAD_PRIO_INHERIT)
1338 /* Adjust priorities: */
1339 mutex_priority_adjust(curthread, m);
1342 /* Unlock the mutex structure: */
1343 THR_LOCK_RELEASE(curthread, &m->m_lock);
1349 * Called when a new thread is added to the mutex waiting queue or
1350 * when a threads priority changes that is already in the mutex
1353 * This must be called with the mutex locked by the current thread.
1356 mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
1358 pthread_mutex_t m = mutex;
1359 struct pthread *pthread_next, *pthread = mutex->m_owner;
1360 int done, temp_prio;
1363 * Calculate the mutex priority as the maximum of the highest
1364 * active priority of any waiting threads and the owning threads
1365 * active priority(*).
1367 * (*) Because the owning threads current active priority may
1368 * reflect priority inherited from this mutex (and the mutex
1369 * priority may have changed) we must recalculate the active
1370 * priority based on the threads saved inherited priority
1371 * and its base priority.
1373 pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
1374 temp_prio = MAX(pthread_next->active_priority,
1375 MAX(m->m_saved_prio, pthread->base_priority));
1377 /* See if this mutex really needs adjusting: */
1378 if (temp_prio == m->m_prio)
1379 /* No need to propagate the priority: */
1382 /* Set new priority of the mutex: */
1383 m->m_prio = temp_prio;
1386 * Don't unlock the mutex passed in as an argument. It is
1387 * expected to be locked and unlocked by the caller.
1392 * Save the threads priority before rescanning the
1395 temp_prio = pthread->active_priority;
1398 * Fix the priorities for all mutexes held by the owning
1399 * thread since taking this mutex. This also has a
1400 * potential side-effect of changing the threads priority.
1402 * At this point the mutex is locked by the current thread.
1403 * The owning thread can't release the mutex until it is
1404 * unlocked, so we should be able to safely walk its list
1407 mutex_rescan_owned(curthread, pthread, m);
1410 * If this isn't the first time through the loop,
1411 * the current mutex needs to be unlocked.
1414 THR_LOCK_RELEASE(curthread, &m->m_lock);
1416 /* Assume we're done unless told otherwise: */
1420 * If the thread is currently waiting on a mutex, check
1421 * to see if the threads new priority has affected the
1422 * priority of the mutex.
1424 if ((temp_prio != pthread->active_priority) &&
1425 ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1426 ((m = pthread->data.mutex) != NULL) &&
1427 (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
1428 /* Lock the mutex structure: */
1429 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1432 * Make sure the thread is still waiting on the
1435 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1436 (m == pthread->data.mutex)) {
1438 * The priority for this thread has changed.
1439 * Remove and reinsert this thread into the
1440 * list of waiting threads to preserve
1441 * decreasing priority order.
1443 mutex_queue_remove(m, pthread);
1444 mutex_queue_enq(m, pthread);
1447 * Grab the waiting thread with highest
1450 pthread_next = TAILQ_FIRST(&m->m_queue);
1453 * Calculate the mutex priority as the maximum
1454 * of the highest active priority of any
1455 * waiting threads and the owning threads
1458 temp_prio = MAX(pthread_next->active_priority,
1459 MAX(m->m_saved_prio,
1460 m->m_owner->base_priority));
1462 if (temp_prio != m->m_prio) {
1464 * The priority needs to be propagated
1465 * to the mutex this thread is waiting
1466 * on and up to the owner of that mutex.
1468 m->m_prio = temp_prio;
1469 pthread = m->m_owner;
1471 /* We're not done yet: */
1475 /* Only release the mutex if we're done: */
1477 THR_LOCK_RELEASE(curthread, &m->m_lock);
1479 } while (done == 0);
1483 mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
1484 struct pthread_mutex *mutex)
1486 struct pthread_mutex *m;
1487 struct pthread *pthread_next;
1488 int active_prio, inherited_prio;
1491 * Start walking the mutexes the thread has taken since
1492 * taking this mutex.
1494 if (mutex == NULL) {
1496 * A null mutex means start at the beginning of the owned
1499 m = TAILQ_FIRST(&pthread->mutexq);
1501 /* There is no inherited priority yet. */
1505 * The caller wants to start after a specific mutex. It
1506 * is assumed that this mutex is a priority inheritence
1507 * mutex and that its priority has been correctly
1510 m = TAILQ_NEXT(mutex, m_qe);
1512 /* Start inheriting priority from the specified mutex. */
1513 inherited_prio = mutex->m_prio;
1515 active_prio = MAX(inherited_prio, pthread->base_priority);
1517 for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
1519 * We only want to deal with priority inheritence
1520 * mutexes. This might be optimized by only placing
1521 * priority inheritence mutexes into the owned mutex
1522 * list, but it may prove to be useful having all
1523 * owned mutexes in this list. Consider a thread
1524 * exiting while holding mutexes...
1526 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1528 * Fix the owners saved (inherited) priority to
1529 * reflect the priority of the previous mutex.
1531 m->m_saved_prio = inherited_prio;
1533 if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1534 /* Recalculate the priority of the mutex: */
1535 m->m_prio = MAX(active_prio,
1536 pthread_next->active_priority);
1538 m->m_prio = active_prio;
1540 /* Recalculate new inherited and active priorities: */
1541 inherited_prio = m->m_prio;
1542 active_prio = MAX(m->m_prio, pthread->base_priority);
1547 * Fix the threads inherited priority and recalculate its
1550 pthread->inherited_priority = inherited_prio;
1551 active_prio = MAX(inherited_prio, pthread->base_priority);
1553 if (active_prio != pthread->active_priority) {
1554 /* Lock the thread's scheduling queue: */
1555 THR_SCHED_LOCK(curthread, pthread);
1557 if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
1559 * This thread is not in a run queue. Just set
1560 * its active priority.
1562 pthread->active_priority = active_prio;
1566 * This thread is in a run queue. Remove it from
1567 * the queue before changing its priority:
1569 THR_RUNQ_REMOVE(pthread);
1572 * POSIX states that if the priority is being
1573 * lowered, the thread must be inserted at the
1574 * head of the queue for its priority if it owns
1575 * any priority protection or inheritence mutexes.
1577 if ((active_prio < pthread->active_priority) &&
1578 (pthread->priority_mutex_count > 0)) {
1579 /* Set the new active priority. */
1580 pthread->active_priority = active_prio;
1582 THR_RUNQ_INSERT_HEAD(pthread);
1584 /* Set the new active priority. */
1585 pthread->active_priority = active_prio;
1587 THR_RUNQ_INSERT_TAIL(pthread);
1590 THR_SCHED_UNLOCK(curthread, pthread);
1595 _mutex_unlock_private(pthread_t pthread)
1597 struct pthread_mutex *m, *m_next;
1599 for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1600 m_next = TAILQ_NEXT(m, m_qe);
1601 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1602 _pthread_mutex_unlock(&m);
1607 * This is called by the current thread when it wants to back out of a
1608 * mutex_lock in order to run a signal handler.
1611 mutex_lock_backout(void *arg)
1613 struct pthread *curthread = (struct pthread *)arg;
1614 struct pthread_mutex *m;
1616 if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1618 * Any other thread may clear the "in sync queue flag",
1619 * but only the current thread can clear the pointer
1620 * to the mutex. So if the flag is set, we can
1621 * guarantee that the pointer to the mutex is valid.
1622 * The only problem may be if the mutex is destroyed
1623 * out from under us, but that should be considered
1624 * an application bug.
1626 m = curthread->data.mutex;
1628 /* Lock the mutex structure: */
1629 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1633 * Check to make sure this thread doesn't already own
1634 * the mutex. Since mutexes are unlocked with direct
1635 * handoffs, it is possible the previous owner gave it
1636 * to us after we checked the sync queue flag and before
1637 * we locked the mutex structure.
1639 if (m->m_owner == curthread) {
1640 THR_LOCK_RELEASE(curthread, &m->m_lock);
1641 mutex_unlock_common(&m, /* add_reference */ 0);
1644 * Remove ourselves from the mutex queue and
1645 * clear the pointer to the mutex. We may no
1646 * longer be in the mutex queue, but the removal
1647 * function will DTRT.
1649 mutex_queue_remove(m, curthread);
1650 curthread->data.mutex = NULL;
1651 THR_LOCK_RELEASE(curthread, &m->m_lock);
1654 /* No need to call this again. */
1655 curthread->sigbackout = NULL;
1659 * Dequeue a waiting thread from the head of a mutex queue in descending
1662 * In order to properly dequeue a thread from the mutex queue and
1663 * make it runnable without the possibility of errant wakeups, it
1664 * is necessary to lock the thread's scheduling queue while also
1665 * holding the mutex lock.
1667 static struct kse_mailbox *
1668 mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
1670 struct kse_mailbox *kmbx = NULL;
1671 struct pthread *pthread;
1673 /* Keep dequeueing until we find a valid thread: */
1674 mutex->m_owner = NULL;
1675 pthread = TAILQ_FIRST(&mutex->m_queue);
1676 while (pthread != NULL) {
1677 /* Take the thread's scheduling lock: */
1678 THR_SCHED_LOCK(curthread, pthread);
1680 /* Remove the thread from the mutex queue: */
1681 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1682 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1685 * Only exit the loop if the thread hasn't been
1688 switch (mutex->m_protocol) {
1689 case PTHREAD_PRIO_NONE:
1691 * Assign the new owner and add the mutex to the
1692 * thread's list of owned mutexes.
1694 mutex->m_owner = pthread;
1695 TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
1698 case PTHREAD_PRIO_INHERIT:
1700 * Assign the new owner and add the mutex to the
1701 * thread's list of owned mutexes.
1703 mutex->m_owner = pthread;
1704 TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
1706 /* Track number of priority mutexes owned: */
1707 pthread->priority_mutex_count++;
1710 * Set the priority of the mutex. Since our waiting
1711 * threads are in descending priority order, the
1712 * priority of the mutex becomes the active priority
1713 * of the thread we just dequeued.
1715 mutex->m_prio = pthread->active_priority;
1717 /* Save the owning threads inherited priority: */
1718 mutex->m_saved_prio = pthread->inherited_priority;
1721 * The owning threads inherited priority now becomes
1722 * his active priority (the priority of the mutex).
1724 pthread->inherited_priority = mutex->m_prio;
1727 case PTHREAD_PRIO_PROTECT:
1728 if (pthread->active_priority > mutex->m_prio) {
1730 * Either the mutex ceiling priority has
1731 * been lowered and/or this threads priority
1732 * has been raised subsequent to the thread
1733 * being queued on the waiting list.
1735 pthread->error = EINVAL;
1739 * Assign the new owner and add the mutex
1740 * to the thread's list of owned mutexes.
1742 mutex->m_owner = pthread;
1743 TAILQ_INSERT_TAIL(&pthread->mutexq,
1746 /* Track number of priority mutexes owned: */
1747 pthread->priority_mutex_count++;
1750 * Save the owning threads inherited
1753 mutex->m_saved_prio =
1754 pthread->inherited_priority;
1757 * The owning thread inherits the ceiling
1758 * priority of the mutex and executes at
1761 pthread->inherited_priority = mutex->m_prio;
1762 pthread->active_priority = mutex->m_prio;
1768 /* Make the thread runnable and unlock the scheduling queue: */
1769 kmbx = _thr_setrunnable_unlocked(pthread);
1771 /* Add a preemption point. */
1772 if ((curthread->kseg == pthread->kseg) &&
1773 (pthread->active_priority > curthread->active_priority))
1774 curthread->critical_yield = 1;
1776 if (mutex->m_owner == pthread) {
1777 /* We're done; a valid owner was found. */
1778 if (mutex->m_flags & MUTEX_FLAGS_PRIVATE)
1779 THR_CRITICAL_ENTER(pthread);
1780 THR_SCHED_UNLOCK(curthread, pthread);
1783 THR_SCHED_UNLOCK(curthread, pthread);
1784 /* Get the next thread from the waiting queue: */
1785 pthread = TAILQ_NEXT(pthread, sqe);
1788 if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
1789 /* This mutex has no priority: */
1795 * Dequeue a waiting thread from the head of a mutex queue in descending
1798 static inline pthread_t
1799 mutex_queue_deq(struct pthread_mutex *mutex)
1803 while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1804 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1805 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1808 * Only exit the loop if the thread hasn't been
1811 if (pthread->interrupted == 0)
1819 * Remove a waiting thread from a mutex queue in descending priority order.
1822 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1824 if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1825 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1826 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1831 * Enqueue a waiting thread to a queue in descending priority order.
1834 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1836 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1838 THR_ASSERT_NOT_IN_SYNCQ(pthread);
1840 * For the common case of all threads having equal priority,
1841 * we perform a quick check against the priority of the thread
1842 * at the tail of the queue.
1844 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1845 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1847 tid = TAILQ_FIRST(&mutex->m_queue);
1848 while (pthread->active_priority <= tid->active_priority)
1849 tid = TAILQ_NEXT(tid, sqe);
1850 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1852 pthread->sflags |= THR_FLAGS_IN_SYNCQ;
1856 _pthread_mutex_isowned_np(pthread_mutex_t *mutex)
1858 struct pthread *curthread = _get_curthread();
1860 return ((*mutex)->m_owner == curthread);