2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 #include <sys/param.h>
35 #include <sys/queue.h>
37 #include "thr_private.h"
39 #if defined(_PTHREADS_INVARIANTS)
40 #define MUTEX_INIT_LINK(m) do { \
41 (m)->m_qe.tqe_prev = NULL; \
42 (m)->m_qe.tqe_next = NULL; \
44 #define MUTEX_ASSERT_IS_OWNED(m) do { \
45 if ((m)->m_qe.tqe_prev == NULL) \
46 PANIC("mutex is not on list"); \
48 #define MUTEX_ASSERT_NOT_OWNED(m) do { \
49 if (((m)->m_qe.tqe_prev != NULL) || \
50 ((m)->m_qe.tqe_next != NULL)) \
51 PANIC("mutex is on list"); \
53 #define THR_ASSERT_NOT_IN_SYNCQ(thr) do { \
54 THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
55 "thread in syncq when it shouldn't be."); \
58 #define MUTEX_INIT_LINK(m)
59 #define MUTEX_ASSERT_IS_OWNED(m)
60 #define MUTEX_ASSERT_NOT_OWNED(m)
61 #define THR_ASSERT_NOT_IN_SYNCQ(thr)
64 #define THR_IN_MUTEXQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
65 #define MUTEX_DESTROY(m) do { \
66 _lock_destroy(&(m)->m_lock); \
74 static struct kse_mailbox *mutex_handoff(struct pthread *,
75 struct pthread_mutex *);
76 static inline int mutex_self_trylock(struct pthread *, pthread_mutex_t);
77 static inline int mutex_self_lock(struct pthread *, pthread_mutex_t);
78 static int mutex_unlock_common(pthread_mutex_t *, int);
79 static void mutex_priority_adjust(struct pthread *, pthread_mutex_t);
80 static void mutex_rescan_owned (struct pthread *, struct pthread *,
81 struct pthread_mutex *);
82 static inline pthread_t mutex_queue_deq(pthread_mutex_t);
83 static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
84 static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
85 static void mutex_lock_backout(void *arg);
87 static struct pthread_mutex_attr static_mutex_attr =
88 PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
89 static pthread_mutexattr_t static_mattr = &static_mutex_attr;
91 LT10_COMPAT_PRIVATE(__pthread_mutex_init);
92 LT10_COMPAT_PRIVATE(_pthread_mutex_init);
93 LT10_COMPAT_DEFAULT(pthread_mutex_init);
94 LT10_COMPAT_PRIVATE(__pthread_mutex_lock);
95 LT10_COMPAT_PRIVATE(_pthread_mutex_lock);
96 LT10_COMPAT_DEFAULT(pthread_mutex_lock);
97 LT10_COMPAT_PRIVATE(__pthread_mutex_timedlock);
98 LT10_COMPAT_PRIVATE(_pthread_mutex_timedlock);
99 LT10_COMPAT_DEFAULT(pthread_mutex_timedlock);
100 LT10_COMPAT_PRIVATE(__pthread_mutex_trylock);
101 LT10_COMPAT_PRIVATE(_pthread_mutex_trylock);
102 LT10_COMPAT_DEFAULT(pthread_mutex_trylock);
103 LT10_COMPAT_PRIVATE(_pthread_mutex_destroy);
104 LT10_COMPAT_DEFAULT(pthread_mutex_destroy);
105 LT10_COMPAT_PRIVATE(_pthread_mutex_unlock);
106 LT10_COMPAT_DEFAULT(pthread_mutex_unlock);
108 /* Single underscore versions provided for libc internal usage: */
109 __weak_reference(__pthread_mutex_init, pthread_mutex_init);
110 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
111 __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
112 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
114 /* No difference between libc and application usage of these: */
115 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
116 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
/*
 * thr_mutex_init(): common worker for all mutex-initialization entry points.
 * Validates the optional attribute object, allocates the mutex through the
 * caller-supplied calloc_cb, and fills in type/protocol/ceiling/flags.
 * NOTE(review): this listing is an excerpt — several original lines are
 * elided between the numbered lines below (e.g. the NULL-mutex check and
 * the switch header); code kept verbatim.
 */
119 thr_mutex_init(pthread_mutex_t *mutex,
120 const pthread_mutexattr_t *mutex_attr, void *(calloc_cb)(size_t, size_t))
122 struct pthread_mutex *pmutex;
123 enum pthread_mutextype type;
132 /* Check if default mutex attributes: */
133 else if (mutex_attr == NULL || *mutex_attr == NULL) {
134 /* Default to a (error checking) POSIX mutex: */
135 type = PTHREAD_MUTEX_ERRORCHECK;
136 protocol = PTHREAD_PRIO_NONE;
137 ceiling = THR_MAX_PRIORITY;
141 /* Check mutex type: */
142 else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
143 ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
144 /* Return an invalid argument error: */
147 /* Check mutex protocol: */
/*
 * NOTE(review): the protocol range below compares m_protocol against
 * PTHREAD_MUTEX_RECURSIVE (a type constant, not a protocol constant) —
 * looks suspicious, but matches the historical FreeBSD source; confirm
 * against upstream before "fixing".
 */
148 else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
149 ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
150 /* Return an invalid argument error: */
154 /* Use the requested mutex type and protocol: */
155 type = (*mutex_attr)->m_type;
156 protocol = (*mutex_attr)->m_protocol;
157 ceiling = (*mutex_attr)->m_ceiling;
158 flags = (*mutex_attr)->m_flags;
161 /* Check no errors so far: */
163 if ((pmutex = (pthread_mutex_t)
164 calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
166 else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
167 _thr_lock_wait, _thr_lock_wakeup, calloc_cb) != 0) {
172 /* Set the mutex flags: */
173 pmutex->m_flags = flags;
175 /* Process according to mutex type: */
177 /* case PTHREAD_MUTEX_DEFAULT: */
178 case PTHREAD_MUTEX_ERRORCHECK:
179 case PTHREAD_MUTEX_NORMAL:
180 case PTHREAD_MUTEX_ADAPTIVE_NP:
181 /* Nothing to do here. */
184 /* Single UNIX Spec 2 recursive mutex: */
185 case PTHREAD_MUTEX_RECURSIVE:
186 /* Reset the mutex count: */
190 /* Trap invalid mutex types: */
192 /* Return an invalid argument error: */
197 /* Initialise the rest of the mutex: */
198 TAILQ_INIT(&pmutex->m_queue);
199 pmutex->m_flags |= MUTEX_FLAGS_INITED;
200 pmutex->m_owner = NULL;
201 pmutex->m_type = type;
202 pmutex->m_protocol = protocol;
203 pmutex->m_refcount = 0;
/* Priority-protect mutexes carry their ceiling in m_prio. */
204 if (protocol == PTHREAD_PRIO_PROTECT)
205 pmutex->m_prio = ceiling;
208 pmutex->m_saved_prio = 0;
209 MUTEX_INIT_LINK(pmutex);
212 /* Free the mutex lock structure: */
213 MUTEX_DESTROY(pmutex);
218 /* Return the completion status: */
/*
 * Public pthread_mutex_init() entry point: delegates to thr_mutex_init()
 * using the regular libc calloc for allocation.
 * NOTE(review): braces/return-type lines are elided in this excerpt.
 */
223 __pthread_mutex_init(pthread_mutex_t *mutex,
224 const pthread_mutexattr_t *mutex_attr)
227 return (thr_mutex_init(mutex, mutex_attr, calloc));
/*
 * Libc-internal mutex init: forces MUTEX_FLAGS_PRIVATE so the mutex is
 * "delete safe" for library-internal use, then defers to the public init.
 * NOTE(review): the assignment of mattrp (original line 241, presumably
 * "mattrp = &mattr;") is elided from this excerpt — confirm upstream.
 */
231 _pthread_mutex_init(pthread_mutex_t *mutex,
232 const pthread_mutexattr_t *mutex_attr)
234 struct pthread_mutex_attr mattr, *mattrp;
/* No attributes supplied: fall back to the static private attr object. */
236 if ((mutex_attr == NULL) || (*mutex_attr == NULL))
237 return (__pthread_mutex_init(mutex, &static_mattr));
239 mattr = **mutex_attr;
240 mattr.m_flags |= MUTEX_FLAGS_PRIVATE;
242 return (__pthread_mutex_init(mutex, &mattrp));
246 /* This function is used internally by malloc. */
/*
 * Initializes a mutex with a caller-provided calloc so malloc can create
 * its own locks without recursing into itself.
 * NOTE(review): attr initializer tail and the final call's second line are
 * elided in this excerpt; code kept verbatim.
 */
248 _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
249 void *(calloc_cb)(size_t, size_t))
251 static const struct pthread_mutex_attr attr = {
252 .m_type = PTHREAD_MUTEX_NORMAL,
253 .m_protocol = PTHREAD_PRIO_NONE,
257 static const struct pthread_mutex_attr *pattr = &attr;
259 return (thr_mutex_init(mutex, (pthread_mutexattr_t *)&pattr,
/*
 * Re-initializes an existing mutex in place: resets the low-level lock,
 * the waiter queue, and all ownership/priority bookkeeping fields.
 * NOTE(review): presumably used after fork() to scrub inherited state —
 * caller context is not visible in this excerpt; confirm against callers.
 */
264 _thr_mutex_reinit(pthread_mutex_t *mutex)
266 _lock_reinit(&(*mutex)->m_lock, LCK_ADAPTIVE,
267 _thr_lock_wait, _thr_lock_wakeup);
268 TAILQ_INIT(&(*mutex)->m_queue);
269 (*mutex)->m_owner = NULL;
270 (*mutex)->m_count = 0;
271 (*mutex)->m_refcount = 0;
272 (*mutex)->m_prio = 0;
273 (*mutex)->m_saved_prio = 0;
/*
 * pthread_mutex_destroy(): refuses to destroy a mutex that is owned,
 * has queued waiters, or has a non-zero refcount (returns EBUSY in the
 * elided branch); otherwise detaches the caller's pointer and frees it.
 * NOTE(review): excerpt — EINVAL/EBUSY returns, the "m = *mutex" capture
 * and the free() call are among the elided lines; code kept verbatim.
 */
277 _pthread_mutex_destroy(pthread_mutex_t *mutex)
279 struct pthread *curthread = _get_curthread();
283 if (mutex == NULL || *mutex == NULL)
286 /* Lock the mutex structure: */
287 THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
290 * Check to see if this mutex is in use:
292 if (((*mutex)->m_owner != NULL) ||
293 (!TAILQ_EMPTY(&(*mutex)->m_queue)) ||
294 ((*mutex)->m_refcount != 0)) {
297 /* Unlock the mutex structure: */
298 THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
301 * Save a pointer to the mutex so it can be free'd
302 * and set the caller's pointer to NULL:
307 /* Unlock the mutex structure: */
308 THR_LOCK_RELEASE(curthread, &m->m_lock);
311 * Free the memory allocated for the mutex
/* Sanity check under _PTHREADS_INVARIANTS: must not be on any owner list. */
314 MUTEX_ASSERT_NOT_OWNED(m);
319 /* Return the completion status: */
/*
 * Lazily initializes a statically-allocated (PTHREAD_MUTEX_INITIALIZER)
 * mutex on first use, serialized by the global _mutex_static_lock.
 * NOTE(review): the re-check of *mutex under the lock is elided here.
 */
324 init_static(struct pthread *thread, pthread_mutex_t *mutex)
328 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
331 ret = pthread_mutex_init(mutex, NULL);
335 THR_LOCK_RELEASE(thread, &_mutex_static_lock);
/*
 * Same as init_static(), but initializes with the private (delete-safe)
 * attribute object for libc/libpthread-internal static mutexes.
 */
341 init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
345 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
348 ret = pthread_mutex_init(mutex, &static_mattr);
352 THR_LOCK_RELEASE(thread, &_mutex_static_lock);
/*
 * mutex_trylock_common(): non-blocking acquire shared by the trylock
 * entry points. Dispatches on the mutex protocol:
 *   PTHREAD_PRIO_NONE    — plain ownership grab;
 *   PTHREAD_PRIO_INHERIT — also records priority-inheritance bookkeeping;
 *   PTHREAD_PRIO_PROTECT — enforces the priority ceiling first.
 * Returns 0 on success, EBUSY (elided) when owned by another thread, or
 * the result of mutex_self_trylock() on a recursive self-acquire.
 * NOTE(review): excerpt — "int ret = 0;", break statements, EBUSY/EINVAL
 * assignments and several closing braces are elided; code kept verbatim.
 */
358 mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
363 THR_ASSERT((mutex != NULL) && (*mutex != NULL),
364 "Uninitialized mutex in pthread_mutex_trylock_basic");
366 /* Lock the mutex structure: */
367 THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
368 private = (*mutex)->m_flags & MUTEX_FLAGS_PRIVATE;
371 * If the mutex was statically allocated, properly
372 * initialize the tail queue.
374 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
375 TAILQ_INIT(&(*mutex)->m_queue);
376 MUTEX_INIT_LINK(*mutex);
377 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
380 /* Process according to mutex type: */
381 switch ((*mutex)->m_protocol) {
382 /* Default POSIX mutex: */
383 case PTHREAD_PRIO_NONE:
384 /* Check if this mutex is not locked: */
385 if ((*mutex)->m_owner == NULL) {
386 /* Lock the mutex for the running thread: */
387 (*mutex)->m_owner = curthread;
389 /* Add to the list of owned mutexes: */
390 MUTEX_ASSERT_NOT_OWNED(*mutex);
391 TAILQ_INSERT_TAIL(&curthread->mutexq,
393 } else if ((*mutex)->m_owner == curthread)
394 ret = mutex_self_trylock(curthread, *mutex);
396 /* Return a busy error: */
400 /* POSIX priority inheritence mutex: */
401 case PTHREAD_PRIO_INHERIT:
402 /* Check if this mutex is not locked: */
403 if ((*mutex)->m_owner == NULL) {
404 /* Lock the mutex for the running thread: */
405 (*mutex)->m_owner = curthread;
407 THR_SCHED_LOCK(curthread, curthread);
408 /* Track number of priority mutexes owned: */
409 curthread->priority_mutex_count++;
412 * The mutex takes on the attributes of the
413 * running thread when there are no waiters.
415 (*mutex)->m_prio = curthread->active_priority;
416 (*mutex)->m_saved_prio =
417 curthread->inherited_priority;
418 curthread->inherited_priority = (*mutex)->m_prio;
419 THR_SCHED_UNLOCK(curthread, curthread);
421 /* Add to the list of owned mutexes: */
422 MUTEX_ASSERT_NOT_OWNED(*mutex);
423 TAILQ_INSERT_TAIL(&curthread->mutexq,
425 } else if ((*mutex)->m_owner == curthread)
426 ret = mutex_self_trylock(curthread, *mutex);
428 /* Return a busy error: */
432 /* POSIX priority protection mutex: */
433 case PTHREAD_PRIO_PROTECT:
434 /* Check for a priority ceiling violation: */
435 if (curthread->active_priority > (*mutex)->m_prio)
438 /* Check if this mutex is not locked: */
439 else if ((*mutex)->m_owner == NULL) {
440 /* Lock the mutex for the running thread: */
441 (*mutex)->m_owner = curthread;
443 THR_SCHED_LOCK(curthread, curthread);
444 /* Track number of priority mutexes owned: */
445 curthread->priority_mutex_count++;
448 * The running thread inherits the ceiling
449 * priority of the mutex and executes at that
452 curthread->active_priority = (*mutex)->m_prio;
453 (*mutex)->m_saved_prio =
454 curthread->inherited_priority;
455 curthread->inherited_priority =
457 THR_SCHED_UNLOCK(curthread, curthread);
458 /* Add to the list of owned mutexes: */
459 MUTEX_ASSERT_NOT_OWNED(*mutex);
460 TAILQ_INSERT_TAIL(&curthread->mutexq,
462 } else if ((*mutex)->m_owner == curthread)
463 ret = mutex_self_trylock(curthread, *mutex);
465 /* Return a busy error: */
469 /* Trap invalid mutex types: */
471 /* Return an invalid argument error: */
/* Private mutexes keep the thread in a critical region while held. */
476 if (ret == 0 && private)
477 THR_CRITICAL_ENTER(curthread);
479 /* Unlock the mutex structure: */
480 THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
482 /* Return the completion status: */
/*
 * Public pthread_mutex_trylock(): handles lazy init of statically
 * initialized mutexes, then defers to mutex_trylock_common().
 * NOTE(review): excerpt — NULL-argument check and return are elided.
 */
487 __pthread_mutex_trylock(pthread_mutex_t *mutex)
489 struct pthread *curthread = _get_curthread();
496 * If the mutex is statically initialized, perform the dynamic
499 else if ((*mutex != NULL) ||
500 ((ret = init_static(curthread, mutex)) == 0))
501 ret = mutex_trylock_common(curthread, mutex);
/*
 * Libc-internal trylock: identical to the public version except lazy
 * init marks the mutex private (delete safe) via init_static_private().
 */
507 _pthread_mutex_trylock(pthread_mutex_t *mutex)
509 struct pthread *curthread = _get_curthread();
516 * If the mutex is statically initialized, perform the dynamic
517 * initialization marking the mutex private (delete safe):
519 else if ((*mutex != NULL) ||
520 ((ret = init_static_private(curthread, mutex)) == 0))
521 ret = mutex_trylock_common(curthread, mutex);
/*
 * mutex_lock_common(): blocking (optionally timed) acquire shared by the
 * lock and timedlock entry points. Loops acquiring the mutex's internal
 * lock, attempting ownership per protocol (NONE / INHERIT / PROTECT), and
 * otherwise enqueuing on the waiter queue, setting PS_MUTEX_WAIT, and
 * yielding via _thr_sched_switch() until ownership, error, interruption,
 * or timeout. A signal can back the thread out of the queue mid-wait,
 * hence the retry loop and the post-switch THR_IN_MUTEXQ cleanup.
 * Returns 0, EINVAL (bad abstime/type), ETIMEDOUT (elided), a ceiling-
 * violation error from curthread->error, or a self-lock result.
 * NOTE(review): excerpt — many lines are elided (ret declaration, EINVAL/
 * ETIMEDOUT assignments, abstime propagation into wakeup_time, braces);
 * code kept verbatim. The copy-pasted assertion message still says
 * "pthread_mutex_trylock_basic" — matches historical source.
 */
527 mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
528 const struct timespec * abstime)
533 THR_ASSERT((m != NULL) && (*m != NULL),
534 "Uninitialized mutex in pthread_mutex_trylock_basic");
/* Reject malformed absolute timeouts up front. */
536 if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
537 abstime->tv_nsec >= 1000000000))
540 /* Reset the interrupted flag: */
541 curthread->interrupted = 0;
542 curthread->timeout = 0;
543 curthread->wakeup_time.tv_sec = -1;
545 private = (*m)->m_flags & MUTEX_FLAGS_PRIVATE;
548 * Enter a loop waiting to become the mutex owner. We need a
549 * loop in case the waiting thread is interrupted by a signal
550 * to execute a signal handler. It is not (currently) possible
551 * to remain in the waiting queue while running a handler.
552 * Instead, the thread is interrupted and backed out of the
553 * waiting queue prior to executing the signal handler.
556 /* Lock the mutex structure: */
557 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
560 * If the mutex was statically allocated, properly
561 * initialize the tail queue.
563 if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
564 TAILQ_INIT(&(*m)->m_queue);
565 (*m)->m_flags |= MUTEX_FLAGS_INITED;
569 /* Process according to mutex type: */
570 switch ((*m)->m_protocol) {
571 /* Default POSIX mutex: */
572 case PTHREAD_PRIO_NONE:
573 if ((*m)->m_owner == NULL) {
574 /* Lock the mutex for this thread: */
575 (*m)->m_owner = curthread;
577 /* Add to the list of owned mutexes: */
578 MUTEX_ASSERT_NOT_OWNED(*m);
579 TAILQ_INSERT_TAIL(&curthread->mutexq,
582 THR_CRITICAL_ENTER(curthread);
584 /* Unlock the mutex structure: */
585 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
586 } else if ((*m)->m_owner == curthread) {
587 ret = mutex_self_lock(curthread, *m);
589 /* Unlock the mutex structure: */
590 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
593 * Join the queue of threads waiting to lock
594 * the mutex and save a pointer to the mutex.
596 mutex_queue_enq(*m, curthread);
597 curthread->data.mutex = *m;
/* Backout handler removes us from the queue if a signal arrives. */
598 curthread->sigbackout = mutex_lock_backout;
600 * This thread is active and is in a critical
601 * region (holding the mutex lock); we should
602 * be able to safely set the state.
604 THR_SCHED_LOCK(curthread, curthread);
605 /* Set the wakeup time: */
607 curthread->wakeup_time.tv_sec =
609 curthread->wakeup_time.tv_nsec =
613 THR_SET_STATE(curthread, PS_MUTEX_WAIT);
614 THR_SCHED_UNLOCK(curthread, curthread);
616 /* Unlock the mutex structure: */
617 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
619 /* Schedule the next thread: */
620 _thr_sched_switch(curthread);
/* Woken without ownership (signal/timeout): dequeue ourselves. */
622 if (THR_IN_MUTEXQ(curthread)) {
623 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
624 mutex_queue_remove(*m, curthread);
625 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
628 * Only clear these after assuring the
629 * thread is dequeued.
631 curthread->data.mutex = NULL;
632 curthread->sigbackout = NULL;
636 /* POSIX priority inheritence mutex: */
637 case PTHREAD_PRIO_INHERIT:
638 /* Check if this mutex is not locked: */
639 if ((*m)->m_owner == NULL) {
640 /* Lock the mutex for this thread: */
641 (*m)->m_owner = curthread;
643 THR_SCHED_LOCK(curthread, curthread);
644 /* Track number of priority mutexes owned: */
645 curthread->priority_mutex_count++;
648 * The mutex takes on attributes of the
649 * running thread when there are no waiters.
650 * Make sure the thread's scheduling lock is
651 * held while priorities are adjusted.
653 (*m)->m_prio = curthread->active_priority;
655 curthread->inherited_priority;
656 curthread->inherited_priority = (*m)->m_prio;
657 THR_SCHED_UNLOCK(curthread, curthread);
659 /* Add to the list of owned mutexes: */
660 MUTEX_ASSERT_NOT_OWNED(*m);
661 TAILQ_INSERT_TAIL(&curthread->mutexq,
664 THR_CRITICAL_ENTER(curthread);
666 /* Unlock the mutex structure: */
667 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
668 } else if ((*m)->m_owner == curthread) {
669 ret = mutex_self_lock(curthread, *m);
671 /* Unlock the mutex structure: */
672 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
675 * Join the queue of threads waiting to lock
676 * the mutex and save a pointer to the mutex.
678 mutex_queue_enq(*m, curthread);
679 curthread->data.mutex = *m;
680 curthread->sigbackout = mutex_lock_backout;
683 * This thread is active and is in a critical
684 * region (holding the mutex lock); we should
685 * be able to safely set the state.
/* Propagate our priority to the owner (priority inheritance). */
687 if (curthread->active_priority > (*m)->m_prio)
688 /* Adjust priorities: */
689 mutex_priority_adjust(curthread, *m);
691 THR_SCHED_LOCK(curthread, curthread);
692 /* Set the wakeup time: */
694 curthread->wakeup_time.tv_sec =
696 curthread->wakeup_time.tv_nsec =
699 THR_SET_STATE(curthread, PS_MUTEX_WAIT);
700 THR_SCHED_UNLOCK(curthread, curthread);
702 /* Unlock the mutex structure: */
703 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
705 /* Schedule the next thread: */
706 _thr_sched_switch(curthread);
708 if (THR_IN_MUTEXQ(curthread)) {
709 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
710 mutex_queue_remove(*m, curthread);
711 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
714 * Only clear these after assuring the
715 * thread is dequeued.
717 curthread->data.mutex = NULL;
718 curthread->sigbackout = NULL;
722 /* POSIX priority protection mutex: */
723 case PTHREAD_PRIO_PROTECT:
724 /* Check for a priority ceiling violation: */
725 if (curthread->active_priority > (*m)->m_prio) {
726 /* Unlock the mutex structure: */
727 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
730 /* Check if this mutex is not locked: */
731 else if ((*m)->m_owner == NULL) {
733 * Lock the mutex for the running
736 (*m)->m_owner = curthread;
738 THR_SCHED_LOCK(curthread, curthread);
739 /* Track number of priority mutexes owned: */
740 curthread->priority_mutex_count++;
743 * The running thread inherits the ceiling
744 * priority of the mutex and executes at that
745 * priority. Make sure the thread's
746 * scheduling lock is held while priorities
749 curthread->active_priority = (*m)->m_prio;
751 curthread->inherited_priority;
752 curthread->inherited_priority = (*m)->m_prio;
753 THR_SCHED_UNLOCK(curthread, curthread);
755 /* Add to the list of owned mutexes: */
756 MUTEX_ASSERT_NOT_OWNED(*m);
757 TAILQ_INSERT_TAIL(&curthread->mutexq,
760 THR_CRITICAL_ENTER(curthread);
762 /* Unlock the mutex structure: */
763 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
764 } else if ((*m)->m_owner == curthread) {
765 ret = mutex_self_lock(curthread, *m);
767 /* Unlock the mutex structure: */
768 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
771 * Join the queue of threads waiting to lock
772 * the mutex and save a pointer to the mutex.
774 mutex_queue_enq(*m, curthread);
775 curthread->data.mutex = *m;
776 curthread->sigbackout = mutex_lock_backout;
778 /* Clear any previous error: */
779 curthread->error = 0;
782 * This thread is active and is in a critical
783 * region (holding the mutex lock); we should
784 * be able to safely set the state.
787 THR_SCHED_LOCK(curthread, curthread);
788 /* Set the wakeup time: */
790 curthread->wakeup_time.tv_sec =
792 curthread->wakeup_time.tv_nsec =
795 THR_SET_STATE(curthread, PS_MUTEX_WAIT);
796 THR_SCHED_UNLOCK(curthread, curthread);
798 /* Unlock the mutex structure: */
799 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
801 /* Schedule the next thread: */
802 _thr_sched_switch(curthread);
804 if (THR_IN_MUTEXQ(curthread)) {
805 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
806 mutex_queue_remove(*m, curthread);
807 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
810 * Only clear these after assuring the
811 * thread is dequeued.
813 curthread->data.mutex = NULL;
814 curthread->sigbackout = NULL;
817 * The threads priority may have changed while
818 * waiting for the mutex causing a ceiling
821 ret = curthread->error;
822 curthread->error = 0;
826 /* Trap invalid mutex types: */
828 /* Unlock the mutex structure: */
829 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
831 /* Return an invalid argument error: */
836 } while (((*m)->m_owner != curthread) && (ret == 0) &&
837 (curthread->interrupted == 0) && (curthread->timeout == 0));
/* Timed out without acquiring ownership: report ETIMEDOUT (elided). */
839 if (ret == 0 && (*m)->m_owner != curthread && curthread->timeout)
843 * Check to see if this thread was interrupted and
844 * is still in the mutex queue of waiting threads:
846 if (curthread->interrupted != 0) {
847 /* Remove this thread from the mutex queue. */
848 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
849 if (THR_IN_SYNCQ(curthread))
850 mutex_queue_remove(*m, curthread);
851 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
853 /* Check for asynchronous cancellation. */
854 if (curthread->continuation != NULL)
855 curthread->continuation((void *) curthread);
858 /* Return the completion status: */
/*
 * Public pthread_mutex_lock(): bootstraps the library if needed, lazily
 * initializes static mutexes, then blocks via mutex_lock_common() with no
 * timeout. Also exported to libc as _thr_mutex_lock (strong ref below).
 */
863 __pthread_mutex_lock(pthread_mutex_t *m)
865 struct pthread *curthread;
868 if (_thr_initial == NULL)
869 _libpthread_init(NULL);
871 curthread = _get_curthread();
876 * If the mutex is statically initialized, perform the dynamic
879 else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
880 ret = mutex_lock_common(curthread, m, NULL);
885 __strong_reference(__pthread_mutex_lock, _thr_mutex_lock);
/*
 * Libc-internal mutex lock: same as the public entry point, but lazy
 * init marks the mutex private (delete safe).
 */
888 _pthread_mutex_lock(pthread_mutex_t *m)
890 struct pthread *curthread;
893 if (_thr_initial == NULL)
894 _libpthread_init(NULL);
895 curthread = _get_curthread();
901 * If the mutex is statically initialized, perform the dynamic
902 * initialization marking it private (delete safe):
904 else if ((*m != NULL) ||
905 ((ret = init_static_private(curthread, m)) == 0))
906 ret = mutex_lock_common(curthread, m, NULL);
/*
 * Public pthread_mutex_timedlock(): like lock, but passes the absolute
 * timeout through to mutex_lock_common().
 */
912 __pthread_mutex_timedlock(pthread_mutex_t *m,
913 const struct timespec *abs_timeout)
915 struct pthread *curthread;
918 if (_thr_initial == NULL)
919 _libpthread_init(NULL);
921 curthread = _get_curthread();
926 * If the mutex is statically initialized, perform the dynamic
929 else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
930 ret = mutex_lock_common(curthread, m, abs_timeout);
/*
 * Libc-internal timedlock: private (delete-safe) lazy init, then the
 * common timed-acquire path.
 */
936 _pthread_mutex_timedlock(pthread_mutex_t *m,
937 const struct timespec *abs_timeout)
939 struct pthread *curthread;
942 if (_thr_initial == NULL)
943 _libpthread_init(NULL);
944 curthread = _get_curthread();
950 * If the mutex is statically initialized, perform the dynamic
951 * initialization marking it private (delete safe):
953 else if ((*m != NULL) ||
954 ((ret = init_static_private(curthread, m)) == 0))
955 ret = mutex_lock_common(curthread, m, abs_timeout);
/*
 * pthread_mutex_unlock(): thin wrapper over mutex_unlock_common() with
 * no condition-variable reference added.
 */
961 _pthread_mutex_unlock(pthread_mutex_t *m)
963 return (mutex_unlock_common(m, /* add reference */ 0));
966 __strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);
/*
 * Condition-variable helper: unlocks while bumping m_refcount so the
 * mutex cannot be destroyed during the cv wait.
 */
969 _mutex_cv_unlock(pthread_mutex_t *m)
971 return (mutex_unlock_common(m, /* add reference */ 1));
/*
 * Condition-variable helper: re-acquires the mutex after a cv wait and
 * drops the reference taken by _mutex_cv_unlock() (the decrement at
 * original line 983 is elided from this excerpt).
 */
975 _mutex_cv_lock(pthread_mutex_t *m)
977 struct pthread *curthread;
980 curthread = _get_curthread();
981 if ((ret = _pthread_mutex_lock(m)) == 0) {
982 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
984 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
/*
 * Handles trylock on a mutex the caller already owns: EBUSY (elided) for
 * errorcheck/normal/adaptive types, recursion-count increment for
 * recursive mutexes, EINVAL (elided) for anything else.
 */
990 mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
995 /* case PTHREAD_MUTEX_DEFAULT: */
996 case PTHREAD_MUTEX_ERRORCHECK:
997 case PTHREAD_MUTEX_NORMAL:
998 case PTHREAD_MUTEX_ADAPTIVE_NP:
1002 case PTHREAD_MUTEX_RECURSIVE:
1003 /* Increment the lock count: */
1008 /* Trap invalid mutex types; */
/*
 * Handles a blocking lock on a mutex the caller already owns:
 * errorcheck/adaptive -> EDEADLK (elided); normal -> intentional
 * self-deadlock (PS_DEADLOCK, never returns to runnable by itself);
 * recursive -> count increment. Panics on private mutexes since libc
 * internals must never self-lock.
 */
1016 mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
1021 * Don't allow evil recursive mutexes for private use
1022 * in libc and libpthread.
1024 if (m->m_flags & MUTEX_FLAGS_PRIVATE)
1025 PANIC("Recurse on a private mutex.");
1027 switch (m->m_type) {
1028 /* case PTHREAD_MUTEX_DEFAULT: */
1029 case PTHREAD_MUTEX_ERRORCHECK:
1030 case PTHREAD_MUTEX_ADAPTIVE_NP:
1032 * POSIX specifies that mutexes should return EDEADLK if a
1033 * recursive lock is detected.
1038 case PTHREAD_MUTEX_NORMAL:
1040 * What SS2 define as a 'normal' mutex. Intentionally
1041 * deadlock on attempts to get a lock you already own.
1044 THR_SCHED_LOCK(curthread, curthread);
1045 THR_SET_STATE(curthread, PS_DEADLOCK);
1046 THR_SCHED_UNLOCK(curthread, curthread);
1048 /* Unlock the mutex structure: */
1049 THR_LOCK_RELEASE(curthread, &m->m_lock);
1051 /* Schedule the next thread: */
1052 _thr_sched_switch(curthread);
1055 case PTHREAD_MUTEX_RECURSIVE:
1056 /* Increment the lock count: */
1061 /* Trap invalid mutex types; */
/*
 * mutex_unlock_common(): shared unlock path. Verifies ownership (EPERM in
 * the elided branch), decrements the recursion count on recursive types,
 * otherwise restores the unlocker's inherited/active priority (for
 * INHERIT/PROTECT), removes the mutex from the owner's queue, and hands
 * ownership directly to the highest-priority waiter via mutex_handoff().
 * add_reference != 0 (cv path) bumps m_refcount before returning.
 * The returned kse_mailbox kick-off (original line ~1239) is elided.
 * NOTE(review): excerpt — ret initialization, EPERM/count lines, and
 * several braces are elided; code kept verbatim.
 */
1069 mutex_unlock_common(pthread_mutex_t *m, int add_reference)
1071 struct pthread *curthread = _get_curthread();
1072 struct kse_mailbox *kmbx = NULL;
1075 if (m == NULL || *m == NULL)
1078 /* Lock the mutex structure: */
1079 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
1081 /* Process according to mutex type: */
1082 switch ((*m)->m_protocol) {
1083 /* Default POSIX mutex: */
1084 case PTHREAD_PRIO_NONE:
1086 * Check if the running thread is not the owner of the
1089 if ((*m)->m_owner != curthread)
1091 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1092 ((*m)->m_count > 0))
1093 /* Decrement the count: */
1097 * Clear the count in case this is a recursive
1102 /* Remove the mutex from the threads queue. */
1103 MUTEX_ASSERT_IS_OWNED(*m);
1104 TAILQ_REMOVE(&(*m)->m_owner->mutexq,
1106 MUTEX_INIT_LINK(*m);
1109 * Hand off the mutex to the next waiting
1112 kmbx = mutex_handoff(curthread, *m);
1116 /* POSIX priority inheritence mutex: */
1117 case PTHREAD_PRIO_INHERIT:
1119 * Check if the running thread is not the owner of the
1122 if ((*m)->m_owner != curthread)
1124 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1125 ((*m)->m_count > 0))
1126 /* Decrement the count: */
1130 * Clear the count in case this is recursive
1136 * Restore the threads inherited priority and
1137 * recompute the active priority (being careful
1138 * not to override changes in the threads base
1139 * priority subsequent to locking the mutex).
1141 THR_SCHED_LOCK(curthread, curthread);
1142 curthread->inherited_priority =
1144 curthread->active_priority =
1145 MAX(curthread->inherited_priority,
1146 curthread->base_priority);
1149 * This thread now owns one less priority mutex.
1151 curthread->priority_mutex_count--;
1152 THR_SCHED_UNLOCK(curthread, curthread);
1154 /* Remove the mutex from the threads queue. */
1155 MUTEX_ASSERT_IS_OWNED(*m);
1156 TAILQ_REMOVE(&(*m)->m_owner->mutexq,
1158 MUTEX_INIT_LINK(*m);
1161 * Hand off the mutex to the next waiting
1164 kmbx = mutex_handoff(curthread, *m);
1168 /* POSIX priority ceiling mutex: */
1169 case PTHREAD_PRIO_PROTECT:
1171 * Check if the running thread is not the owner of the
1174 if ((*m)->m_owner != curthread)
1176 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1177 ((*m)->m_count > 0))
1178 /* Decrement the count: */
1182 * Clear the count in case this is a recursive
1188 * Restore the threads inherited priority and
1189 * recompute the active priority (being careful
1190 * not to override changes in the threads base
1191 * priority subsequent to locking the mutex).
1193 THR_SCHED_LOCK(curthread, curthread);
1194 curthread->inherited_priority =
1196 curthread->active_priority =
1197 MAX(curthread->inherited_priority,
1198 curthread->base_priority);
1201 * This thread now owns one less priority mutex.
1203 curthread->priority_mutex_count--;
1204 THR_SCHED_UNLOCK(curthread, curthread);
1206 /* Remove the mutex from the threads queue. */
1207 MUTEX_ASSERT_IS_OWNED(*m);
1208 TAILQ_REMOVE(&(*m)->m_owner->mutexq,
1210 MUTEX_INIT_LINK(*m);
1213 * Hand off the mutex to the next waiting
1216 kmbx = mutex_handoff(curthread, *m);
1220 /* Trap invalid mutex types: */
1222 /* Return an invalid argument error: */
1227 if ((ret == 0) && (add_reference != 0))
1228 /* Increment the reference count: */
1231 /* Leave the critical region if this is a private mutex. */
1232 if ((ret == 0) && ((*m)->m_flags & MUTEX_FLAGS_PRIVATE))
1233 THR_CRITICAL_LEAVE(curthread);
1235 /* Unlock the mutex structure: */
1236 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
1242 /* Return the completion status: */
1248 * This function is called when a change in base priority occurs for
1249 * a thread that is holding or waiting for a priority protection or
1250 * inheritence mutex. A change in a threads base priority can effect
1251 * changes to active priorities of other threads and to the ordering
1252 * of mutex locking by waiting threads.
1254 * This must be called without the target thread's scheduling lock held.
/*
 * NOTE(review): excerpt — the comment opener, signature return type, and
 * several braces are elided from this listing; code kept verbatim.
 */
1257 _mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
1260 struct pthread_mutex *m;
1262 /* Adjust the priorites of any owned priority mutexes: */
1263 if (pthread->priority_mutex_count > 0) {
1265 * Rescan the mutexes owned by this thread and correct
1266 * their priorities to account for this threads change
1267 * in priority. This has the side effect of changing
1268 * the threads active priority.
1270 * Be sure to lock the first mutex in the list of owned
1271 * mutexes. This acts as a barrier against another
1272 * simultaneous call to change the threads priority
1273 * and from the owning thread releasing the mutex.
1275 m = TAILQ_FIRST(&pthread->mutexq);
1277 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1279 * Make sure the thread still owns the lock.
1281 if (m == TAILQ_FIRST(&pthread->mutexq))
1282 mutex_rescan_owned(curthread, pthread,
1283 /* rescan all owned */ NULL);
1284 THR_LOCK_RELEASE(curthread, &m->m_lock);
1289 * If this thread is waiting on a priority inheritence mutex,
1290 * check for priority adjustments. A change in priority can
1291 * also cause a ceiling violation(*) for a thread waiting on
1292 * a priority protection mutex; we don't perform the check here
1293 * as it is done in pthread_mutex_unlock.
1295 * (*) It should be noted that a priority change to a thread
1296 * _after_ taking and owning a priority ceiling mutex
1297 * does not affect ownership of that mutex; the ceiling
1298 * priority is only checked before mutex ownership occurs.
1300 if (propagate_prio != 0) {
1302 * Lock the thread's scheduling queue. This is a bit
1303 * convoluted; the "in synchronization queue flag" can
1304 * only be cleared with both the thread's scheduling and
1305 * mutex locks held. The thread's pointer to the wanted
1306 * mutex is guaranteed to be valid during this time.
1308 THR_SCHED_LOCK(curthread, pthread);
1310 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
1311 ((m = pthread->data.mutex) == NULL))
1312 THR_SCHED_UNLOCK(curthread, pthread);
1315 * This thread is currently waiting on a mutex; unlock
1316 * the scheduling queue lock and lock the mutex. We
1317 * can't hold both at the same time because the locking
1318 * order could cause a deadlock.
1320 THR_SCHED_UNLOCK(curthread, pthread);
1321 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1324 * Check to make sure this thread is still in the
1325 * same state (the lock above can yield the CPU to
1326 * another thread or the thread may be running on
1329 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1330 (pthread->data.mutex == m)) {
1332 * Remove and reinsert this thread into
1333 * the list of waiting threads to preserve
1334 * decreasing priority order.
1336 mutex_queue_remove(m, pthread);
1337 mutex_queue_enq(m, pthread);
1339 if (m->m_protocol == PTHREAD_PRIO_INHERIT)
1340 /* Adjust priorities: */
1341 mutex_priority_adjust(curthread, m);
1344 /* Unlock the mutex structure: */
1345 THR_LOCK_RELEASE(curthread, &m->m_lock);
1351 * Called when a new thread is added to the mutex waiting queue or
1352 * when the priority of a thread already in the mutex waiting queue changes.
1355 * This must be called with the mutex locked by the current thread.
/*
 * Propagate a priority change up a chain of PTHREAD_PRIO_INHERIT mutexes.
 *
 * Starting from 'mutex' (which the caller holds locked and must unlock
 * itself), recompute the mutex priority from its highest-priority waiter
 * and its owner's saved/base priorities.  If it changed, rescan the
 * owner's owned-mutex list (which may change the owner's active
 * priority); if the owner is in turn blocked on another PRIO_INHERIT
 * mutex whose priority is now stale, continue the walk up that chain
 * until no further change is needed.
 *
 * NOTE(review): this listing appears decimated (embedded line numbers,
 * missing braces/lines such as the loop's "done = 0"); code left
 * byte-identical.
 */
1358 mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
1360 pthread_mutex_t m = mutex;
1361 struct pthread *pthread_next, *pthread = mutex->m_owner;
1362 int done, temp_prio;
1365 * Calculate the mutex priority as the maximum of the highest
1366 * active priority of any waiting threads and the owning thread's
1367 * active priority(*).
1369 * (*) Because the owning thread's current active priority may
1370 * reflect priority inherited from this mutex (and the mutex
1371 * priority may have changed) we must recalculate the active
1372 * priority based on the thread's saved inherited priority
1373 * and its base priority.
1375 pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
1376 temp_prio = MAX(pthread_next->active_priority,
1377 MAX(m->m_saved_prio, pthread->base_priority));
1379 /* See if this mutex really needs adjusting: */
1380 if (temp_prio == m->m_prio)
1381 /* No need to propagate the priority: */
1384 /* Set new priority of the mutex: */
1385 m->m_prio = temp_prio;
1388 * Don't unlock the mutex passed in as an argument. It is
1389 * expected to be locked and unlocked by the caller.
1394 * Save the thread's priority before rescanning the
1397 temp_prio = pthread->active_priority;
1400 * Fix the priorities for all mutexes held by the owning
1401 * thread since taking this mutex. This also has a
1402 * potential side-effect of changing the thread's priority.
1404 * At this point the mutex is locked by the current thread.
1405 * The owning thread can't release the mutex until it is
1406 * unlocked, so we should be able to safely walk its list
1409 mutex_rescan_owned(curthread, pthread, m);
1412 * If this isn't the first time through the loop,
1413 * the current mutex needs to be unlocked.
1416 THR_LOCK_RELEASE(curthread, &m->m_lock);
1418 /* Assume we're done unless told otherwise: */
1422 * If the thread is currently waiting on a mutex, check
1423 * to see if the thread's new priority has affected the
1424 * priority of the mutex.
1426 if ((temp_prio != pthread->active_priority) &&
1427 ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1428 ((m = pthread->data.mutex) != NULL) &&
1429 (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
1430 /* Lock the mutex structure: */
1431 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1434 * Make sure the thread is still waiting on the
/* Re-check under the mutex lock: the sync-queue flag and the
 * thread's mutex pointer may have changed while unlocked. */
1437 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1438 (m == pthread->data.mutex)) {
1440 * The priority for this thread has changed.
1441 * Remove and reinsert this thread into the
1442 * list of waiting threads to preserve
1443 * decreasing priority order.
1445 mutex_queue_remove(m, pthread);
1446 mutex_queue_enq(m, pthread);
1449 * Grab the waiting thread with highest
1452 pthread_next = TAILQ_FIRST(&m->m_queue);
1455 * Calculate the mutex priority as the maximum
1456 * of the highest active priority of any
1457 * waiting threads and the owning thread's
1460 temp_prio = MAX(pthread_next->active_priority,
1461 MAX(m->m_saved_prio,
1462 m->m_owner->base_priority));
1464 if (temp_prio != m->m_prio) {
1466 * The priority needs to be propagated
1467 * to the mutex this thread is waiting
1468 * on and up to the owner of that mutex.
1470 m->m_prio = temp_prio;
1471 pthread = m->m_owner;
1473 /* We're not done yet: */
1477 /* Only release the mutex if we're done: */
1479 THR_LOCK_RELEASE(curthread, &m->m_lock);
1481 } while (done == 0);
/*
 * Recompute the priorities of the mutexes owned by 'pthread', starting
 * after 'mutex' (or from the head of the owned list when mutex == NULL),
 * accumulating the inherited priority along the list; then update the
 * thread's inherited and active priorities, re-queuing the thread in its
 * run queue if its active priority changed.
 *
 * Called with 'mutex' (when non-NULL) locked by the current thread.
 *
 * NOTE(review): listing appears decimated (embedded line numbers,
 * missing braces and the "inherited_prio = 0" line); code untouched.
 */
1485 mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
1486 struct pthread_mutex *mutex)
1488 struct pthread_mutex *m;
1489 struct pthread *pthread_next;
1490 int active_prio, inherited_prio;
1493 * Start walking the mutexes the thread has taken since
1494 * taking this mutex.
1496 if (mutex == NULL) {
1498 * A null mutex means start at the beginning of the owned
1501 m = TAILQ_FIRST(&pthread->mutexq);
1503 /* There is no inherited priority yet. */
1507 * The caller wants to start after a specific mutex. It
1508 * is assumed that this mutex is a priority inheritance
1509 * mutex and that its priority has been correctly
1512 m = TAILQ_NEXT(mutex, m_qe);
1514 /* Start inheriting priority from the specified mutex. */
1515 inherited_prio = mutex->m_prio;
1517 active_prio = MAX(inherited_prio, pthread->base_priority);
1519 for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
1521 * We only want to deal with priority inheritance
1522 * mutexes. This might be optimized by only placing
1523 * priority inheritance mutexes into the owned mutex
1524 * list, but it may prove to be useful having all
1525 * owned mutexes in this list. Consider a thread
1526 * exiting while holding mutexes...
1528 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1530 * Fix the owner's saved (inherited) priority to
1531 * reflect the priority of the previous mutex.
1533 m->m_saved_prio = inherited_prio;
1535 if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1536 /* Recalculate the priority of the mutex: */
1537 m->m_prio = MAX(active_prio,
1538 pthread_next->active_priority);
/* No waiters: the mutex carries only the inherited priority. */
1540 m->m_prio = active_prio;
1542 /* Recalculate new inherited and active priorities: */
1543 inherited_prio = m->m_prio;
1544 active_prio = MAX(m->m_prio, pthread->base_priority);
1549 * Fix the thread's inherited priority and recalculate its
1552 pthread->inherited_priority = inherited_prio;
1553 active_prio = MAX(inherited_prio, pthread->base_priority);
1555 if (active_prio != pthread->active_priority) {
1556 /* Lock the thread's scheduling queue: */
1557 THR_SCHED_LOCK(curthread, pthread);
1559 if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
1561 * This thread is not in a run queue. Just set
1562 * its active priority.
1564 pthread->active_priority = active_prio;
1568 * This thread is in a run queue. Remove it from
1569 * the queue before changing its priority:
1571 THR_RUNQ_REMOVE(pthread);
1574 * POSIX states that if the priority is being
1575 * lowered, the thread must be inserted at the
1576 * head of the queue for its priority if it owns
1577 * any priority protection or inheritance mutexes.
1579 if ((active_prio < pthread->active_priority) &&
1580 (pthread->priority_mutex_count > 0)) {
1581 /* Set the new active priority. */
1582 pthread->active_priority = active_prio;
1584 THR_RUNQ_INSERT_HEAD(pthread);
1586 /* Set the new active priority. */
1587 pthread->active_priority = active_prio;
1589 THR_RUNQ_INSERT_TAIL(pthread);
1592 THR_SCHED_UNLOCK(curthread, pthread);
/*
 * Unlock every mutex flagged MUTEX_FLAGS_PRIVATE on the given thread's
 * owned-mutex list.  m_next is captured before each unlock because the
 * unlock presumably removes m from pthread->mutexq (safe-traversal
 * pattern).  pthread_mutex_t is a pointer to struct pthread_mutex here,
 * so &m is the pthread_mutex_t * that pthread_mutex_unlock() expects.
 */
1597 _mutex_unlock_private(pthread_t pthread)
1599 struct pthread_mutex *m, *m_next;
1601 for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1602 m_next = TAILQ_NEXT(m, m_qe);
1603 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1604 pthread_mutex_unlock(&m);
1609 * This is called by the current thread when it wants to back out of a
1610 * mutex_lock in order to run a signal handler.
/*
 * Back out of a pending mutex_lock so the current thread can run a
 * signal handler; 'arg' is the current thread.  If the mutex was
 * handed off to us between checking the sync-queue flag and taking the
 * mutex lock, the mutex is unlocked instead of being abandoned.
 * Clears curthread->sigbackout when done so it is not invoked again.
 */
1613 mutex_lock_backout(void *arg)
1615 struct pthread *curthread = (struct pthread *)arg;
1616 struct pthread_mutex *m;
1618 if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1620 * Any other thread may clear the "in sync queue flag",
1621 * but only the current thread can clear the pointer
1622 * to the mutex. So if the flag is set, we can
1623 * guarantee that the pointer to the mutex is valid.
1624 * The only problem may be if the mutex is destroyed
1625 * out from under us, but that should be considered
1626 * an application bug.
1628 m = curthread->data.mutex;
1630 /* Lock the mutex structure: */
1631 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1635 * Check to make sure this thread doesn't already own
1636 * the mutex. Since mutexes are unlocked with direct
1637 * handoffs, it is possible the previous owner gave it
1638 * to us after we checked the sync queue flag and before
1639 * we locked the mutex structure.
1641 if (m->m_owner == curthread) {
1642 THR_LOCK_RELEASE(curthread, &m->m_lock);
1643 mutex_unlock_common(&m, /* add_reference */ 0);
1646 * Remove ourselves from the mutex queue and
1647 * clear the pointer to the mutex. We may no
1648 * longer be in the mutex queue, but the removal
1649 * function will DTRT.
1651 mutex_queue_remove(m, curthread);
1652 curthread->data.mutex = NULL;
1653 THR_LOCK_RELEASE(curthread, &m->m_lock);
1656 /* No need to call this again. */
1657 curthread->sigbackout = NULL;
1661 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order and hand the mutex directly to it.
1664 * In order to properly dequeue a thread from the mutex queue and
1665 * make it runnable without the possibility of errant wakeups, it
1666 * is necessary to lock the thread's scheduling queue while also
1667 * holding the mutex lock.
/*
 * Hand the mutex directly to the highest-priority valid waiter.
 *
 * Walks the wait queue (descending priority), dequeuing each candidate
 * under its scheduling lock and performing per-protocol ownership
 * bookkeeping; the thread is then made runnable.  A candidate can fail
 * to take ownership (e.g. interrupted, or a PRIO_PROTECT ceiling
 * violation sets EINVAL), in which case the next waiter is tried.
 * Returns the kse_mailbox from _thr_setrunnable_unlocked() for the last
 * thread woken, or NULL when no thread was woken.
 *
 * Must be called with the mutex structure locked; the scheduling and
 * mutex locks are held together here, which the file's lock order
 * permits in this direction.
 *
 * NOTE(review): listing appears decimated (missing breaks/braces and
 * the trailing "mutex->m_prio = ..." assignment); code untouched.
 */
1669 static struct kse_mailbox *
1670 mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
1672 struct kse_mailbox *kmbx = NULL;
1673 struct pthread *pthread;
1675 /* Keep dequeueing until we find a valid thread: */
1676 mutex->m_owner = NULL;
1677 pthread = TAILQ_FIRST(&mutex->m_queue);
1678 while (pthread != NULL) {
1679 /* Take the thread's scheduling lock: */
1680 THR_SCHED_LOCK(curthread, pthread);
1682 /* Remove the thread from the mutex queue: */
1683 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1684 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1687 * Only exit the loop if the thread hasn't been
1690 switch (mutex->m_protocol) {
1691 case PTHREAD_PRIO_NONE:
1693 * Assign the new owner and add the mutex to the
1694 * thread's list of owned mutexes.
1696 mutex->m_owner = pthread;
1697 TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
1700 case PTHREAD_PRIO_INHERIT:
1702 * Assign the new owner and add the mutex to the
1703 * thread's list of owned mutexes.
1705 mutex->m_owner = pthread;
1706 TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
1708 /* Track number of priority mutexes owned: */
1709 pthread->priority_mutex_count++;
1712 * Set the priority of the mutex. Since our waiting
1713 * threads are in descending priority order, the
1714 * priority of the mutex becomes the active priority
1715 * of the thread we just dequeued.
1717 mutex->m_prio = pthread->active_priority;
1719 /* Save the owning thread's inherited priority: */
1720 mutex->m_saved_prio = pthread->inherited_priority;
1723 * The owning thread's inherited priority now becomes
1724 * its active priority (the priority of the mutex).
1726 pthread->inherited_priority = mutex->m_prio;
1729 case PTHREAD_PRIO_PROTECT:
1730 if (pthread->active_priority > mutex->m_prio) {
1732 * Either the mutex ceiling priority has
1733 * been lowered and/or this thread's priority
1734 * has been raised subsequent to the thread
1735 * being queued on the waiting list.
/* Ceiling violation: report EINVAL to the waiter and
 * leave the mutex unowned so the next waiter is tried. */
1737 pthread->error = EINVAL;
1741 * Assign the new owner and add the mutex
1742 * to the thread's list of owned mutexes.
1744 mutex->m_owner = pthread;
1745 TAILQ_INSERT_TAIL(&pthread->mutexq,
1748 /* Track number of priority mutexes owned: */
1749 pthread->priority_mutex_count++;
1752 * Save the owning thread's inherited
1755 mutex->m_saved_prio =
1756 pthread->inherited_priority;
1759 * The owning thread inherits the ceiling
1760 * priority of the mutex and executes at
1763 pthread->inherited_priority = mutex->m_prio;
1764 pthread->active_priority = mutex->m_prio;
1770 /* Make the thread runnable and unlock the scheduling queue: */
1771 kmbx = _thr_setrunnable_unlocked(pthread);
1773 /* Add a preemption point. */
1774 if ((curthread->kseg == pthread->kseg) &&
1775 (pthread->active_priority > curthread->active_priority))
1776 curthread->critical_yield = 1;
1778 if (mutex->m_owner == pthread) {
1779 /* We're done; a valid owner was found. */
1780 if (mutex->m_flags & MUTEX_FLAGS_PRIVATE)
1781 THR_CRITICAL_ENTER(pthread);
1782 THR_SCHED_UNLOCK(curthread, pthread);
/* Ownership not taken: advance to the next waiter.  Relies on
 * TAILQ_REMOVE leaving the removed element's next link intact. */
1785 THR_SCHED_UNLOCK(curthread, pthread);
1786 /* Get the next thread from the waiting queue: */
1787 pthread = TAILQ_NEXT(pthread, sqe);
1790 if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
1791 /* This mutex has no priority: */
1797 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
/*
 * Pop waiters from the head of the mutex wait queue (highest priority
 * first) until one is found that has not been interrupted; interrupted
 * threads are simply removed and their sync-queue flag cleared.
 *
 * NOTE(review): listing appears truncated — the declaration of
 * 'pthread', the loop break, and the return are not visible here.
 */
1800 static inline pthread_t
1801 mutex_queue_deq(struct pthread_mutex *mutex)
1805 while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1806 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1807 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1810 * Only exit the loop if the thread hasn't been
1813 if (pthread->interrupted == 0)
1821 * Remove a waiting thread from a mutex queue in descending priority order.
/*
 * Remove a thread from a mutex wait queue and clear its sync-queue
 * flag.  A no-op when the flag is not set, so it is safe to call even
 * if the thread has already been dequeued (see the "removal function
 * will DTRT" comment in mutex_lock_backout).
 */
1824 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1826 if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1827 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1828 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1833 * Enqueue a waiting thread to a queue in descending priority order.
/*
 * Insert a thread into the mutex wait queue in descending
 * active-priority order, then set its sync-queue flag.  The '<='
 * comparisons give FIFO ordering among equal-priority threads.  The
 * head-to-tail scan cannot run off the end: the tail check above
 * guarantees a lower-priority element exists before the scan starts.
 *
 * NOTE(review): the 'else' introducing the scan branch is missing from
 * this listing; code left untouched.
 */
1836 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1838 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1840 THR_ASSERT_NOT_IN_SYNCQ(pthread);
1842 * For the common case of all threads having equal priority,
1843 * we perform a quick check against the priority of the thread
1844 * at the tail of the queue.
1846 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1847 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1849 tid = TAILQ_FIRST(&mutex->m_queue);
1850 while (pthread->active_priority <= tid->active_priority)
1851 tid = TAILQ_NEXT(tid, sqe);
1852 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1854 pthread->sflags |= THR_FLAGS_IN_SYNCQ;