2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #include <sys/param.h>
38 #include <sys/queue.h>
40 #include "pthread_private.h"
/*
 * Debug-build invariants: when _PTHREADS_INVARIANTS is defined, these
 * macros (re)initialize a mutex's TAILQ linkage and PANIC if a mutex is
 * (or is not) linked onto a thread's owned-mutex queue when it should
 * not (or should) be.  In non-invariant builds they expand to nothing.
 * NOTE(review): this excerpt is missing interior lines — the
 * `} while (0)` closers for the three do-blocks and the `#else` /
 * `#endif` of the conditional do not appear here; confirm against the
 * full source before editing.
 */
42 #if defined(_PTHREADS_INVARIANTS)
43 #define _MUTEX_INIT_LINK(m) do { \
44 (m)->m_qe.tqe_prev = NULL; \
45 (m)->m_qe.tqe_next = NULL; \
47 #define _MUTEX_ASSERT_IS_OWNED(m) do { \
48 if ((m)->m_qe.tqe_prev == NULL) \
49 PANIC("mutex is not on list"); \
51 #define _MUTEX_ASSERT_NOT_OWNED(m) do { \
52 if (((m)->m_qe.tqe_prev != NULL) || \
53 ((m)->m_qe.tqe_next != NULL)) \
54 PANIC("mutex is on list"); \
57 #define _MUTEX_INIT_LINK(m)
58 #define _MUTEX_ASSERT_IS_OWNED(m)
59 #define _MUTEX_ASSERT_NOT_OWNED(m)
/*
 * Forward declarations for the file-local helpers defined below:
 * self-lock handling, common unlock, priority-inheritance propagation,
 * and the priority-ordered mutex wait-queue primitives.
 */
65 static inline int mutex_self_trylock(pthread_mutex_t);
66 static inline int mutex_self_lock(pthread_mutex_t);
67 static inline int mutex_unlock_common(pthread_mutex_t *, int);
68 static void mutex_priority_adjust(pthread_mutex_t);
69 static void mutex_rescan_owned (pthread_t, pthread_mutex_t);
70 static inline pthread_t mutex_queue_deq(pthread_mutex_t);
71 static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
72 static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
/*
 * Spinlock serializing lazy initialization of statically-allocated
 * mutexes (see init_static()/init_static_private()), plus the canned
 * attribute object used to create "private" (libc-internal) mutexes.
 */
75 static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
77 static struct pthread_mutex_attr static_mutex_attr =
78 PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
79 static pthread_mutexattr_t static_mattr = &static_mutex_attr;
/*
 * Export weak aliases: applications resolve the pthread_* names to the
 * double-underscore entry points (which perform lazy static init with
 * public semantics), while the single-underscore versions remain for
 * libc-internal use.
 */
81 /* Single underscore versions provided for libc internal usage: */
82 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
83 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
85 /* No difference between libc and application usage of these: */
86 __weak_reference(_pthread_mutex_init, pthread_mutex_init);
87 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
88 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
/*
 * _mutex_reinit - reset a libc-internal ("private") mutex to a freshly
 * initialized, unlocked state.  Allocates the mutex via
 * _pthread_mutex_init() if the caller's pointer is still NULL;
 * otherwise rewrites every field in place: default type, no priority
 * protocol, empty wait queue, no owner, zeroed counts/priorities, and a
 * cleared structure spinlock.  ORs in MUTEX_FLAGS_PRIVATE so unlock
 * paths can identify it as libc-owned.
 * NOTE(review): interior lines (return-value plumbing, braces) are
 * missing from this excerpt — confirm against the full source.
 */
92 * Reinitialize a private mutex; this is only used for internal mutexes.
95 _mutex_reinit(pthread_mutex_t * mutex)
101 else if (*mutex == NULL)
102 ret = _pthread_mutex_init(mutex, NULL);
105 * Initialize the mutex structure:
107 (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
108 (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
109 TAILQ_INIT(&(*mutex)->m_queue);
110 (*mutex)->m_owner = NULL;
111 (*mutex)->m_data.m_count = 0;
112 (*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
113 (*mutex)->m_refcount = 0;
114 (*mutex)->m_prio = 0;
115 (*mutex)->m_saved_prio = 0;
116 _MUTEX_INIT_LINK(*mutex);
117 memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
/*
 * _pthread_mutex_init - create and initialize a mutex.
 *
 * With NULL attributes, defaults to an error-checking mutex with no
 * priority protocol and the maximum ceiling.  Otherwise validates the
 * attribute object's type and protocol, allocates the pthread_mutex
 * structure, initializes the wait queue, owner, refcount and priority
 * fields, and (for PTHREAD_PRIO_PROTECT) seeds m_prio with the ceiling.
 *
 * Returns 0 on success or an error number (EINVAL for bad attributes,
 * presumably ENOMEM on allocation failure — the failure branch is not
 * visible in this excerpt).
 *
 * NOTE(review): the protocol upper-bound check below compares
 * m_protocol against PTHREAD_MUTEX_RECURSIVE, which is a mutex *type*
 * constant, not a PTHREAD_PRIO_* protocol constant.  This looks like a
 * bug (the upstream check uses PTHREAD_PRIO_PROTECT) — confirm and fix
 * in the full source.
 */
123 _pthread_mutex_init(pthread_mutex_t * mutex,
124 const pthread_mutexattr_t * mutex_attr)
126 enum pthread_mutextype type;
130 pthread_mutex_t pmutex;
136 /* Check if default mutex attributes: */
137 if (mutex_attr == NULL || *mutex_attr == NULL) {
138 /* Default to a (error checking) POSIX mutex: */
139 type = PTHREAD_MUTEX_ERRORCHECK;
140 protocol = PTHREAD_PRIO_NONE;
141 ceiling = PTHREAD_MAX_PRIORITY;
145 /* Check mutex type: */
146 else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
147 ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
148 /* Return an invalid argument error: */
151 /* Check mutex protocol: */
152 else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
153 ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
154 /* Return an invalid argument error: */
158 /* Use the requested mutex type and protocol: */
159 type = (*mutex_attr)->m_type;
160 protocol = (*mutex_attr)->m_protocol;
161 ceiling = (*mutex_attr)->m_ceiling;
162 flags = (*mutex_attr)->m_flags;
165 /* Check no errors so far: */
167 if ((pmutex = (pthread_mutex_t)
168 malloc(sizeof(struct pthread_mutex))) == NULL)
171 /* Set the mutex flags: */
172 pmutex->m_flags = flags;
174 /* Process according to mutex type: */
176 /* case PTHREAD_MUTEX_DEFAULT: */
177 case PTHREAD_MUTEX_ERRORCHECK:
178 case PTHREAD_MUTEX_NORMAL:
179 /* Nothing to do here. */
182 /* Single UNIX Spec 2 recursive mutex: */
183 case PTHREAD_MUTEX_RECURSIVE:
184 /* Reset the mutex count: */
185 pmutex->m_data.m_count = 0;
188 /* Trap invalid mutex types: */
190 /* Return an invalid argument error: */
195 /* Initialise the rest of the mutex: */
196 TAILQ_INIT(&pmutex->m_queue);
197 pmutex->m_flags |= MUTEX_FLAGS_INITED;
198 pmutex->m_owner = NULL;
199 pmutex->m_type = type;
200 pmutex->m_protocol = protocol;
201 pmutex->m_refcount = 0;
202 if (protocol == PTHREAD_PRIO_PROTECT)
203 pmutex->m_prio = ceiling;
206 pmutex->m_saved_prio = 0;
207 _MUTEX_INIT_LINK(pmutex);
208 memset(&pmutex->lock, 0, sizeof(pmutex->lock));
216 /* Return the completion status: */
/*
 * _pthread_mutex_destroy - destroy a mutex and free its storage.
 *
 * Rejects NULL pointers, then takes the structure spinlock and refuses
 * to destroy a mutex that is still in use (owned, has waiters, or has
 * a non-zero refcount from condition-variable usage) — in that case
 * the spinlock is dropped and an error (presumably EBUSY) returned.
 * Otherwise the storage is freed and the caller's pointer is set to
 * NULL so stale handles cannot be reused.
 * NOTE(review): the free()/NULL-assignment statements themselves are
 * among the lines missing from this excerpt.
 */
221 _pthread_mutex_destroy(pthread_mutex_t * mutex)
225 if (mutex == NULL || *mutex == NULL)
228 /* Lock the mutex structure: */
229 _SPINLOCK(&(*mutex)->lock);
232 * Check to see if this mutex is in use:
234 if (((*mutex)->m_owner != NULL) ||
235 (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
236 ((*mutex)->m_refcount != 0)) {
239 /* Unlock the mutex structure: */
240 _SPINUNLOCK(&(*mutex)->lock);
244 * Free the memory allocated for the mutex
247 _MUTEX_ASSERT_NOT_OWNED(*mutex);
251 * Leave the caller's pointer NULL now that
252 * the mutex has been destroyed:
258 /* Return the completion status: */
/*
 * init_static - lazily initialize a statically-allocated mutex with
 * default (public) attributes.  Serialized by static_init_lock so two
 * threads racing on first use initialize it exactly once.
 */
263 init_static(pthread_mutex_t *mutex)
267 _SPINLOCK(&static_init_lock);
270 ret = _pthread_mutex_init(mutex, NULL);
274 _SPINUNLOCK(&static_init_lock);
/*
 * init_static_private - like init_static(), but initializes the mutex
 * with static_mattr so it is marked private (delete-safe, libc
 * internal).  Also serialized by static_init_lock.
 */
280 init_static_private(pthread_mutex_t *mutex)
284 _SPINLOCK(&static_init_lock);
287 ret = _pthread_mutex_init(mutex, &static_mattr);
291 _SPINUNLOCK(&static_init_lock);
/*
 * mutex_trylock_common - non-blocking lock attempt shared by the libc
 * and application trylock entry points.
 *
 * With signals deferred and the mutex's structure spinlock held:
 * finishes lazy init of statically-allocated mutexes, then dispatches
 * on the priority protocol.  In every protocol: an unowned mutex is
 * claimed and linked onto the calling thread's owned-mutex queue; a
 * mutex already owned by the caller goes through mutex_self_trylock()
 * (type-dependent: recursive count bump or EDEADLK/EBUSY); a mutex
 * owned by someone else yields EBUSY without blocking.  The
 * PRIO_INHERIT case additionally records the owner's priorities in the
 * mutex, and the PRIO_PROTECT case first rejects ceiling violations
 * (caller's active priority above m_prio) and otherwise raises the
 * caller to the ceiling priority.
 * NOTE(review): the assertion message below says
 * "pthread_mutex_trylock_basic", which matches no function name here —
 * looks like stale copy/paste worth cleaning up in the full source.
 */
297 mutex_trylock_common(pthread_mutex_t *mutex)
299 struct pthread *curthread = _get_curthread();
302 PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
303 "Uninitialized mutex in pthread_mutex_trylock_basic");
306 * Defer signals to protect the scheduling queues from
307 * access by the signal handler:
309 _thread_kern_sig_defer();
311 /* Lock the mutex structure: */
312 _SPINLOCK(&(*mutex)->lock);
315 * If the mutex was statically allocated, properly
316 * initialize the tail queue.
318 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
319 TAILQ_INIT(&(*mutex)->m_queue);
320 _MUTEX_INIT_LINK(*mutex);
321 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
324 /* Process according to mutex type: */
325 switch ((*mutex)->m_protocol) {
326 /* Default POSIX mutex: */
327 case PTHREAD_PRIO_NONE:
328 /* Check if this mutex is not locked: */
329 if ((*mutex)->m_owner == NULL) {
330 /* Lock the mutex for the running thread: */
331 (*mutex)->m_owner = curthread;
333 /* Add to the list of owned mutexes: */
334 _MUTEX_ASSERT_NOT_OWNED(*mutex);
335 TAILQ_INSERT_TAIL(&curthread->mutexq,
337 } else if ((*mutex)->m_owner == curthread)
338 ret = mutex_self_trylock(*mutex);
340 /* Return a busy error: */
344 /* POSIX priority inheritence mutex: */
345 case PTHREAD_PRIO_INHERIT:
346 /* Check if this mutex is not locked: */
347 if ((*mutex)->m_owner == NULL) {
348 /* Lock the mutex for the running thread: */
349 (*mutex)->m_owner = curthread;
351 /* Track number of priority mutexes owned: */
352 curthread->priority_mutex_count++;
355 * The mutex takes on the attributes of the
356 * running thread when there are no waiters.
358 (*mutex)->m_prio = curthread->active_priority;
359 (*mutex)->m_saved_prio =
360 curthread->inherited_priority;
362 /* Add to the list of owned mutexes: */
363 _MUTEX_ASSERT_NOT_OWNED(*mutex);
364 TAILQ_INSERT_TAIL(&curthread->mutexq,
366 } else if ((*mutex)->m_owner == curthread)
367 ret = mutex_self_trylock(*mutex);
369 /* Return a busy error: */
373 /* POSIX priority protection mutex: */
374 case PTHREAD_PRIO_PROTECT:
375 /* Check for a priority ceiling violation: */
376 if (curthread->active_priority > (*mutex)->m_prio)
379 /* Check if this mutex is not locked: */
380 else if ((*mutex)->m_owner == NULL) {
381 /* Lock the mutex for the running thread: */
382 (*mutex)->m_owner = curthread;
384 /* Track number of priority mutexes owned: */
385 curthread->priority_mutex_count++;
388 * The running thread inherits the ceiling
389 * priority of the mutex and executes at that
392 curthread->active_priority = (*mutex)->m_prio;
393 (*mutex)->m_saved_prio =
394 curthread->inherited_priority;
395 curthread->inherited_priority =
398 /* Add to the list of owned mutexes: */
399 _MUTEX_ASSERT_NOT_OWNED(*mutex);
400 TAILQ_INSERT_TAIL(&curthread->mutexq,
402 } else if ((*mutex)->m_owner == curthread)
403 ret = mutex_self_trylock(*mutex);
405 /* Return a busy error: */
409 /* Trap invalid mutex types: */
411 /* Return an invalid argument error: */
416 /* Unlock the mutex structure: */
417 _SPINUNLOCK(&(*mutex)->lock);
420 * Undefer and handle pending signals, yielding if
423 _thread_kern_sig_undefer();
425 /* Return the completion status: */
/*
 * __pthread_mutex_trylock - application entry point (weak alias target
 * for pthread_mutex_trylock).  Lazily initializes a statically
 * allocated mutex with *public* attributes, then defers to
 * mutex_trylock_common().
 */
430 __pthread_mutex_trylock(pthread_mutex_t *mutex)
438 * If the mutex is statically initialized, perform the dynamic
441 else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0)
442 ret = mutex_trylock_common(mutex);
/*
 * _pthread_mutex_trylock - libc-internal entry point.  Identical to the
 * application version except lazy static initialization marks the mutex
 * private (delete-safe) via init_static_private().
 */
448 _pthread_mutex_trylock(pthread_mutex_t *mutex)
456 * If the mutex is statically initialized, perform the dynamic
457 * initialization marking the mutex private (delete safe):
459 else if ((*mutex != NULL) || (ret = init_static_private(mutex)) == 0)
460 ret = mutex_trylock_common(mutex);
/*
 * mutex_lock_common - blocking lock shared by the libc and application
 * lock entry points.
 *
 * Loops until the calling thread owns the mutex, an error occurs, or
 * the thread is interrupted by a signal (waiters cannot stay queued
 * while running a handler, so they are backed out and retry).  Each
 * pass runs with signals deferred and the structure spinlock held, and
 * dispatches on protocol exactly like mutex_trylock_common(), except
 * that contention enqueues the caller on the mutex wait queue
 * (priority-ordered), records the mutex in curthread->data.mutex, and
 * blocks in PS_MUTEX_WAIT via _thread_kern_sched_state_unlock(), which
 * atomically releases the spinlock.  For PRIO_INHERIT, blocking also
 * propagates the waiter's priority to the owner via
 * mutex_priority_adjust().  For PRIO_PROTECT, a ceiling violation
 * detected while waiting is reported through curthread->error.  On
 * interruption the thread removes itself from the wait queue and, if a
 * continuation is pending, invokes it before returning.
 * NOTE(review): the assertion message says
 * "pthread_mutex_trylock_basic" — copy/paste from the trylock path;
 * worth correcting in the full source.
 */
466 mutex_lock_common(pthread_mutex_t * mutex)
468 struct pthread *curthread = _get_curthread();
471 PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
472 "Uninitialized mutex in pthread_mutex_trylock_basic");
474 /* Reset the interrupted flag: */
475 curthread->interrupted = 0;
478 * Enter a loop waiting to become the mutex owner. We need a
479 * loop in case the waiting thread is interrupted by a signal
480 * to execute a signal handler. It is not (currently) possible
481 * to remain in the waiting queue while running a handler.
482 * Instead, the thread is interrupted and backed out of the
483 * waiting queue prior to executing the signal handler.
487 * Defer signals to protect the scheduling queues from
488 * access by the signal handler:
490 _thread_kern_sig_defer();
492 /* Lock the mutex structure: */
493 _SPINLOCK(&(*mutex)->lock);
496 * If the mutex was statically allocated, properly
497 * initialize the tail queue.
499 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
500 TAILQ_INIT(&(*mutex)->m_queue);
501 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
502 _MUTEX_INIT_LINK(*mutex);
505 /* Process according to mutex type: */
506 switch ((*mutex)->m_protocol) {
507 /* Default POSIX mutex: */
508 case PTHREAD_PRIO_NONE:
509 if ((*mutex)->m_owner == NULL) {
510 /* Lock the mutex for this thread: */
511 (*mutex)->m_owner = curthread;
513 /* Add to the list of owned mutexes: */
514 _MUTEX_ASSERT_NOT_OWNED(*mutex);
515 TAILQ_INSERT_TAIL(&curthread->mutexq,
518 } else if ((*mutex)->m_owner == curthread)
519 ret = mutex_self_lock(*mutex);
522 * Join the queue of threads waiting to lock
525 mutex_queue_enq(*mutex, curthread);
528 * Keep a pointer to the mutex this thread
531 curthread->data.mutex = *mutex;
534 * Unlock the mutex structure and schedule the
537 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
538 &(*mutex)->lock, __FILE__, __LINE__);
540 /* Lock the mutex structure again: */
541 _SPINLOCK(&(*mutex)->lock);
545 /* POSIX priority inheritence mutex: */
546 case PTHREAD_PRIO_INHERIT:
547 /* Check if this mutex is not locked: */
548 if ((*mutex)->m_owner == NULL) {
549 /* Lock the mutex for this thread: */
550 (*mutex)->m_owner = curthread;
552 /* Track number of priority mutexes owned: */
553 curthread->priority_mutex_count++;
556 * The mutex takes on attributes of the
557 * running thread when there are no waiters.
559 (*mutex)->m_prio = curthread->active_priority;
560 (*mutex)->m_saved_prio =
561 curthread->inherited_priority;
562 curthread->inherited_priority =
565 /* Add to the list of owned mutexes: */
566 _MUTEX_ASSERT_NOT_OWNED(*mutex);
567 TAILQ_INSERT_TAIL(&curthread->mutexq,
570 } else if ((*mutex)->m_owner == curthread)
571 ret = mutex_self_lock(*mutex);
574 * Join the queue of threads waiting to lock
577 mutex_queue_enq(*mutex, curthread);
580 * Keep a pointer to the mutex this thread
583 curthread->data.mutex = *mutex;
585 if (curthread->active_priority >
587 /* Adjust priorities: */
588 mutex_priority_adjust(*mutex);
591 * Unlock the mutex structure and schedule the
594 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
595 &(*mutex)->lock, __FILE__, __LINE__);
597 /* Lock the mutex structure again: */
598 _SPINLOCK(&(*mutex)->lock);
602 /* POSIX priority protection mutex: */
603 case PTHREAD_PRIO_PROTECT:
604 /* Check for a priority ceiling violation: */
605 if (curthread->active_priority > (*mutex)->m_prio)
608 /* Check if this mutex is not locked: */
609 else if ((*mutex)->m_owner == NULL) {
611 * Lock the mutex for the running
614 (*mutex)->m_owner = curthread;
616 /* Track number of priority mutexes owned: */
617 curthread->priority_mutex_count++;
620 * The running thread inherits the ceiling
621 * priority of the mutex and executes at that
624 curthread->active_priority = (*mutex)->m_prio;
625 (*mutex)->m_saved_prio =
626 curthread->inherited_priority;
627 curthread->inherited_priority =
630 /* Add to the list of owned mutexes: */
631 _MUTEX_ASSERT_NOT_OWNED(*mutex);
632 TAILQ_INSERT_TAIL(&curthread->mutexq,
634 } else if ((*mutex)->m_owner == curthread)
635 ret = mutex_self_lock(*mutex);
638 * Join the queue of threads waiting to lock
641 mutex_queue_enq(*mutex, curthread);
644 * Keep a pointer to the mutex this thread
647 curthread->data.mutex = *mutex;
649 /* Clear any previous error: */
650 curthread->error = 0;
653 * Unlock the mutex structure and schedule the
656 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
657 &(*mutex)->lock, __FILE__, __LINE__);
659 /* Lock the mutex structure again: */
660 _SPINLOCK(&(*mutex)->lock);
663 * The threads priority may have changed while
664 * waiting for the mutex causing a ceiling
667 ret = curthread->error;
668 curthread->error = 0;
672 /* Trap invalid mutex types: */
674 /* Return an invalid argument error: */
680 * Check to see if this thread was interrupted and
681 * is still in the mutex queue of waiting threads:
683 if (curthread->interrupted != 0)
684 mutex_queue_remove(*mutex, curthread);
686 /* Unlock the mutex structure: */
687 _SPINUNLOCK(&(*mutex)->lock);
690 * Undefer and handle pending signals, yielding if
693 _thread_kern_sig_undefer();
694 } while (((*mutex)->m_owner != curthread) && (ret == 0) &&
695 (curthread->interrupted == 0));
697 if (curthread->interrupted != 0 &&
698 curthread->continuation != NULL)
699 curthread->continuation((void *) curthread);
701 /* Return the completion status: */
/*
 * __pthread_mutex_lock - application entry point (weak alias target for
 * pthread_mutex_lock).  Fails before the threads library is initialized
 * (_thread_initial == NULL); lazily initializes static mutexes with
 * public attributes, then defers to mutex_lock_common().
 */
706 __pthread_mutex_lock(pthread_mutex_t *mutex)
710 if (_thread_initial == NULL)
717 * If the mutex is statically initialized, perform the dynamic
720 else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0))
721 ret = mutex_lock_common(mutex);
/*
 * _pthread_mutex_lock - libc-internal entry point.  Same as the
 * application version except lazy static initialization marks the mutex
 * private (delete-safe).
 */
727 _pthread_mutex_lock(pthread_mutex_t *mutex)
731 if (_thread_initial == NULL)
738 * If the mutex is statically initialized, perform the dynamic
739 * initialization marking it private (delete safe):
741 else if ((*mutex != NULL) || ((ret = init_static_private(mutex)) == 0))
742 ret = mutex_lock_common(mutex);
/*
 * _pthread_mutex_unlock - public unlock: releases the mutex without
 * touching its condition-variable reference count.
 */
748 _pthread_mutex_unlock(pthread_mutex_t * mutex)
750 return (mutex_unlock_common(mutex, /* add reference */ 0));
/*
 * _mutex_cv_unlock - unlock on behalf of a condition-variable wait;
 * bumps m_refcount (add_reference != 0) so the mutex cannot be
 * destroyed while the CV still references it.
 */
754 _mutex_cv_unlock(pthread_mutex_t * mutex)
756 return (mutex_unlock_common(mutex, /* add reference */ 1));
/*
 * _mutex_cv_lock - reacquire a mutex after a condition-variable wait,
 * dropping the reference taken by _mutex_cv_unlock() on success.
 */
760 _mutex_cv_lock(pthread_mutex_t * mutex)
763 if ((ret = _pthread_mutex_lock(mutex)) == 0)
764 (*mutex)->m_refcount--;
/*
 * mutex_self_trylock - handle a trylock by the thread that already owns
 * the mutex.  ERRORCHECK/NORMAL mutexes fail with a deadlock-avoidance
 * error; RECURSIVE mutexes just bump the lock count; any other type is
 * an invalid argument.  (The return statements for the non-recursive
 * cases are not visible in this excerpt.)
 */
769 mutex_self_trylock(pthread_mutex_t mutex)
773 switch (mutex->m_type) {
775 /* case PTHREAD_MUTEX_DEFAULT: */
776 case PTHREAD_MUTEX_ERRORCHECK:
777 case PTHREAD_MUTEX_NORMAL:
779 * POSIX specifies that mutexes should return EDEADLK if a
780 * recursive lock is detected.
785 case PTHREAD_MUTEX_RECURSIVE:
786 /* Increment the lock count: */
787 mutex->m_data.m_count++;
791 /* Trap invalid mutex types; */
/*
 * mutex_self_lock - handle a blocking lock by the current owner.
 * ERRORCHECK returns EDEADLK; NORMAL deliberately deadlocks the thread
 * (parks it in PS_DEADLOCK, releasing the structure spinlock) per SUSv2
 * semantics; RECURSIVE bumps the count; other types are invalid.
 */
799 mutex_self_lock(pthread_mutex_t mutex)
803 switch (mutex->m_type) {
804 /* case PTHREAD_MUTEX_DEFAULT: */
805 case PTHREAD_MUTEX_ERRORCHECK:
807 * POSIX specifies that mutexes should return EDEADLK if a
808 * recursive lock is detected.
813 case PTHREAD_MUTEX_NORMAL:
815 * What SS2 define as a 'normal' mutex. Intentionally
816 * deadlock on attempts to get a lock you already own.
818 _thread_kern_sched_state_unlock(PS_DEADLOCK,
819 &mutex->lock, __FILE__, __LINE__);
822 case PTHREAD_MUTEX_RECURSIVE:
823 /* Increment the lock count: */
824 mutex->m_data.m_count++;
828 /* Trap invalid mutex types; */
/*
 * mutex_unlock_common - core unlock logic for all three protocols.
 *
 * With signals deferred and the structure spinlock held, each protocol
 * branch: (1) rejects unlock by a non-owner (EINVAL if unowned, EPERM
 * otherwise); (2) for a recursive mutex with count > 0 just decrements
 * the count; (3) otherwise fully releases: clears the count, unlinks
 * the mutex from the owner's mutexq, and hands ownership to the next
 * eligible waiter (if any), making it runnable via PTHREAD_NEW_STATE.
 *
 * Protocol-specific release work:
 *  - PRIO_INHERIT/PRIO_PROTECT: restore the releasing thread's
 *    inherited priority from m_saved_prio, recompute its active
 *    priority (MAX of inherited and base, so later base-priority
 *    changes are not clobbered), and decrement priority_mutex_count.
 *  - PRIO_INHERIT: the mutex's priority becomes the dequeued waiter's
 *    active priority; the new owner's inherited priority is saved
 *    into/restored from m_saved_prio.
 *  - PRIO_PROTECT: waiters whose active priority now exceeds the
 *    ceiling are popped with error = EINVAL and made runnable without
 *    ownership; the first eligible waiter inherits the ceiling
 *    priority.
 *
 * If add_reference is non-zero and the unlock succeeded, m_refcount is
 * incremented (condition-variable path).  Returns the completion
 * status.
 */
836 mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
838 struct pthread *curthread = _get_curthread();
841 if (mutex == NULL || *mutex == NULL) {
845 * Defer signals to protect the scheduling queues from
846 * access by the signal handler:
848 _thread_kern_sig_defer();
850 /* Lock the mutex structure: */
851 _SPINLOCK(&(*mutex)->lock);
853 /* Process according to mutex type: */
854 switch ((*mutex)->m_protocol) {
855 /* Default POSIX mutex: */
856 case PTHREAD_PRIO_NONE:
858 * Check if the running thread is not the owner of the
861 if ((*mutex)->m_owner != curthread) {
863 * Return an invalid argument error for no
864 * owner and a permission error otherwise:
866 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
868 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
869 ((*mutex)->m_data.m_count > 0)) {
870 /* Decrement the count: */
871 (*mutex)->m_data.m_count--;
874 * Clear the count in case this is recursive
877 (*mutex)->m_data.m_count = 0;
879 /* Remove the mutex from the threads queue. */
880 _MUTEX_ASSERT_IS_OWNED(*mutex);
881 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
883 _MUTEX_INIT_LINK(*mutex);
886 * Get the next thread from the queue of
887 * threads waiting on the mutex:
889 if (((*mutex)->m_owner =
890 mutex_queue_deq(*mutex)) != NULL) {
891 /* Make the new owner runnable: */
892 PTHREAD_NEW_STATE((*mutex)->m_owner,
896 * Add the mutex to the threads list of
899 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
903 * The owner is no longer waiting for
906 (*mutex)->m_owner->data.mutex = NULL;
911 /* POSIX priority inheritence mutex: */
912 case PTHREAD_PRIO_INHERIT:
914 * Check if the running thread is not the owner of the
917 if ((*mutex)->m_owner != curthread) {
919 * Return an invalid argument error for no
920 * owner and a permission error otherwise:
922 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
924 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
925 ((*mutex)->m_data.m_count > 0)) {
926 /* Decrement the count: */
927 (*mutex)->m_data.m_count--;
930 * Clear the count in case this is recursive
933 (*mutex)->m_data.m_count = 0;
936 * Restore the threads inherited priority and
937 * recompute the active priority (being careful
938 * not to override changes in the threads base
939 * priority subsequent to locking the mutex).
941 curthread->inherited_priority =
942 (*mutex)->m_saved_prio;
943 curthread->active_priority =
944 MAX(curthread->inherited_priority,
945 curthread->base_priority);
948 * This thread now owns one less priority mutex.
950 curthread->priority_mutex_count--;
952 /* Remove the mutex from the threads queue. */
953 _MUTEX_ASSERT_IS_OWNED(*mutex);
954 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
956 _MUTEX_INIT_LINK(*mutex);
959 * Get the next thread from the queue of threads
960 * waiting on the mutex:
962 if (((*mutex)->m_owner =
963 mutex_queue_deq(*mutex)) == NULL)
964 /* This mutex has no priority. */
965 (*mutex)->m_prio = 0;
968 * Track number of priority mutexes owned:
970 (*mutex)->m_owner->priority_mutex_count++;
973 * Add the mutex to the threads list
976 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
980 * The owner is no longer waiting for
983 (*mutex)->m_owner->data.mutex = NULL;
986 * Set the priority of the mutex. Since
987 * our waiting threads are in descending
988 * priority order, the priority of the
989 * mutex becomes the active priority of
990 * the thread we just dequeued.
993 (*mutex)->m_owner->active_priority;
996 * Save the owning threads inherited
999 (*mutex)->m_saved_prio =
1000 (*mutex)->m_owner->inherited_priority;
1003 * The owning threads inherited priority
1004 * now becomes his active priority (the
1005 * priority of the mutex).
1007 (*mutex)->m_owner->inherited_priority =
1011 * Make the new owner runnable:
1013 PTHREAD_NEW_STATE((*mutex)->m_owner,
1019 /* POSIX priority ceiling mutex: */
1020 case PTHREAD_PRIO_PROTECT:
1022 * Check if the running thread is not the owner of the
1025 if ((*mutex)->m_owner != curthread) {
1027 * Return an invalid argument error for no
1028 * owner and a permission error otherwise:
1030 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
1032 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1033 ((*mutex)->m_data.m_count > 0)) {
1034 /* Decrement the count: */
1035 (*mutex)->m_data.m_count--;
1038 * Clear the count in case this is recursive
1041 (*mutex)->m_data.m_count = 0;
1044 * Restore the threads inherited priority and
1045 * recompute the active priority (being careful
1046 * not to override changes in the threads base
1047 * priority subsequent to locking the mutex).
1049 curthread->inherited_priority =
1050 (*mutex)->m_saved_prio;
1051 curthread->active_priority =
1052 MAX(curthread->inherited_priority,
1053 curthread->base_priority);
1056 * This thread now owns one less priority mutex.
1058 curthread->priority_mutex_count--;
1060 /* Remove the mutex from the threads queue. */
1061 _MUTEX_ASSERT_IS_OWNED(*mutex);
1062 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
1064 _MUTEX_INIT_LINK(*mutex);
1067 * Enter a loop to find a waiting thread whose
1068 * active priority will not cause a ceiling
1071 while ((((*mutex)->m_owner =
1072 mutex_queue_deq(*mutex)) != NULL) &&
1073 ((*mutex)->m_owner->active_priority >
1074 (*mutex)->m_prio)) {
1076 * Either the mutex ceiling priority
1077 * been lowered and/or this threads
1078 * priority has been raised subsequent
1079 * to this thread being queued on the
1082 (*mutex)->m_owner->error = EINVAL;
1083 PTHREAD_NEW_STATE((*mutex)->m_owner,
1086 * The thread is no longer waiting for
1089 (*mutex)->m_owner->data.mutex = NULL;
1092 /* Check for a new owner: */
1093 if ((*mutex)->m_owner != NULL) {
1095 * Track number of priority mutexes owned:
1097 (*mutex)->m_owner->priority_mutex_count++;
1100 * Add the mutex to the threads list
1103 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
1107 * The owner is no longer waiting for
1110 (*mutex)->m_owner->data.mutex = NULL;
1113 * Save the owning threads inherited
1116 (*mutex)->m_saved_prio =
1117 (*mutex)->m_owner->inherited_priority;
1120 * The owning thread inherits the
1121 * ceiling priority of the mutex and
1122 * executes at that priority:
1124 (*mutex)->m_owner->inherited_priority =
1126 (*mutex)->m_owner->active_priority =
1130 * Make the new owner runnable:
1132 PTHREAD_NEW_STATE((*mutex)->m_owner,
1138 /* Trap invalid mutex types: */
1140 /* Return an invalid argument error: */
1145 if ((ret == 0) && (add_reference != 0)) {
1146 /* Increment the reference count: */
1147 (*mutex)->m_refcount++;
1150 /* Unlock the mutex structure: */
1151 _SPINUNLOCK(&(*mutex)->lock);
1154 * Undefer and handle pending signals, yielding if
1157 _thread_kern_sig_undefer();
1160 /* Return the completion status: */
/*
 * _mutex_notify_priochange - react to a base-priority change for a
 * thread that owns or waits on priority mutexes.  Must be called with
 * thread scheduling deferred.  Rescans all owned mutexes to fix their
 * priorities (side effect: updates the thread's active priority), and
 * if the thread is blocked in PS_MUTEX_WAIT, re-sorts it within the
 * wait queue (rechecking the state after taking the spinlock, since
 * spinning can yield the CPU) and, for PRIO_INHERIT mutexes, propagates
 * the new priority up the ownership chain.  Ceiling violations for
 * PRIO_PROTECT waiters are intentionally left to the unlock path.
 */
1166 * This function is called when a change in base priority occurs for
1167 * a thread that is holding or waiting for a priority protection or
1168 * inheritence mutex. A change in a threads base priority can effect
1169 * changes to active priorities of other threads and to the ordering
1170 * of mutex locking by waiting threads.
1172 * This must be called while thread scheduling is deferred.
1175 _mutex_notify_priochange(pthread_t pthread)
1177 /* Adjust the priorites of any owned priority mutexes: */
1178 if (pthread->priority_mutex_count > 0) {
1180 * Rescan the mutexes owned by this thread and correct
1181 * their priorities to account for this threads change
1182 * in priority. This has the side effect of changing
1183 * the threads active priority.
1185 mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
1189 * If this thread is waiting on a priority inheritence mutex,
1190 * check for priority adjustments. A change in priority can
1191 * also effect a ceiling violation(*) for a thread waiting on
1192 * a priority protection mutex; we don't perform the check here
1193 * as it is done in pthread_mutex_unlock.
1195 * (*) It should be noted that a priority change to a thread
1196 * _after_ taking and owning a priority ceiling mutex
1197 * does not affect ownership of that mutex; the ceiling
1198 * priority is only checked before mutex ownership occurs.
1200 if (pthread->state == PS_MUTEX_WAIT) {
1201 /* Lock the mutex structure: */
1202 _SPINLOCK(&pthread->data.mutex->lock);
1205 * Check to make sure this thread is still in the same state
1206 * (the spinlock above can yield the CPU to another thread):
1208 if (pthread->state == PS_MUTEX_WAIT) {
1210 * Remove and reinsert this thread into the list of
1211 * waiting threads to preserve decreasing priority
1214 mutex_queue_remove(pthread->data.mutex, pthread);
1215 mutex_queue_enq(pthread->data.mutex, pthread);
1217 if (pthread->data.mutex->m_protocol ==
1218 PTHREAD_PRIO_INHERIT) {
1219 /* Adjust priorities: */
1220 mutex_priority_adjust(pthread->data.mutex);
1224 /* Unlock the mutex structure: */
1225 _SPINUNLOCK(&pthread->data.mutex->lock);
/*
 * mutex_priority_adjust - propagate priority inheritance along a chain
 * of PRIO_INHERIT mutexes.
 *
 * Recomputes the mutex priority as the maximum of the highest-priority
 * waiter and the owner's recomputed priority (derived from m_saved_prio
 * and the owner's base priority, because the owner's current active
 * priority may already include inheritance from this very mutex).  If
 * that changes m_prio, the owner's other mutexes are rescanned
 * (mutex_rescan_owned), and if the owner is itself blocked on another
 * PRIO_INHERIT mutex, the thread is re-sorted in that queue and the
 * walk continues up the ownership chain until a mutex's priority stops
 * changing.  (The enclosing loop construct is partially missing from
 * this excerpt.)
 */
1230 * Called when a new thread is added to the mutex waiting queue or
1231 * when a threads priority changes that is already in the mutex
1235 mutex_priority_adjust(pthread_mutex_t mutex)
1237 pthread_t pthread_next, pthread = mutex->m_owner;
1239 pthread_mutex_t m = mutex;
1242 * Calculate the mutex priority as the maximum of the highest
1243 * active priority of any waiting threads and the owning threads
1244 * active priority(*).
1246 * (*) Because the owning threads current active priority may
1247 * reflect priority inherited from this mutex (and the mutex
1248 * priority may have changed) we must recalculate the active
1249 * priority based on the threads saved inherited priority
1250 * and its base priority.
1252 pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
1253 temp_prio = MAX(pthread_next->active_priority,
1254 MAX(m->m_saved_prio, pthread->base_priority));
1256 /* See if this mutex really needs adjusting: */
1257 if (temp_prio == m->m_prio)
1258 /* No need to propagate the priority: */
1261 /* Set new priority of the mutex: */
1262 m->m_prio = temp_prio;
1266 * Save the threads priority before rescanning the
1269 temp_prio = pthread->active_priority;
1272 * Fix the priorities for all the mutexes this thread has
1273 * locked since taking this mutex. This also has a
1274 * potential side-effect of changing the threads priority.
1276 mutex_rescan_owned(pthread, m);
1279 * If the thread is currently waiting on a mutex, check
1280 * to see if the threads new priority has affected the
1281 * priority of the mutex.
1283 if ((temp_prio != pthread->active_priority) &&
1284 (pthread->state == PS_MUTEX_WAIT) &&
1285 (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
1286 /* Grab the mutex this thread is waiting on: */
1287 m = pthread->data.mutex;
1290 * The priority for this thread has changed. Remove
1291 * and reinsert this thread into the list of waiting
1292 * threads to preserve decreasing priority order.
1294 mutex_queue_remove(m, pthread);
1295 mutex_queue_enq(m, pthread);
1297 /* Grab the waiting thread with highest priority: */
1298 pthread_next = TAILQ_FIRST(&m->m_queue);
1301 * Calculate the mutex priority as the maximum of the
1302 * highest active priority of any waiting threads and
1303 * the owning threads active priority.
1305 temp_prio = MAX(pthread_next->active_priority,
1306 MAX(m->m_saved_prio, m->m_owner->base_priority));
1308 if (temp_prio != m->m_prio) {
1310 * The priority needs to be propagated to the
1311 * mutex this thread is waiting on and up to
1312 * the owner of that mutex.
1314 m->m_prio = temp_prio;
1315 pthread = m->m_owner;
/*
 * mutex_rescan_owned - recompute priorities of the PRIO_INHERIT
 * mutexes a thread owns, starting either at the head of its owned
 * list (mutex == NULL) or just after a given mutex whose priority is
 * assumed already correct.  Walks the mutexq in order, threading the
 * inherited priority through each mutex (m_saved_prio gets the
 * inherited priority so far; m_prio becomes the max of the running
 * active priority and the top waiter's priority).  Finally updates the
 * thread's own inherited/active priority and, if the active priority
 * changed while the thread sits in the run queue, reinserts it —
 * at the head when the priority dropped and the thread still holds
 * priority mutexes (per POSIX), else at the tail.
 */
1329 mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
1331 int active_prio, inherited_prio;
1333 pthread_t pthread_next;
1336 * Start walking the mutexes the thread has taken since
1337 * taking this mutex.
1339 if (mutex == NULL) {
1341 * A null mutex means start at the beginning of the owned
1344 m = TAILQ_FIRST(&pthread->mutexq);
1346 /* There is no inherited priority yet. */
1351 * The caller wants to start after a specific mutex. It
1352 * is assumed that this mutex is a priority inheritence
1353 * mutex and that its priority has been correctly
1356 m = TAILQ_NEXT(mutex, m_qe);
1358 /* Start inheriting priority from the specified mutex. */
1359 inherited_prio = mutex->m_prio;
1361 active_prio = MAX(inherited_prio, pthread->base_priority);
1365 * We only want to deal with priority inheritence
1366 * mutexes. This might be optimized by only placing
1367 * priority inheritence mutexes into the owned mutex
1368 * list, but it may prove to be useful having all
1369 * owned mutexes in this list. Consider a thread
1370 * exiting while holding mutexes...
1372 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1374 * Fix the owners saved (inherited) priority to
1375 * reflect the priority of the previous mutex.
1377 m->m_saved_prio = inherited_prio;
1379 if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1380 /* Recalculate the priority of the mutex: */
1381 m->m_prio = MAX(active_prio,
1382 pthread_next->active_priority);
1384 m->m_prio = active_prio;
1386 /* Recalculate new inherited and active priorities: */
1387 inherited_prio = m->m_prio;
1388 active_prio = MAX(m->m_prio, pthread->base_priority);
1391 /* Advance to the next mutex owned by this thread: */
1392 m = TAILQ_NEXT(m, m_qe);
1396 * Fix the threads inherited priority and recalculate its
1399 pthread->inherited_priority = inherited_prio;
1400 active_prio = MAX(inherited_prio, pthread->base_priority);
1402 if (active_prio != pthread->active_priority) {
1404 * If this thread is in the priority queue, it must be
1405 * removed and reinserted for its new priority.
1407 if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
1409 * Remove the thread from the priority queue
1410 * before changing its priority:
1412 PTHREAD_PRIOQ_REMOVE(pthread);
1415 * POSIX states that if the priority is being
1416 * lowered, the thread must be inserted at the
1417 * head of the queue for its priority if it owns
1418 * any priority protection or inheritence mutexes.
1420 if ((active_prio < pthread->active_priority) &&
1421 (pthread->priority_mutex_count > 0)) {
1422 /* Set the new active priority. */
1423 pthread->active_priority = active_prio;
1425 PTHREAD_PRIOQ_INSERT_HEAD(pthread);
1428 /* Set the new active priority. */
1429 pthread->active_priority = active_prio;
1431 PTHREAD_PRIOQ_INSERT_TAIL(pthread);
1435 /* Set the new active priority. */
1436 pthread->active_priority = active_prio;
/*
 * _mutex_unlock_private - release every libc-private mutex a thread
 * still holds (used during thread teardown).  Saves the TAILQ_NEXT
 * pointer before unlocking because unlocking unlinks m from mutexq.
 */
1442 _mutex_unlock_private(pthread_t pthread)
1444 struct pthread_mutex *m, *m_next;
1446 for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1447 m_next = TAILQ_NEXT(m, m_qe);
1448 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1449 _pthread_mutex_unlock(&m);
/*
 * _mutex_lock_backout - back a thread out of a mutex wait queue so it
 * can run a signal handler.  With signals deferred, if the thread is
 * flagged as queued on a mutex, take that mutex's structure spinlock,
 * remove the thread from the wait queue, and clear its data.mutex
 * pointer.  The caller (mutex_lock_common) retries the lock afterward.
 */
1454 _mutex_lock_backout(pthread_t pthread)
1456 struct pthread_mutex *mutex;
1459 * Defer signals to protect the scheduling queues from
1460 * access by the signal handler:
1462 _thread_kern_sig_defer();
1463 if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1464 mutex = pthread->data.mutex;
1466 /* Lock the mutex structure: */
1467 _SPINLOCK(&mutex->lock);
1469 mutex_queue_remove(mutex, pthread);
1471 /* This thread is no longer waiting for the mutex: */
1472 pthread->data.mutex = NULL;
1474 /* Unlock the mutex structure: */
1475 _SPINUNLOCK(&mutex->lock);
1479 * Undefer and handle pending signals, yielding if
1482 _thread_kern_sig_undefer();
/*
 * mutex_queue_deq - pop the highest-priority waiter off a mutex wait
 * queue.  Skips (and dequeues) threads whose interrupted flag is set —
 * they are backing out to run a signal handler and must not be granted
 * ownership.  Returns NULL when no eligible waiter remains.
 */
1486 * Dequeue a waiting thread from the head of a mutex queue in descending
1489 static inline pthread_t
1490 mutex_queue_deq(pthread_mutex_t mutex)
1494 while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1495 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1496 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1499 * Only exit the loop if the thread hasn't been
1502 if (pthread->interrupted == 0)
/*
 * mutex_queue_remove - unlink a specific thread from a mutex wait
 * queue, if it is queued there (guarded by PTHREAD_FLAGS_IN_MUTEXQ so
 * a double remove is a harmless no-op).
 */
1510 * Remove a waiting thread from a mutex queue in descending priority order.
1513 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1515 if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1516 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1517 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
/*
 * mutex_queue_enq - insert a thread into a mutex wait queue, keeping it
 * sorted by descending active priority (FIFO among equal priorities).
 * Fast path: if the queue is empty or the new thread's priority is <=
 * the tail's, append at the tail; otherwise walk from the head to the
 * first lower-priority waiter and insert before it.  Finally marks the
 * thread as queued via PTHREAD_FLAGS_IN_MUTEXQ.
 */
1522 * Enqueue a waiting thread to a queue in descending priority order.
1525 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1527 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1529 PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
1531 * For the common case of all threads having equal priority,
1532 * we perform a quick check against the priority of the thread
1533 * at the tail of the queue.
1535 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1536 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1538 tid = TAILQ_FIRST(&mutex->m_queue);
1539 while (pthread->active_priority <= tid->active_priority)
1540 tid = TAILQ_NEXT(tid, sqe);
1541 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1543 pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;