2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #include <sys/param.h>
38 #include <sys/queue.h>
41 #include "pthread_private.h"
43 #if defined(_PTHREADS_INVARIANTS)
/*
 * Invariant-checking build: _MUTEX_INIT_LINK clears the TAILQ linkage of a
 * mutex, and the two ASSERT macros PANIC when the linkage disagrees with the
 * claimed ownership (tqe_prev is non-NULL exactly when the mutex is on some
 * thread's owned-mutex list).
 * NOTE(review): this excerpt is a lossy sample of the file — the
 * "} while (0)" macro closers and the #else/#endif lines are elided here.
 */
44 #define _MUTEX_INIT_LINK(m) do { \
45 (m)->m_qe.tqe_prev = NULL; \
46 (m)->m_qe.tqe_next = NULL; \
48 #define _MUTEX_ASSERT_IS_OWNED(m) do { \
49 if ((m)->m_qe.tqe_prev == NULL) \
50 PANIC("mutex is not on list"); \
52 #define _MUTEX_ASSERT_NOT_OWNED(m) do { \
53 if (((m)->m_qe.tqe_prev != NULL) || \
54 ((m)->m_qe.tqe_next != NULL)) \
55 PANIC("mutex is on list"); \
/* Production build: the invariant macros compile away to nothing. */
58 #define _MUTEX_INIT_LINK(m)
59 #define _MUTEX_ASSERT_IS_OWNED(m)
60 #define _MUTEX_ASSERT_NOT_OWNED(m)
/* Forward declarations for the file-local helpers defined later in this file. */
66 static inline int mutex_self_trylock(pthread_mutex_t);
67 static inline int mutex_self_lock(pthread_mutex_t);
68 static inline int mutex_unlock_common(pthread_mutex_t *, int);
69 static void mutex_priority_adjust(pthread_mutex_t);
70 static void mutex_rescan_owned (pthread_t, pthread_mutex_t);
71 static inline pthread_t mutex_queue_deq(pthread_mutex_t);
72 static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
73 static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
/*
 * Serializes the one-time dynamic initialization of statically initialized
 * mutexes (see init_static() below).
 */
76 static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
78 /* Reinitialize a mutex to defaults. */
/*
 * _mutex_reinit(mutex) resets an already-allocated mutex back to library
 * defaults: default type, PTHREAD_PRIO_NONE protocol, no owner, empty wait
 * queue, zeroed counts/priorities, and a cleared structure spinlock.  The
 * PRIVATE flag is preserved (m_flags &= MUTEX_FLAGS_PRIVATE) while INITED
 * is re-asserted.  A NULL *mutex is first created via pthread_mutex_init().
 * NOTE(review): the function header line, braces, the ret declaration and
 * the final return are elided from this excerpt — code kept byte-identical.
 */
80 _mutex_reinit(pthread_mutex_t * mutex)
86 else if (*mutex == NULL)
87 ret = pthread_mutex_init(mutex, NULL);
90 * Initialize the mutex structure:
92 (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
93 (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
94 TAILQ_INIT(&(*mutex)->m_queue);
95 (*mutex)->m_owner = NULL;
96 (*mutex)->m_data.m_count = 0;
/* Keep only the PRIVATE bit; everything else is reset, then INITED re-set. */
97 (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE;
98 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
99 (*mutex)->m_refcount = 0;
100 (*mutex)->m_prio = 0;
101 (*mutex)->m_saved_prio = 0;
102 _MUTEX_INIT_LINK(*mutex);
103 memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
/*
 * pthread_mutex_init(mutex, mutex_attr) — allocate and initialize a mutex.
 * With NULL (or NULL *) attributes, defaults to an error-checking mutex
 * with PTHREAD_PRIO_NONE protocol and a PTHREAD_MAX_PRIORITY ceiling;
 * otherwise validates and copies the requested type/protocol/ceiling.
 * NOTE(review): this excerpt elides the braces, the ret/EINVAL assignments,
 * the switch statement line, the "*mutex = pmutex" store and the return —
 * code below is kept byte-identical.
 */
109 pthread_mutex_init(pthread_mutex_t * mutex,
110 const pthread_mutexattr_t * mutex_attr)
112 enum pthread_mutextype type;
115 pthread_mutex_t pmutex;
121 /* Check if default mutex attributes: */
122 else if (mutex_attr == NULL || *mutex_attr == NULL) {
123 /* Default to a (error checking) POSIX mutex: */
124 type = PTHREAD_MUTEX_ERRORCHECK;
125 protocol = PTHREAD_PRIO_NONE;
126 ceiling = PTHREAD_MAX_PRIORITY;
129 /* Check mutex type: */
130 else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
131 ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
132 /* Return an invalid argument error: */
135 /* Check mutex protocol: */
136 else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
/*
 * NOTE(review): the upper bound here compares m_protocol against a mutex
 * *type* constant (PTHREAD_MUTEX_RECURSIVE) rather than a protocol constant
 * (presumably PTHREAD_PRIO_PROTECT was intended) — looks like a type/protocol
 * mix-up; confirm against pthread_private.h constant values before relying
 * on this range check.
 */
137 ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
138 /* Return an invalid argument error: */
142 /* Use the requested mutex type and protocol: */
143 type = (*mutex_attr)->m_type;
144 protocol = (*mutex_attr)->m_protocol;
145 ceiling = (*mutex_attr)->m_ceiling;
148 /* Check no errors so far: */
150 if ((pmutex = (pthread_mutex_t)
151 malloc(sizeof(struct pthread_mutex))) == NULL)
154 /* Reset the mutex flags: */
157 /* Process according to mutex type: */
159 /* case PTHREAD_MUTEX_DEFAULT: */
160 case PTHREAD_MUTEX_ERRORCHECK:
161 case PTHREAD_MUTEX_NORMAL:
162 /* Nothing to do here. */
165 /* Single UNIX Spec 2 recursive mutex: */
166 case PTHREAD_MUTEX_RECURSIVE:
167 /* Reset the mutex count: */
168 pmutex->m_data.m_count = 0;
171 /* Trap invalid mutex types: */
173 /* Return an invalid argument error: */
178 /* Initialise the rest of the mutex: */
179 TAILQ_INIT(&pmutex->m_queue);
180 pmutex->m_flags |= MUTEX_FLAGS_INITED;
181 pmutex->m_owner = NULL;
182 pmutex->m_type = type;
183 pmutex->m_protocol = protocol;
184 pmutex->m_refcount = 0;
/* Only PRIO_PROTECT mutexes carry the ceiling in m_prio from the start. */
185 if (protocol == PTHREAD_PRIO_PROTECT)
186 pmutex->m_prio = ceiling;
189 pmutex->m_saved_prio = 0;
190 _MUTEX_INIT_LINK(pmutex);
191 memset(&pmutex->lock, 0, sizeof(pmutex->lock));
199 /* Return the completion status: */
/*
 * pthread_mutex_destroy(mutex) — destroy and free a mutex.
 * Fails (visible checks suggest EBUSY — elided here) if the mutex is
 * currently owned, has waiters queued, or is referenced by a condition
 * variable (m_refcount != 0).  On success the caller's pointer is left
 * NULL so further use is caught.
 * NOTE(review): braces, error-code assignments, free() and the return are
 * elided from this excerpt — code kept byte-identical.
 */
204 pthread_mutex_destroy(pthread_mutex_t * mutex)
208 if (mutex == NULL || *mutex == NULL)
211 /* Lock the mutex structure: */
212 _SPINLOCK(&(*mutex)->lock);
215 * Check to see if this mutex is in use:
217 if (((*mutex)->m_owner != NULL) ||
218 (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
219 ((*mutex)->m_refcount != 0)) {
222 /* Unlock the mutex structure: */
223 _SPINUNLOCK(&(*mutex)->lock);
227 * Free the memory allocated for the mutex
/* Must not be linked on any thread's owned-mutex list when freed. */
230 _MUTEX_ASSERT_NOT_OWNED(*mutex);
234 * Leave the caller's pointer NULL now that
235 * the mutex has been destroyed:
241 /* Return the completion status: */
/*
 * init_static(mutex) — perform the deferred initialization of a statically
 * initialized mutex (a NULL *mutex).  The global static_init_lock spinlock
 * serializes racing first-lockers so pthread_mutex_init runs only once.
 * NOTE(review): braces, the ret declaration and return are elided here.
 */
246 init_static(pthread_mutex_t *mutex)
250 _SPINLOCK(&static_init_lock);
253 ret = pthread_mutex_init(mutex, NULL);
257 _SPINUNLOCK(&static_init_lock);
/*
 * pthread_mutex_trylock(mutex) — acquire the mutex without blocking.
 * Dispatches on the mutex protocol:
 *   PTHREAD_PRIO_NONE    — take if free, self-trylock if already owned by
 *                          this thread, otherwise busy.
 *   PTHREAD_PRIO_INHERIT — as above, plus records the owner's priority in
 *                          m_prio/m_saved_prio and bumps the owner's
 *                          priority-mutex count.
 *   PTHREAD_PRIO_PROTECT — additionally rejects on ceiling violation and
 *                          raises the caller to the ceiling priority.
 * Signals are deferred around the whole operation to keep the scheduler
 * queues consistent.
 * NOTE(review): braces, EBUSY/EINVAL assignments, the m_qe insert argument
 * lines and break statements are elided from this excerpt — code is kept
 * byte-identical.
 */
263 pthread_mutex_trylock(pthread_mutex_t * mutex)
271 * If the mutex is statically initialized, perform the dynamic
274 else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
276 * Defer signals to protect the scheduling queues from
277 * access by the signal handler:
279 _thread_kern_sig_defer();
281 /* Lock the mutex structure: */
282 _SPINLOCK(&(*mutex)->lock);
285 * If the mutex was statically allocated, properly
286 * initialize the tail queue.
288 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
289 TAILQ_INIT(&(*mutex)->m_queue);
290 _MUTEX_INIT_LINK(*mutex);
291 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
294 /* Process according to mutex type: */
295 switch ((*mutex)->m_protocol) {
296 /* Default POSIX mutex: */
297 case PTHREAD_PRIO_NONE:
298 /* Check if this mutex is not locked: */
299 if ((*mutex)->m_owner == NULL) {
300 /* Lock the mutex for the running thread: */
301 (*mutex)->m_owner = _thread_run;
303 /* Add to the list of owned mutexes: */
304 _MUTEX_ASSERT_NOT_OWNED(*mutex);
305 TAILQ_INSERT_TAIL(&_thread_run->mutexq,
307 } else if ((*mutex)->m_owner == _thread_run)
308 ret = mutex_self_trylock(*mutex);
310 /* Return a busy error: */
314 /* POSIX priority inheritence mutex: */
315 case PTHREAD_PRIO_INHERIT:
316 /* Check if this mutex is not locked: */
317 if ((*mutex)->m_owner == NULL) {
318 /* Lock the mutex for the running thread: */
319 (*mutex)->m_owner = _thread_run;
321 /* Track number of priority mutexes owned: */
322 _thread_run->priority_mutex_count++;
325 * The mutex takes on the attributes of the
326 * running thread when there are no waiters.
328 (*mutex)->m_prio = _thread_run->active_priority;
329 (*mutex)->m_saved_prio =
330 _thread_run->inherited_priority;
332 /* Add to the list of owned mutexes: */
333 _MUTEX_ASSERT_NOT_OWNED(*mutex);
334 TAILQ_INSERT_TAIL(&_thread_run->mutexq,
336 } else if ((*mutex)->m_owner == _thread_run)
337 ret = mutex_self_trylock(*mutex);
339 /* Return a busy error: */
343 /* POSIX priority protection mutex: */
344 case PTHREAD_PRIO_PROTECT:
345 /* Check for a priority ceiling violation: */
346 if (_thread_run->active_priority > (*mutex)->m_prio)
349 /* Check if this mutex is not locked: */
350 else if ((*mutex)->m_owner == NULL) {
351 /* Lock the mutex for the running thread: */
352 (*mutex)->m_owner = _thread_run;
354 /* Track number of priority mutexes owned: */
355 _thread_run->priority_mutex_count++;
358 * The running thread inherits the ceiling
359 * priority of the mutex and executes at that
/* Caller runs at the ceiling; its prior inherited priority is saved. */
362 _thread_run->active_priority = (*mutex)->m_prio;
363 (*mutex)->m_saved_prio =
364 _thread_run->inherited_priority;
365 _thread_run->inherited_priority =
368 /* Add to the list of owned mutexes: */
369 _MUTEX_ASSERT_NOT_OWNED(*mutex);
370 TAILQ_INSERT_TAIL(&_thread_run->mutexq,
372 } else if ((*mutex)->m_owner == _thread_run)
373 ret = mutex_self_trylock(*mutex);
375 /* Return a busy error: */
379 /* Trap invalid mutex types: */
381 /* Return an invalid argument error: */
386 /* Unlock the mutex structure: */
387 _SPINUNLOCK(&(*mutex)->lock);
390 * Undefer and handle pending signals, yielding if
393 _thread_kern_sig_undefer();
396 /* Return the completion status: */
/*
 * pthread_mutex_lock(mutex) — acquire the mutex, blocking if necessary.
 * Structure mirrors pthread_mutex_trylock, but on contention the caller is
 * enqueued on the mutex wait queue (descending priority order, see
 * mutex_queue_enq) and parked in PS_MUTEX_WAIT via
 * _thread_kern_sched_state_unlock, which atomically releases the structure
 * spinlock while scheduling away.  For PRIO_INHERIT, enqueueing a
 * higher-priority waiter triggers mutex_priority_adjust to propagate the
 * priority to the owner.  For PRIO_PROTECT, a ceiling change while waiting
 * is reported back through _thread_run->error.  A pending cancellation
 * (PTHREAD_CANCEL_NEEDED) is honored after the mutex operation completes.
 * NOTE(review): this excerpt elides braces, EINVAL/EDEADLK/ceiling error
 * assignments, the enclosing retry loop construct (the spinlock is
 * re-taken after waking), and the return — code is kept byte-identical.
 */
401 pthread_mutex_lock(pthread_mutex_t * mutex)
409 * If the mutex is statically initialized, perform the dynamic
412 else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
414 * Defer signals to protect the scheduling queues from
415 * access by the signal handler:
417 _thread_kern_sig_defer();
419 /* Lock the mutex structure: */
420 _SPINLOCK(&(*mutex)->lock);
423 * If the mutex was statically allocated, properly
424 * initialize the tail queue.
426 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
427 TAILQ_INIT(&(*mutex)->m_queue);
428 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
429 _MUTEX_INIT_LINK(*mutex);
432 /* Reset the interrupted flag: */
433 _thread_run->interrupted = 0;
435 /* Process according to mutex type: */
436 switch ((*mutex)->m_protocol) {
437 /* Default POSIX mutex: */
438 case PTHREAD_PRIO_NONE:
439 if ((*mutex)->m_owner == NULL) {
440 /* Lock the mutex for this thread: */
441 (*mutex)->m_owner = _thread_run;
443 /* Add to the list of owned mutexes: */
444 _MUTEX_ASSERT_NOT_OWNED(*mutex);
445 TAILQ_INSERT_TAIL(&_thread_run->mutexq,
448 } else if ((*mutex)->m_owner == _thread_run)
449 ret = mutex_self_lock(*mutex);
452 * Join the queue of threads waiting to lock
455 mutex_queue_enq(*mutex, _thread_run);
458 * Keep a pointer to the mutex this thread
/* Lets priority-change and unlock code find the mutex we block on. */
461 _thread_run->data.mutex = *mutex;
464 * Unlock the mutex structure and schedule the
467 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
468 &(*mutex)->lock, __FILE__, __LINE__);
470 /* Lock the mutex structure again: */
471 _SPINLOCK(&(*mutex)->lock);
475 /* POSIX priority inheritence mutex: */
476 case PTHREAD_PRIO_INHERIT:
477 /* Check if this mutex is not locked: */
478 if ((*mutex)->m_owner == NULL) {
479 /* Lock the mutex for this thread: */
480 (*mutex)->m_owner = _thread_run;
482 /* Track number of priority mutexes owned: */
483 _thread_run->priority_mutex_count++;
486 * The mutex takes on attributes of the
487 * running thread when there are no waiters.
489 (*mutex)->m_prio = _thread_run->active_priority;
490 (*mutex)->m_saved_prio =
491 _thread_run->inherited_priority;
492 _thread_run->inherited_priority =
495 /* Add to the list of owned mutexes: */
496 _MUTEX_ASSERT_NOT_OWNED(*mutex);
497 TAILQ_INSERT_TAIL(&_thread_run->mutexq,
500 } else if ((*mutex)->m_owner == _thread_run)
501 ret = mutex_self_lock(*mutex);
504 * Join the queue of threads waiting to lock
507 mutex_queue_enq(*mutex, _thread_run);
510 * Keep a pointer to the mutex this thread
513 _thread_run->data.mutex = *mutex;
/* Waiter outranks the mutex: propagate priority up the owner chain. */
515 if (_thread_run->active_priority >
517 /* Adjust priorities: */
518 mutex_priority_adjust(*mutex);
521 * Unlock the mutex structure and schedule the
524 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
525 &(*mutex)->lock, __FILE__, __LINE__);
527 /* Lock the mutex structure again: */
528 _SPINLOCK(&(*mutex)->lock);
532 /* POSIX priority protection mutex: */
533 case PTHREAD_PRIO_PROTECT:
534 /* Check for a priority ceiling violation: */
535 if (_thread_run->active_priority > (*mutex)->m_prio)
538 /* Check if this mutex is not locked: */
539 else if ((*mutex)->m_owner == NULL) {
541 * Lock the mutex for the running
544 (*mutex)->m_owner = _thread_run;
546 /* Track number of priority mutexes owned: */
547 _thread_run->priority_mutex_count++;
550 * The running thread inherits the ceiling
551 * priority of the mutex and executes at that
554 _thread_run->active_priority = (*mutex)->m_prio;
555 (*mutex)->m_saved_prio =
556 _thread_run->inherited_priority;
557 _thread_run->inherited_priority =
560 /* Add to the list of owned mutexes: */
561 _MUTEX_ASSERT_NOT_OWNED(*mutex);
562 TAILQ_INSERT_TAIL(&_thread_run->mutexq,
564 } else if ((*mutex)->m_owner == _thread_run)
565 ret = mutex_self_lock(*mutex);
568 * Join the queue of threads waiting to lock
571 mutex_queue_enq(*mutex, _thread_run);
574 * Keep a pointer to the mutex this thread
577 _thread_run->data.mutex = *mutex;
579 /* Clear any previous error: */
580 _thread_run->error = 0;
583 * Unlock the mutex structure and schedule the
586 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
587 &(*mutex)->lock, __FILE__, __LINE__);
589 /* Lock the mutex structure again: */
590 _SPINLOCK(&(*mutex)->lock);
593 * The threads priority may have changed while
594 * waiting for the mutex causing a ceiling
/* Unlock path may have posted EINVAL here on a ceiling violation. */
597 ret = _thread_run->error;
598 _thread_run->error = 0;
602 /* Trap invalid mutex types: */
604 /* Return an invalid argument error: */
610 * Check to see if this thread was interrupted and
611 * is still in the mutex queue of waiting threads:
613 if (_thread_run->interrupted != 0)
614 mutex_queue_remove(*mutex, _thread_run);
616 /* Unlock the mutex structure: */
617 _SPINUNLOCK(&(*mutex)->lock);
620 * Undefer and handle pending signals, yielding if
623 _thread_kern_sig_undefer();
/* Honor a deferred cancellation request before returning to the caller. */
625 if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) {
626 _thread_run->cancelflags &= ~PTHREAD_CANCEL_NEEDED;
627 _thread_exit_cleanup();
628 pthread_exit(PTHREAD_CANCELED);
632 /* Return the completion status: */
/*
 * pthread_mutex_unlock(mutex) — public unlock entry point.  Thin wrapper
 * around mutex_unlock_common with add_reference = 0 (the condvar refcount
 * is left untouched).
 */
637 pthread_mutex_unlock(pthread_mutex_t * mutex)
639 return (mutex_unlock_common(mutex, /* add reference */ 0));
/*
 * _mutex_cv_unlock(mutex) — unlock on behalf of a condition-variable wait.
 * Passes add_reference = 1 so mutex_unlock_common bumps m_refcount, keeping
 * the mutex alive (pthread_mutex_destroy refuses when m_refcount != 0)
 * until _mutex_cv_lock re-acquires it and drops the reference.
 */
643 _mutex_cv_unlock(pthread_mutex_t * mutex)
645 return (mutex_unlock_common(mutex, /* add reference */ 1));
/*
 * _mutex_cv_lock(mutex) — re-acquire a mutex after a condition-variable
 * wait, then drop the reference that _mutex_cv_unlock added.
 * NOTE(review): braces, the ret declaration and return are elided here.
 */
649 _mutex_cv_lock(pthread_mutex_t * mutex)
652 if ((ret = pthread_mutex_lock(mutex)) == 0)
653 (*mutex)->m_refcount--;
/*
 * mutex_self_trylock(mutex) — handle a trylock by the thread that already
 * owns the mutex.  Recursive mutexes just bump the lock count; for
 * errorcheck/normal types the comment indicates a deadlock-detection error
 * (the actual error assignment is elided from this excerpt).
 */
658 mutex_self_trylock(pthread_mutex_t mutex)
662 switch (mutex->m_type) {
664 /* case PTHREAD_MUTEX_DEFAULT: */
665 case PTHREAD_MUTEX_ERRORCHECK:
666 case PTHREAD_MUTEX_NORMAL:
668 * POSIX specifies that mutexes should return EDEADLK if a
669 * recursive lock is detected.
674 case PTHREAD_MUTEX_RECURSIVE:
675 /* Increment the lock count: */
676 mutex->m_data.m_count++;
680 /* Trap invalid mutex types; */
/*
 * mutex_self_lock(mutex) — handle a blocking lock by the current owner.
 * ERRORCHECK reports the recursion as an error (assignment elided here);
 * NORMAL deliberately deadlocks the thread by scheduling it into the
 * PS_DEADLOCK state (it never runs again); RECURSIVE bumps the count.
 */
688 mutex_self_lock(pthread_mutex_t mutex)
692 switch (mutex->m_type) {
693 /* case PTHREAD_MUTEX_DEFAULT: */
694 case PTHREAD_MUTEX_ERRORCHECK:
696 * POSIX specifies that mutexes should return EDEADLK if a
697 * recursive lock is detected.
702 case PTHREAD_MUTEX_NORMAL:
704 * What SS2 define as a 'normal' mutex. Intentionally
705 * deadlock on attempts to get a lock you already own.
/* Parks the thread permanently; also releases the structure spinlock. */
707 _thread_kern_sched_state_unlock(PS_DEADLOCK,
708 &mutex->lock, __FILE__, __LINE__);
711 case PTHREAD_MUTEX_RECURSIVE:
712 /* Increment the lock count: */
713 mutex->m_data.m_count++;
717 /* Trap invalid mutex types; */
/*
 * mutex_unlock_common(mutex, add_reference) — shared unlock implementation
 * behind pthread_mutex_unlock (add_reference = 0) and _mutex_cv_unlock
 * (add_reference = 1, which bumps m_refcount on success).
 *
 * Per protocol the unlock: verifies the caller owns the mutex (EINVAL when
 * unowned, EPERM otherwise), unwinds one level of recursion if the count
 * is > 1, else removes the mutex from the owner's list and hands it to the
 * next queued waiter (dequeued in descending priority order).  The
 * priority protocols also restore the unlocking thread's inherited/active
 * priority from m_saved_prio, adjust priority_mutex_count on both sides,
 * and for PRIO_PROTECT skip (and fail with EINVAL) waiters whose current
 * priority would violate the ceiling.
 * NOTE(review): this excerpt elides braces, break statements, several
 * TAILQ argument lines and assignment right-hand sides — code is kept
 * byte-identical.
 */
725 mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
729 if (mutex == NULL || *mutex == NULL) {
733 * Defer signals to protect the scheduling queues from
734 * access by the signal handler:
736 _thread_kern_sig_defer();
738 /* Lock the mutex structure: */
739 _SPINLOCK(&(*mutex)->lock);
741 /* Process according to mutex type: */
742 switch ((*mutex)->m_protocol) {
743 /* Default POSIX mutex: */
744 case PTHREAD_PRIO_NONE:
746 * Check if the running thread is not the owner of the
749 if ((*mutex)->m_owner != _thread_run) {
751 * Return an invalid argument error for no
752 * owner and a permission error otherwise:
754 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
756 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
757 ((*mutex)->m_data.m_count > 1)) {
758 /* Decrement the count: */
759 (*mutex)->m_data.m_count--;
762 * Clear the count in case this is recursive
765 (*mutex)->m_data.m_count = 0;
767 /* Remove the mutex from the threads queue. */
768 _MUTEX_ASSERT_IS_OWNED(*mutex);
769 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
771 _MUTEX_INIT_LINK(*mutex);
774 * Get the next thread from the queue of
775 * threads waiting on the mutex:
777 if (((*mutex)->m_owner =
778 mutex_queue_deq(*mutex)) != NULL) {
780 * Allow the new owner of the mutex to
783 PTHREAD_NEW_STATE((*mutex)->m_owner,
787 * Add the mutex to the threads list of
790 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
794 * The owner is no longer waiting for
797 (*mutex)->m_owner->data.mutex = NULL;
802 /* POSIX priority inheritence mutex: */
803 case PTHREAD_PRIO_INHERIT:
805 * Check if the running thread is not the owner of the
808 if ((*mutex)->m_owner != _thread_run) {
810 * Return an invalid argument error for no
811 * owner and a permission error otherwise:
813 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
815 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
816 ((*mutex)->m_data.m_count > 1)) {
817 /* Decrement the count: */
818 (*mutex)->m_data.m_count--;
821 * Clear the count in case this is recursive
824 (*mutex)->m_data.m_count = 0;
827 * Restore the threads inherited priority and
828 * recompute the active priority (being careful
829 * not to override changes in the threads base
830 * priority subsequent to locking the mutex).
832 _thread_run->inherited_priority =
833 (*mutex)->m_saved_prio;
834 _thread_run->active_priority =
835 MAX(_thread_run->inherited_priority,
836 _thread_run->base_priority);
839 * This thread now owns one less priority mutex.
841 _thread_run->priority_mutex_count--;
843 /* Remove the mutex from the threads queue. */
844 _MUTEX_ASSERT_IS_OWNED(*mutex);
845 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
847 _MUTEX_INIT_LINK(*mutex);
850 * Get the next thread from the queue of threads
851 * waiting on the mutex:
853 if (((*mutex)->m_owner =
854 mutex_queue_deq(*mutex)) == NULL)
855 /* This mutex has no priority. */
856 (*mutex)->m_prio = 0;
859 * Track number of priority mutexes owned:
861 (*mutex)->m_owner->priority_mutex_count++;
864 * Add the mutex to the threads list
867 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
871 * The owner is no longer waiting for
874 (*mutex)->m_owner->data.mutex = NULL;
877 * Set the priority of the mutex. Since
878 * our waiting threads are in descending
879 * priority order, the priority of the
880 * mutex becomes the active priority of
881 * the thread we just dequeued.
884 (*mutex)->m_owner->active_priority;
887 * Save the owning threads inherited
890 (*mutex)->m_saved_prio =
891 (*mutex)->m_owner->inherited_priority;
894 * The owning threads inherited priority
895 * now becomes his active priority (the
896 * priority of the mutex).
898 (*mutex)->m_owner->inherited_priority =
902 * Allow the new owner of the mutex to
905 PTHREAD_NEW_STATE((*mutex)->m_owner,
911 /* POSIX priority ceiling mutex: */
912 case PTHREAD_PRIO_PROTECT:
914 * Check if the running thread is not the owner of the
917 if ((*mutex)->m_owner != _thread_run) {
919 * Return an invalid argument error for no
920 * owner and a permission error otherwise:
922 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
924 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
925 ((*mutex)->m_data.m_count > 1)) {
926 /* Decrement the count: */
927 (*mutex)->m_data.m_count--;
930 * Clear the count in case this is recursive
933 (*mutex)->m_data.m_count = 0;
936 * Restore the threads inherited priority and
937 * recompute the active priority (being careful
938 * not to override changes in the threads base
939 * priority subsequent to locking the mutex).
941 _thread_run->inherited_priority =
942 (*mutex)->m_saved_prio;
943 _thread_run->active_priority =
944 MAX(_thread_run->inherited_priority,
945 _thread_run->base_priority);
948 * This thread now owns one less priority mutex.
950 _thread_run->priority_mutex_count--;
952 /* Remove the mutex from the threads queue. */
953 _MUTEX_ASSERT_IS_OWNED(*mutex);
954 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
956 _MUTEX_INIT_LINK(*mutex);
959 * Enter a loop to find a waiting thread whose
960 * active priority will not cause a ceiling
/* Waiters violating the (possibly changed) ceiling are woken with EINVAL. */
963 while ((((*mutex)->m_owner =
964 mutex_queue_deq(*mutex)) != NULL) &&
965 ((*mutex)->m_owner->active_priority >
968 * Either the mutex ceiling priority
969 * been lowered and/or this threads
970 * priority has been raised subsequent
971 * to this thread being queued on the
974 (*mutex)->m_owner->error = EINVAL;
975 PTHREAD_NEW_STATE((*mutex)->m_owner,
978 * The thread is no longer waiting for
981 (*mutex)->m_owner->data.mutex = NULL;
984 /* Check for a new owner: */
985 if ((*mutex)->m_owner != NULL) {
987 * Track number of priority mutexes owned:
989 (*mutex)->m_owner->priority_mutex_count++;
992 * Add the mutex to the threads list
995 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
999 * The owner is no longer waiting for
1002 (*mutex)->m_owner->data.mutex = NULL;
1005 * Save the owning threads inherited
1008 (*mutex)->m_saved_prio =
1009 (*mutex)->m_owner->inherited_priority;
1012 * The owning thread inherits the
1013 * ceiling priority of the mutex and
1014 * executes at that priority:
1016 (*mutex)->m_owner->inherited_priority =
1018 (*mutex)->m_owner->active_priority =
1022 * Allow the new owner of the mutex to
1025 PTHREAD_NEW_STATE((*mutex)->m_owner,
1031 /* Trap invalid mutex types: */
1033 /* Return an invalid argument error: */
/* Condvar path (add_reference): keep the mutex pinned until re-lock. */
1038 if ((ret == 0) && (add_reference != 0)) {
1039 /* Increment the reference count: */
1040 (*mutex)->m_refcount++;
1043 /* Unlock the mutex structure: */
1044 _SPINUNLOCK(&(*mutex)->lock);
1047 * Undefer and handle pending signals, yielding if
1050 _thread_kern_sig_undefer();
1053 /* Return the completion status: */
1059 * This function is called when a change in base priority occurs for
1060 * a thread that is holding or waiting for a priority protection or
1061 * inheritence mutex. A change in a threads base priority can effect
1062 * changes to active priorities of other threads and to the ordering
1063 * of mutex locking by waiting threads.
1065 * This must be called while thread scheduling is deferred.
/*
 * _mutex_notify_priochange(pthread): re-derives priorities of the mutexes
 * the thread owns (mutex_rescan_owned) and, if the thread is blocked on a
 * mutex, re-sorts it in that mutex's wait queue and — for PRIO_INHERIT —
 * propagates the change to the owner (mutex_priority_adjust).
 */
1068 _mutex_notify_priochange(pthread_t pthread)
1070 /* Adjust the priorites of any owned priority mutexes: */
1071 if (pthread->priority_mutex_count > 0) {
1073 * Rescan the mutexes owned by this thread and correct
1074 * their priorities to account for this threads change
1075 * in priority. This has the side effect of changing
1076 * the threads active priority.
1078 mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
1082 * If this thread is waiting on a priority inheritence mutex,
1083 * check for priority adjustments. A change in priority can
1084 * also effect a ceiling violation(*) for a thread waiting on
1085 * a priority protection mutex; we don't perform the check here
1086 * as it is done in pthread_mutex_unlock.
1088 * (*) It should be noted that a priority change to a thread
1089 * _after_ taking and owning a priority ceiling mutex
1090 * does not affect ownership of that mutex; the ceiling
1091 * priority is only checked before mutex ownership occurs.
1093 if (pthread->state == PS_MUTEX_WAIT) {
1094 /* Lock the mutex structure: */
1095 _SPINLOCK(&pthread->data.mutex->lock);
1098 * Check to make sure this thread is still in the same state
1099 * (the spinlock above can yield the CPU to another thread):
1101 if (pthread->state == PS_MUTEX_WAIT) {
1103 * Remove and reinsert this thread into the list of
1104 * waiting threads to preserve decreasing priority
1107 mutex_queue_remove(pthread->data.mutex, pthread);
1108 mutex_queue_enq(pthread->data.mutex, pthread);
1110 if (pthread->data.mutex->m_protocol ==
1111 PTHREAD_PRIO_INHERIT) {
1112 /* Adjust priorities: */
1113 mutex_priority_adjust(pthread->data.mutex);
1117 /* Unlock the mutex structure: */
1118 _SPINUNLOCK(&pthread->data.mutex->lock);
1123 * Called when a new thread is added to the mutex waiting queue or
1124 * when a threads priority changes that is already in the mutex
/*
 * mutex_priority_adjust(mutex): recompute a PRIO_INHERIT mutex's priority
 * from its highest-priority waiter and the owner's saved/base priority,
 * then walk up the chain — if the owner is itself blocked on another
 * PRIO_INHERIT mutex, the adjustment is propagated to that mutex's owner.
 * NOTE(review): the enclosing propagation loop construct, braces and the
 * early return are elided from this excerpt — code kept byte-identical.
 */
1128 mutex_priority_adjust(pthread_mutex_t mutex)
1130 pthread_t pthread_next, pthread = mutex->m_owner;
1132 pthread_mutex_t m = mutex;
1135 * Calculate the mutex priority as the maximum of the highest
1136 * active priority of any waiting threads and the owning threads
1137 * active priority(*).
1139 * (*) Because the owning threads current active priority may
1140 * reflect priority inherited from this mutex (and the mutex
1141 * priority may have changed) we must recalculate the active
1142 * priority based on the threads saved inherited priority
1143 * and its base priority.
1145 pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
1146 temp_prio = MAX(pthread_next->active_priority,
1147 MAX(m->m_saved_prio, pthread->base_priority));
1149 /* See if this mutex really needs adjusting: */
1150 if (temp_prio == m->m_prio)
1151 /* No need to propagate the priority: */
1154 /* Set new priority of the mutex: */
1155 m->m_prio = temp_prio;
1159 * Save the threads priority before rescanning the
1162 temp_prio = pthread->active_priority;
1165 * Fix the priorities for all the mutexes this thread has
1166 * locked since taking this mutex. This also has a
1167 * potential side-effect of changing the threads priority.
1169 mutex_rescan_owned(pthread, m);
1172 * If the thread is currently waiting on a mutex, check
1173 * to see if the threads new priority has affected the
1174 * priority of the mutex.
1176 if ((temp_prio != pthread->active_priority) &&
1177 (pthread->state == PS_MUTEX_WAIT) &&
1178 (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
1179 /* Grab the mutex this thread is waiting on: */
1180 m = pthread->data.mutex;
1183 * The priority for this thread has changed. Remove
1184 * and reinsert this thread into the list of waiting
1185 * threads to preserve decreasing priority order.
1187 mutex_queue_remove(m, pthread);
1188 mutex_queue_enq(m, pthread);
1190 /* Grab the waiting thread with highest priority: */
1191 pthread_next = TAILQ_FIRST(&m->m_queue);
1194 * Calculate the mutex priority as the maximum of the
1195 * highest active priority of any waiting threads and
1196 * the owning threads active priority.
1198 temp_prio = MAX(pthread_next->active_priority,
1199 MAX(m->m_saved_prio, m->m_owner->base_priority));
1201 if (temp_prio != m->m_prio) {
1203 * The priority needs to be propagated to the
1204 * mutex this thread is waiting on and up to
1205 * the owner of that mutex.
1207 m->m_prio = temp_prio;
/* Continue propagation with the next owner up the chain. */
1208 pthread = m->m_owner;
/*
 * mutex_rescan_owned(pthread, mutex): walk the thread's owned-mutex list —
 * from the start when mutex is NULL, otherwise from the entry after the
 * given (assumed PRIO_INHERIT, already-correct) mutex — recomputing each
 * PRIO_INHERIT mutex's m_saved_prio/m_prio and accumulating the inherited
 * priority.  Finally fixes the thread's inherited/active priority and, if
 * the active priority changed while the thread sits in the run queue,
 * re-inserts it (at the head when POSIX requires it: priority lowered
 * while holding priority mutexes; at the tail otherwise).
 * NOTE(review): braces, the loop construct around the per-mutex body, and
 * the inherited_prio = 0 initialization are elided from this excerpt.
 */
1222 mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
1224 int active_prio, inherited_prio;
1226 pthread_t pthread_next;
1229 * Start walking the mutexes the thread has taken since
1230 * taking this mutex.
1232 if (mutex == NULL) {
1234 * A null mutex means start at the beginning of the owned
1237 m = TAILQ_FIRST(&pthread->mutexq);
1239 /* There is no inherited priority yet. */
1244 * The caller wants to start after a specific mutex. It
1245 * is assumed that this mutex is a priority inheritence
1246 * mutex and that its priority has been correctly
1249 m = TAILQ_NEXT(mutex, m_qe);
1251 /* Start inheriting priority from the specified mutex. */
1252 inherited_prio = mutex->m_prio;
1254 active_prio = MAX(inherited_prio, pthread->base_priority);
1258 * We only want to deal with priority inheritence
1259 * mutexes. This might be optimized by only placing
1260 * priority inheritence mutexes into the owned mutex
1261 * list, but it may prove to be useful having all
1262 * owned mutexes in this list. Consider a thread
1263 * exiting while holding mutexes...
1265 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1267 * Fix the owners saved (inherited) priority to
1268 * reflect the priority of the previous mutex.
1270 m->m_saved_prio = inherited_prio;
1272 if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1273 /* Recalculate the priority of the mutex: */
1274 m->m_prio = MAX(active_prio,
1275 pthread_next->active_priority);
1277 m->m_prio = active_prio;
1279 /* Recalculate new inherited and active priorities: */
1280 inherited_prio = m->m_prio;
1281 active_prio = MAX(m->m_prio, pthread->base_priority);
1284 /* Advance to the next mutex owned by this thread: */
1285 m = TAILQ_NEXT(m, m_qe);
1289 * Fix the threads inherited priority and recalculate its
1292 pthread->inherited_priority = inherited_prio;
1293 active_prio = MAX(inherited_prio, pthread->base_priority);
1295 if (active_prio != pthread->active_priority) {
1297 * If this thread is in the priority queue, it must be
1298 * removed and reinserted for its new priority.
1300 if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
1302 * Remove the thread from the priority queue
1303 * before changing its priority:
1305 PTHREAD_PRIOQ_REMOVE(pthread);
1308 * POSIX states that if the priority is being
1309 * lowered, the thread must be inserted at the
1310 * head of the queue for its priority if it owns
1311 * any priority protection or inheritence mutexes.
1313 if ((active_prio < pthread->active_priority) &&
1314 (pthread->priority_mutex_count > 0)) {
1315 /* Set the new active priority. */
1316 pthread->active_priority = active_prio;
1318 PTHREAD_PRIOQ_INSERT_HEAD(pthread);
1321 /* Set the new active priority. */
1322 pthread->active_priority = active_prio;
1324 PTHREAD_PRIOQ_INSERT_TAIL(pthread);
1328 /* Set the new active priority. */
1329 pthread->active_priority = active_prio;
/*
 * _mutex_unlock_private(pthread) — release every library-private
 * (MUTEX_FLAGS_PRIVATE) mutex the thread still owns, e.g. on thread exit.
 * The next pointer is captured before each unlock because unlocking
 * removes the mutex from pthread->mutexq.
 */
1335 _mutex_unlock_private(pthread_t pthread)
1337 struct pthread_mutex *m, *m_next;
1339 for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1340 m_next = TAILQ_NEXT(m, m_qe);
1341 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1342 pthread_mutex_unlock(&m);
1347 * Dequeue a waiting thread from the head of a mutex queue in descending
/*
 * mutex_queue_deq(mutex): pop waiters off the head of the wait queue,
 * discarding any that were interrupted while waiting, and return the first
 * non-interrupted one (the loop-exit/return lines are elided here;
 * presumably NULL is returned when the queue drains — confirm).
 */
1350 static inline pthread_t
1351 mutex_queue_deq(pthread_mutex_t mutex)
1355 while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1356 TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
1357 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1360 * Only exit the loop if the thread hasn't been
1363 if (pthread->interrupted == 0)
1371 * Remove a waiting thread from a mutex queue in descending priority order.
/*
 * mutex_queue_remove(mutex, pthread): unlink the thread from the wait
 * queue if it is actually queued; the IN_MUTEXQ flag makes the operation
 * idempotent.
 */
1374 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1376 if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1377 TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
1378 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1383 * Enqueue a waiting thread to a queue in descending priority order.
/*
 * mutex_queue_enq(mutex, pthread): insert the thread so the queue stays
 * sorted by decreasing active_priority; equal priorities go behind
 * existing waiters (FIFO within a priority level, via the <= compares).
 */
1386 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1388 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1391 * For the common case of all threads having equal priority,
1392 * we perform a quick check against the priority of the thread
1393 * at the tail of the queue.
1395 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1396 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, qe);
/* Otherwise scan from the head for the first lower-priority waiter. */
1398 tid = TAILQ_FIRST(&mutex->m_queue);
1399 while (pthread->active_priority <= tid->active_priority)
1400 tid = TAILQ_NEXT(tid, qe);
1401 TAILQ_INSERT_BEFORE(tid, pthread, qe);
1403 pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;