2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "pthread_private.h"
#if defined(_PTHREADS_INVARIANTS)
/*
 * Debug builds: maintain and check the tail-queue linkage of each
 * mutex so that list corruption panics immediately instead of
 * corrupting memory silently.  Multi-statement macro bodies are
 * wrapped in do { ... } while (0) so they behave as single
 * statements in if/else contexts.
 */
#define _MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define _MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define _MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
/* Production builds: the invariant checks compile away to nothing. */
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif
/*
 * Prototypes for routines private to this file.
 */
static inline int	mutex_self_trylock(pthread_mutex_t);
static inline int	mutex_self_lock(pthread_mutex_t);
static inline int	mutex_unlock_common(pthread_mutex_t *, int);
static void		mutex_priority_adjust(pthread_mutex_t);
static void		mutex_rescan_owned (pthread_t, pthread_mutex_t);
static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
/* Serializes lazy initialization of statically-allocated mutexes: */
static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;

/* Attribute used to mark libc-internal (private) mutexes: */
static struct pthread_mutex_attr	static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t		static_mattr = &static_mutex_attr;
/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
89 * Reinitialize a private mutex; this is only used for internal mutexes.
92 _mutex_reinit(pthread_mutex_t * mutex)
98 else if (*mutex == NULL)
99 ret = _pthread_mutex_init(mutex, NULL);
102 * Initialize the mutex structure:
104 (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
105 (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
106 TAILQ_INIT(&(*mutex)->m_queue);
107 (*mutex)->m_owner = NULL;
108 (*mutex)->m_data.m_count = 0;
109 (*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
110 (*mutex)->m_refcount = 0;
111 (*mutex)->m_prio = 0;
112 (*mutex)->m_saved_prio = 0;
113 _MUTEX_INIT_LINK(*mutex);
114 memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
120 _pthread_mutex_init(pthread_mutex_t * mutex,
121 const pthread_mutexattr_t * mutex_attr)
123 enum pthread_mutextype type;
127 pthread_mutex_t pmutex;
133 /* Check if default mutex attributes: */
134 if (mutex_attr == NULL || *mutex_attr == NULL) {
135 /* Default to a (error checking) POSIX mutex: */
136 type = PTHREAD_MUTEX_ERRORCHECK;
137 protocol = PTHREAD_PRIO_NONE;
138 ceiling = PTHREAD_MAX_PRIORITY;
142 /* Check mutex type: */
143 else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
144 ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
145 /* Return an invalid argument error: */
148 /* Check mutex protocol: */
149 else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
150 ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
151 /* Return an invalid argument error: */
155 /* Use the requested mutex type and protocol: */
156 type = (*mutex_attr)->m_type;
157 protocol = (*mutex_attr)->m_protocol;
158 ceiling = (*mutex_attr)->m_ceiling;
159 flags = (*mutex_attr)->m_flags;
162 /* Check no errors so far: */
164 if ((pmutex = (pthread_mutex_t)
165 malloc(sizeof(struct pthread_mutex))) == NULL)
168 /* Set the mutex flags: */
169 pmutex->m_flags = flags;
171 /* Process according to mutex type: */
173 /* case PTHREAD_MUTEX_DEFAULT: */
174 case PTHREAD_MUTEX_ERRORCHECK:
175 case PTHREAD_MUTEX_NORMAL:
176 /* Nothing to do here. */
179 /* Single UNIX Spec 2 recursive mutex: */
180 case PTHREAD_MUTEX_RECURSIVE:
181 /* Reset the mutex count: */
182 pmutex->m_data.m_count = 0;
185 /* Trap invalid mutex types: */
187 /* Return an invalid argument error: */
192 /* Initialise the rest of the mutex: */
193 TAILQ_INIT(&pmutex->m_queue);
194 pmutex->m_flags |= MUTEX_FLAGS_INITED;
195 pmutex->m_owner = NULL;
196 pmutex->m_type = type;
197 pmutex->m_protocol = protocol;
198 pmutex->m_refcount = 0;
199 if (protocol == PTHREAD_PRIO_PROTECT)
200 pmutex->m_prio = ceiling;
203 pmutex->m_saved_prio = 0;
204 _MUTEX_INIT_LINK(pmutex);
205 memset(&pmutex->lock, 0, sizeof(pmutex->lock));
213 /* Return the completion status: */
218 _pthread_mutex_destroy(pthread_mutex_t * mutex)
222 if (mutex == NULL || *mutex == NULL)
225 /* Lock the mutex structure: */
226 _SPINLOCK(&(*mutex)->lock);
229 * Check to see if this mutex is in use:
231 if (((*mutex)->m_owner != NULL) ||
232 (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
233 ((*mutex)->m_refcount != 0)) {
236 /* Unlock the mutex structure: */
237 _SPINUNLOCK(&(*mutex)->lock);
241 * Free the memory allocated for the mutex
244 _MUTEX_ASSERT_NOT_OWNED(*mutex);
248 * Leave the caller's pointer NULL now that
249 * the mutex has been destroyed:
255 /* Return the completion status: */
260 init_static(pthread_mutex_t *mutex)
264 _SPINLOCK(&static_init_lock);
267 ret = _pthread_mutex_init(mutex, NULL);
271 _SPINUNLOCK(&static_init_lock);
277 init_static_private(pthread_mutex_t *mutex)
281 _SPINLOCK(&static_init_lock);
284 ret = _pthread_mutex_init(mutex, &static_mattr);
288 _SPINUNLOCK(&static_init_lock);
294 mutex_trylock_common(pthread_mutex_t *mutex)
296 struct pthread *curthread = _get_curthread();
299 PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
300 "Uninitialized mutex in pthread_mutex_trylock_basic");
303 * Defer signals to protect the scheduling queues from
304 * access by the signal handler:
306 _thread_kern_sig_defer();
308 /* Lock the mutex structure: */
309 _SPINLOCK(&(*mutex)->lock);
312 * If the mutex was statically allocated, properly
313 * initialize the tail queue.
315 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
316 TAILQ_INIT(&(*mutex)->m_queue);
317 _MUTEX_INIT_LINK(*mutex);
318 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
321 /* Process according to mutex type: */
322 switch ((*mutex)->m_protocol) {
323 /* Default POSIX mutex: */
324 case PTHREAD_PRIO_NONE:
325 /* Check if this mutex is not locked: */
326 if ((*mutex)->m_owner == NULL) {
327 /* Lock the mutex for the running thread: */
328 (*mutex)->m_owner = curthread;
330 /* Add to the list of owned mutexes: */
331 _MUTEX_ASSERT_NOT_OWNED(*mutex);
332 TAILQ_INSERT_TAIL(&curthread->mutexq,
334 } else if ((*mutex)->m_owner == curthread)
335 ret = mutex_self_trylock(*mutex);
337 /* Return a busy error: */
341 /* POSIX priority inheritence mutex: */
342 case PTHREAD_PRIO_INHERIT:
343 /* Check if this mutex is not locked: */
344 if ((*mutex)->m_owner == NULL) {
345 /* Lock the mutex for the running thread: */
346 (*mutex)->m_owner = curthread;
348 /* Track number of priority mutexes owned: */
349 curthread->priority_mutex_count++;
352 * The mutex takes on the attributes of the
353 * running thread when there are no waiters.
355 (*mutex)->m_prio = curthread->active_priority;
356 (*mutex)->m_saved_prio =
357 curthread->inherited_priority;
359 /* Add to the list of owned mutexes: */
360 _MUTEX_ASSERT_NOT_OWNED(*mutex);
361 TAILQ_INSERT_TAIL(&curthread->mutexq,
363 } else if ((*mutex)->m_owner == curthread)
364 ret = mutex_self_trylock(*mutex);
366 /* Return a busy error: */
370 /* POSIX priority protection mutex: */
371 case PTHREAD_PRIO_PROTECT:
372 /* Check for a priority ceiling violation: */
373 if (curthread->active_priority > (*mutex)->m_prio)
376 /* Check if this mutex is not locked: */
377 else if ((*mutex)->m_owner == NULL) {
378 /* Lock the mutex for the running thread: */
379 (*mutex)->m_owner = curthread;
381 /* Track number of priority mutexes owned: */
382 curthread->priority_mutex_count++;
385 * The running thread inherits the ceiling
386 * priority of the mutex and executes at that
389 curthread->active_priority = (*mutex)->m_prio;
390 (*mutex)->m_saved_prio =
391 curthread->inherited_priority;
392 curthread->inherited_priority =
395 /* Add to the list of owned mutexes: */
396 _MUTEX_ASSERT_NOT_OWNED(*mutex);
397 TAILQ_INSERT_TAIL(&curthread->mutexq,
399 } else if ((*mutex)->m_owner == curthread)
400 ret = mutex_self_trylock(*mutex);
402 /* Return a busy error: */
406 /* Trap invalid mutex types: */
408 /* Return an invalid argument error: */
413 /* Unlock the mutex structure: */
414 _SPINUNLOCK(&(*mutex)->lock);
417 * Undefer and handle pending signals, yielding if
420 _thread_kern_sig_undefer();
422 /* Return the completion status: */
427 __pthread_mutex_trylock(pthread_mutex_t *mutex)
435 * If the mutex is statically initialized, perform the dynamic
438 else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0)
439 ret = mutex_trylock_common(mutex);
445 _pthread_mutex_trylock(pthread_mutex_t *mutex)
453 * If the mutex is statically initialized, perform the dynamic
454 * initialization marking the mutex private (delete safe):
456 else if ((*mutex != NULL) || (ret = init_static_private(mutex)) == 0)
457 ret = mutex_trylock_common(mutex);
463 mutex_lock_common(pthread_mutex_t * mutex)
465 struct pthread *curthread = _get_curthread();
468 PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
469 "Uninitialized mutex in pthread_mutex_trylock_basic");
471 /* Reset the interrupted flag: */
472 curthread->interrupted = 0;
475 * Enter a loop waiting to become the mutex owner. We need a
476 * loop in case the waiting thread is interrupted by a signal
477 * to execute a signal handler. It is not (currently) possible
478 * to remain in the waiting queue while running a handler.
479 * Instead, the thread is interrupted and backed out of the
480 * waiting queue prior to executing the signal handler.
484 * Defer signals to protect the scheduling queues from
485 * access by the signal handler:
487 _thread_kern_sig_defer();
489 /* Lock the mutex structure: */
490 _SPINLOCK(&(*mutex)->lock);
493 * If the mutex was statically allocated, properly
494 * initialize the tail queue.
496 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
497 TAILQ_INIT(&(*mutex)->m_queue);
498 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
499 _MUTEX_INIT_LINK(*mutex);
502 /* Process according to mutex type: */
503 switch ((*mutex)->m_protocol) {
504 /* Default POSIX mutex: */
505 case PTHREAD_PRIO_NONE:
506 if ((*mutex)->m_owner == NULL) {
507 /* Lock the mutex for this thread: */
508 (*mutex)->m_owner = curthread;
510 /* Add to the list of owned mutexes: */
511 _MUTEX_ASSERT_NOT_OWNED(*mutex);
512 TAILQ_INSERT_TAIL(&curthread->mutexq,
515 } else if ((*mutex)->m_owner == curthread)
516 ret = mutex_self_lock(*mutex);
519 * Join the queue of threads waiting to lock
522 mutex_queue_enq(*mutex, curthread);
525 * Keep a pointer to the mutex this thread
528 curthread->data.mutex = *mutex;
531 * Unlock the mutex structure and schedule the
534 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
535 &(*mutex)->lock, __FILE__, __LINE__);
537 /* Lock the mutex structure again: */
538 _SPINLOCK(&(*mutex)->lock);
542 /* POSIX priority inheritence mutex: */
543 case PTHREAD_PRIO_INHERIT:
544 /* Check if this mutex is not locked: */
545 if ((*mutex)->m_owner == NULL) {
546 /* Lock the mutex for this thread: */
547 (*mutex)->m_owner = curthread;
549 /* Track number of priority mutexes owned: */
550 curthread->priority_mutex_count++;
553 * The mutex takes on attributes of the
554 * running thread when there are no waiters.
556 (*mutex)->m_prio = curthread->active_priority;
557 (*mutex)->m_saved_prio =
558 curthread->inherited_priority;
559 curthread->inherited_priority =
562 /* Add to the list of owned mutexes: */
563 _MUTEX_ASSERT_NOT_OWNED(*mutex);
564 TAILQ_INSERT_TAIL(&curthread->mutexq,
567 } else if ((*mutex)->m_owner == curthread)
568 ret = mutex_self_lock(*mutex);
571 * Join the queue of threads waiting to lock
574 mutex_queue_enq(*mutex, curthread);
577 * Keep a pointer to the mutex this thread
580 curthread->data.mutex = *mutex;
582 if (curthread->active_priority >
584 /* Adjust priorities: */
585 mutex_priority_adjust(*mutex);
588 * Unlock the mutex structure and schedule the
591 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
592 &(*mutex)->lock, __FILE__, __LINE__);
594 /* Lock the mutex structure again: */
595 _SPINLOCK(&(*mutex)->lock);
599 /* POSIX priority protection mutex: */
600 case PTHREAD_PRIO_PROTECT:
601 /* Check for a priority ceiling violation: */
602 if (curthread->active_priority > (*mutex)->m_prio)
605 /* Check if this mutex is not locked: */
606 else if ((*mutex)->m_owner == NULL) {
608 * Lock the mutex for the running
611 (*mutex)->m_owner = curthread;
613 /* Track number of priority mutexes owned: */
614 curthread->priority_mutex_count++;
617 * The running thread inherits the ceiling
618 * priority of the mutex and executes at that
621 curthread->active_priority = (*mutex)->m_prio;
622 (*mutex)->m_saved_prio =
623 curthread->inherited_priority;
624 curthread->inherited_priority =
627 /* Add to the list of owned mutexes: */
628 _MUTEX_ASSERT_NOT_OWNED(*mutex);
629 TAILQ_INSERT_TAIL(&curthread->mutexq,
631 } else if ((*mutex)->m_owner == curthread)
632 ret = mutex_self_lock(*mutex);
635 * Join the queue of threads waiting to lock
638 mutex_queue_enq(*mutex, curthread);
641 * Keep a pointer to the mutex this thread
644 curthread->data.mutex = *mutex;
646 /* Clear any previous error: */
647 curthread->error = 0;
650 * Unlock the mutex structure and schedule the
653 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
654 &(*mutex)->lock, __FILE__, __LINE__);
656 /* Lock the mutex structure again: */
657 _SPINLOCK(&(*mutex)->lock);
660 * The threads priority may have changed while
661 * waiting for the mutex causing a ceiling
664 ret = curthread->error;
665 curthread->error = 0;
669 /* Trap invalid mutex types: */
671 /* Return an invalid argument error: */
677 * Check to see if this thread was interrupted and
678 * is still in the mutex queue of waiting threads:
680 if (curthread->interrupted != 0)
681 mutex_queue_remove(*mutex, curthread);
683 /* Unlock the mutex structure: */
684 _SPINUNLOCK(&(*mutex)->lock);
687 * Undefer and handle pending signals, yielding if
690 _thread_kern_sig_undefer();
691 } while (((*mutex)->m_owner != curthread) && (ret == 0) &&
692 (curthread->interrupted == 0));
694 if (curthread->interrupted != 0 &&
695 curthread->continuation != NULL)
696 curthread->continuation((void *) curthread);
698 /* Return the completion status: */
703 __pthread_mutex_lock(pthread_mutex_t *mutex)
707 if (_thread_initial == NULL)
714 * If the mutex is statically initialized, perform the dynamic
717 else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0))
718 ret = mutex_lock_common(mutex);
724 _pthread_mutex_lock(pthread_mutex_t *mutex)
728 if (_thread_initial == NULL)
735 * If the mutex is statically initialized, perform the dynamic
736 * initialization marking it private (delete safe):
738 else if ((*mutex != NULL) || ((ret = init_static_private(mutex)) == 0))
739 ret = mutex_lock_common(mutex);
745 _pthread_mutex_unlock(pthread_mutex_t * mutex)
747 return (mutex_unlock_common(mutex, /* add reference */ 0));
751 _mutex_cv_unlock(pthread_mutex_t * mutex)
753 return (mutex_unlock_common(mutex, /* add reference */ 1));
757 _mutex_cv_lock(pthread_mutex_t * mutex)
760 if ((ret = _pthread_mutex_lock(mutex)) == 0)
761 (*mutex)->m_refcount--;
766 mutex_self_trylock(pthread_mutex_t mutex)
770 switch (mutex->m_type) {
772 /* case PTHREAD_MUTEX_DEFAULT: */
773 case PTHREAD_MUTEX_ERRORCHECK:
774 case PTHREAD_MUTEX_NORMAL:
776 * POSIX specifies that mutexes should return EDEADLK if a
777 * recursive lock is detected.
782 case PTHREAD_MUTEX_RECURSIVE:
783 /* Increment the lock count: */
784 mutex->m_data.m_count++;
788 /* Trap invalid mutex types; */
796 mutex_self_lock(pthread_mutex_t mutex)
800 switch (mutex->m_type) {
801 /* case PTHREAD_MUTEX_DEFAULT: */
802 case PTHREAD_MUTEX_ERRORCHECK:
804 * POSIX specifies that mutexes should return EDEADLK if a
805 * recursive lock is detected.
810 case PTHREAD_MUTEX_NORMAL:
812 * What SS2 define as a 'normal' mutex. Intentionally
813 * deadlock on attempts to get a lock you already own.
815 _thread_kern_sched_state_unlock(PS_DEADLOCK,
816 &mutex->lock, __FILE__, __LINE__);
819 case PTHREAD_MUTEX_RECURSIVE:
820 /* Increment the lock count: */
821 mutex->m_data.m_count++;
825 /* Trap invalid mutex types; */
833 mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
835 struct pthread *curthread = _get_curthread();
838 if (mutex == NULL || *mutex == NULL) {
842 * Defer signals to protect the scheduling queues from
843 * access by the signal handler:
845 _thread_kern_sig_defer();
847 /* Lock the mutex structure: */
848 _SPINLOCK(&(*mutex)->lock);
850 /* Process according to mutex type: */
851 switch ((*mutex)->m_protocol) {
852 /* Default POSIX mutex: */
853 case PTHREAD_PRIO_NONE:
855 * Check if the running thread is not the owner of the
858 if ((*mutex)->m_owner != curthread) {
860 * Return an invalid argument error for no
861 * owner and a permission error otherwise:
863 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
865 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
866 ((*mutex)->m_data.m_count > 0)) {
867 /* Decrement the count: */
868 (*mutex)->m_data.m_count--;
871 * Clear the count in case this is recursive
874 (*mutex)->m_data.m_count = 0;
876 /* Remove the mutex from the threads queue. */
877 _MUTEX_ASSERT_IS_OWNED(*mutex);
878 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
880 _MUTEX_INIT_LINK(*mutex);
883 * Get the next thread from the queue of
884 * threads waiting on the mutex:
886 if (((*mutex)->m_owner =
887 mutex_queue_deq(*mutex)) != NULL) {
888 /* Make the new owner runnable: */
889 PTHREAD_NEW_STATE((*mutex)->m_owner,
893 * Add the mutex to the threads list of
896 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
900 * The owner is no longer waiting for
903 (*mutex)->m_owner->data.mutex = NULL;
908 /* POSIX priority inheritence mutex: */
909 case PTHREAD_PRIO_INHERIT:
911 * Check if the running thread is not the owner of the
914 if ((*mutex)->m_owner != curthread) {
916 * Return an invalid argument error for no
917 * owner and a permission error otherwise:
919 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
921 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
922 ((*mutex)->m_data.m_count > 0)) {
923 /* Decrement the count: */
924 (*mutex)->m_data.m_count--;
927 * Clear the count in case this is recursive
930 (*mutex)->m_data.m_count = 0;
933 * Restore the threads inherited priority and
934 * recompute the active priority (being careful
935 * not to override changes in the threads base
936 * priority subsequent to locking the mutex).
938 curthread->inherited_priority =
939 (*mutex)->m_saved_prio;
940 curthread->active_priority =
941 MAX(curthread->inherited_priority,
942 curthread->base_priority);
945 * This thread now owns one less priority mutex.
947 curthread->priority_mutex_count--;
949 /* Remove the mutex from the threads queue. */
950 _MUTEX_ASSERT_IS_OWNED(*mutex);
951 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
953 _MUTEX_INIT_LINK(*mutex);
956 * Get the next thread from the queue of threads
957 * waiting on the mutex:
959 if (((*mutex)->m_owner =
960 mutex_queue_deq(*mutex)) == NULL)
961 /* This mutex has no priority. */
962 (*mutex)->m_prio = 0;
965 * Track number of priority mutexes owned:
967 (*mutex)->m_owner->priority_mutex_count++;
970 * Add the mutex to the threads list
973 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
977 * The owner is no longer waiting for
980 (*mutex)->m_owner->data.mutex = NULL;
983 * Set the priority of the mutex. Since
984 * our waiting threads are in descending
985 * priority order, the priority of the
986 * mutex becomes the active priority of
987 * the thread we just dequeued.
990 (*mutex)->m_owner->active_priority;
993 * Save the owning threads inherited
996 (*mutex)->m_saved_prio =
997 (*mutex)->m_owner->inherited_priority;
1000 * The owning threads inherited priority
1001 * now becomes his active priority (the
1002 * priority of the mutex).
1004 (*mutex)->m_owner->inherited_priority =
1008 * Make the new owner runnable:
1010 PTHREAD_NEW_STATE((*mutex)->m_owner,
1016 /* POSIX priority ceiling mutex: */
1017 case PTHREAD_PRIO_PROTECT:
1019 * Check if the running thread is not the owner of the
1022 if ((*mutex)->m_owner != curthread) {
1024 * Return an invalid argument error for no
1025 * owner and a permission error otherwise:
1027 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
1029 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1030 ((*mutex)->m_data.m_count > 0)) {
1031 /* Decrement the count: */
1032 (*mutex)->m_data.m_count--;
1035 * Clear the count in case this is recursive
1038 (*mutex)->m_data.m_count = 0;
1041 * Restore the threads inherited priority and
1042 * recompute the active priority (being careful
1043 * not to override changes in the threads base
1044 * priority subsequent to locking the mutex).
1046 curthread->inherited_priority =
1047 (*mutex)->m_saved_prio;
1048 curthread->active_priority =
1049 MAX(curthread->inherited_priority,
1050 curthread->base_priority);
1053 * This thread now owns one less priority mutex.
1055 curthread->priority_mutex_count--;
1057 /* Remove the mutex from the threads queue. */
1058 _MUTEX_ASSERT_IS_OWNED(*mutex);
1059 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
1061 _MUTEX_INIT_LINK(*mutex);
1064 * Enter a loop to find a waiting thread whose
1065 * active priority will not cause a ceiling
1068 while ((((*mutex)->m_owner =
1069 mutex_queue_deq(*mutex)) != NULL) &&
1070 ((*mutex)->m_owner->active_priority >
1071 (*mutex)->m_prio)) {
1073 * Either the mutex ceiling priority
1074 * been lowered and/or this threads
1075 * priority has been raised subsequent
1076 * to this thread being queued on the
1079 (*mutex)->m_owner->error = EINVAL;
1080 PTHREAD_NEW_STATE((*mutex)->m_owner,
1083 * The thread is no longer waiting for
1086 (*mutex)->m_owner->data.mutex = NULL;
1089 /* Check for a new owner: */
1090 if ((*mutex)->m_owner != NULL) {
1092 * Track number of priority mutexes owned:
1094 (*mutex)->m_owner->priority_mutex_count++;
1097 * Add the mutex to the threads list
1100 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
1104 * The owner is no longer waiting for
1107 (*mutex)->m_owner->data.mutex = NULL;
1110 * Save the owning threads inherited
1113 (*mutex)->m_saved_prio =
1114 (*mutex)->m_owner->inherited_priority;
1117 * The owning thread inherits the
1118 * ceiling priority of the mutex and
1119 * executes at that priority:
1121 (*mutex)->m_owner->inherited_priority =
1123 (*mutex)->m_owner->active_priority =
1127 * Make the new owner runnable:
1129 PTHREAD_NEW_STATE((*mutex)->m_owner,
1135 /* Trap invalid mutex types: */
1137 /* Return an invalid argument error: */
1142 if ((ret == 0) && (add_reference != 0)) {
1143 /* Increment the reference count: */
1144 (*mutex)->m_refcount++;
1147 /* Unlock the mutex structure: */
1148 _SPINUNLOCK(&(*mutex)->lock);
1151 * Undefer and handle pending signals, yielding if
1154 _thread_kern_sig_undefer();
1157 /* Return the completion status: */
1163 * This function is called when a change in base priority occurs for
1164 * a thread that is holding or waiting for a priority protection or
1165 * inheritence mutex. A change in a threads base priority can effect
1166 * changes to active priorities of other threads and to the ordering
1167 * of mutex locking by waiting threads.
1169 * This must be called while thread scheduling is deferred.
1172 _mutex_notify_priochange(pthread_t pthread)
1174 /* Adjust the priorites of any owned priority mutexes: */
1175 if (pthread->priority_mutex_count > 0) {
1177 * Rescan the mutexes owned by this thread and correct
1178 * their priorities to account for this threads change
1179 * in priority. This has the side effect of changing
1180 * the threads active priority.
1182 mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
1186 * If this thread is waiting on a priority inheritence mutex,
1187 * check for priority adjustments. A change in priority can
1188 * also effect a ceiling violation(*) for a thread waiting on
1189 * a priority protection mutex; we don't perform the check here
1190 * as it is done in pthread_mutex_unlock.
1192 * (*) It should be noted that a priority change to a thread
1193 * _after_ taking and owning a priority ceiling mutex
1194 * does not affect ownership of that mutex; the ceiling
1195 * priority is only checked before mutex ownership occurs.
1197 if (pthread->state == PS_MUTEX_WAIT) {
1198 /* Lock the mutex structure: */
1199 _SPINLOCK(&pthread->data.mutex->lock);
1202 * Check to make sure this thread is still in the same state
1203 * (the spinlock above can yield the CPU to another thread):
1205 if (pthread->state == PS_MUTEX_WAIT) {
1207 * Remove and reinsert this thread into the list of
1208 * waiting threads to preserve decreasing priority
1211 mutex_queue_remove(pthread->data.mutex, pthread);
1212 mutex_queue_enq(pthread->data.mutex, pthread);
1214 if (pthread->data.mutex->m_protocol ==
1215 PTHREAD_PRIO_INHERIT) {
1216 /* Adjust priorities: */
1217 mutex_priority_adjust(pthread->data.mutex);
1221 /* Unlock the mutex structure: */
1222 _SPINUNLOCK(&pthread->data.mutex->lock);
1227 * Called when a new thread is added to the mutex waiting queue or
1228 * when a threads priority changes that is already in the mutex
1232 mutex_priority_adjust(pthread_mutex_t mutex)
1234 pthread_t pthread_next, pthread = mutex->m_owner;
1236 pthread_mutex_t m = mutex;
1239 * Calculate the mutex priority as the maximum of the highest
1240 * active priority of any waiting threads and the owning threads
1241 * active priority(*).
1243 * (*) Because the owning threads current active priority may
1244 * reflect priority inherited from this mutex (and the mutex
1245 * priority may have changed) we must recalculate the active
1246 * priority based on the threads saved inherited priority
1247 * and its base priority.
1249 pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
1250 temp_prio = MAX(pthread_next->active_priority,
1251 MAX(m->m_saved_prio, pthread->base_priority));
1253 /* See if this mutex really needs adjusting: */
1254 if (temp_prio == m->m_prio)
1255 /* No need to propagate the priority: */
1258 /* Set new priority of the mutex: */
1259 m->m_prio = temp_prio;
1263 * Save the threads priority before rescanning the
1266 temp_prio = pthread->active_priority;
1269 * Fix the priorities for all the mutexes this thread has
1270 * locked since taking this mutex. This also has a
1271 * potential side-effect of changing the threads priority.
1273 mutex_rescan_owned(pthread, m);
1276 * If the thread is currently waiting on a mutex, check
1277 * to see if the threads new priority has affected the
1278 * priority of the mutex.
1280 if ((temp_prio != pthread->active_priority) &&
1281 (pthread->state == PS_MUTEX_WAIT) &&
1282 (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
1283 /* Grab the mutex this thread is waiting on: */
1284 m = pthread->data.mutex;
1287 * The priority for this thread has changed. Remove
1288 * and reinsert this thread into the list of waiting
1289 * threads to preserve decreasing priority order.
1291 mutex_queue_remove(m, pthread);
1292 mutex_queue_enq(m, pthread);
1294 /* Grab the waiting thread with highest priority: */
1295 pthread_next = TAILQ_FIRST(&m->m_queue);
1298 * Calculate the mutex priority as the maximum of the
1299 * highest active priority of any waiting threads and
1300 * the owning threads active priority.
1302 temp_prio = MAX(pthread_next->active_priority,
1303 MAX(m->m_saved_prio, m->m_owner->base_priority));
1305 if (temp_prio != m->m_prio) {
1307 * The priority needs to be propagated to the
1308 * mutex this thread is waiting on and up to
1309 * the owner of that mutex.
1311 m->m_prio = temp_prio;
1312 pthread = m->m_owner;
1326 mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
1328 int active_prio, inherited_prio;
1330 pthread_t pthread_next;
1333 * Start walking the mutexes the thread has taken since
1334 * taking this mutex.
1336 if (mutex == NULL) {
1338 * A null mutex means start at the beginning of the owned
1341 m = TAILQ_FIRST(&pthread->mutexq);
1343 /* There is no inherited priority yet. */
1348 * The caller wants to start after a specific mutex. It
1349 * is assumed that this mutex is a priority inheritence
1350 * mutex and that its priority has been correctly
1353 m = TAILQ_NEXT(mutex, m_qe);
1355 /* Start inheriting priority from the specified mutex. */
1356 inherited_prio = mutex->m_prio;
1358 active_prio = MAX(inherited_prio, pthread->base_priority);
1362 * We only want to deal with priority inheritence
1363 * mutexes. This might be optimized by only placing
1364 * priority inheritence mutexes into the owned mutex
1365 * list, but it may prove to be useful having all
1366 * owned mutexes in this list. Consider a thread
1367 * exiting while holding mutexes...
1369 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1371 * Fix the owners saved (inherited) priority to
1372 * reflect the priority of the previous mutex.
1374 m->m_saved_prio = inherited_prio;
1376 if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1377 /* Recalculate the priority of the mutex: */
1378 m->m_prio = MAX(active_prio,
1379 pthread_next->active_priority);
1381 m->m_prio = active_prio;
1383 /* Recalculate new inherited and active priorities: */
1384 inherited_prio = m->m_prio;
1385 active_prio = MAX(m->m_prio, pthread->base_priority);
1388 /* Advance to the next mutex owned by this thread: */
1389 m = TAILQ_NEXT(m, m_qe);
1393 * Fix the threads inherited priority and recalculate its
1396 pthread->inherited_priority = inherited_prio;
1397 active_prio = MAX(inherited_prio, pthread->base_priority);
1399 if (active_prio != pthread->active_priority) {
1401 * If this thread is in the priority queue, it must be
1402 * removed and reinserted for its new priority.
1404 if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
1406 * Remove the thread from the priority queue
1407 * before changing its priority:
1409 PTHREAD_PRIOQ_REMOVE(pthread);
1412 * POSIX states that if the priority is being
1413 * lowered, the thread must be inserted at the
1414 * head of the queue for its priority if it owns
1415 * any priority protection or inheritence mutexes.
1417 if ((active_prio < pthread->active_priority) &&
1418 (pthread->priority_mutex_count > 0)) {
1419 /* Set the new active priority. */
1420 pthread->active_priority = active_prio;
1422 PTHREAD_PRIOQ_INSERT_HEAD(pthread);
1425 /* Set the new active priority. */
1426 pthread->active_priority = active_prio;
1428 PTHREAD_PRIOQ_INSERT_TAIL(pthread);
1432 /* Set the new active priority. */
1433 pthread->active_priority = active_prio;
1439 _mutex_unlock_private(pthread_t pthread)
1441 struct pthread_mutex *m, *m_next;
1443 for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1444 m_next = TAILQ_NEXT(m, m_qe);
1445 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1446 _pthread_mutex_unlock(&m);
1451 _mutex_lock_backout(pthread_t pthread)
1453 struct pthread_mutex *mutex;
1456 * Defer signals to protect the scheduling queues from
1457 * access by the signal handler:
1459 _thread_kern_sig_defer();
1460 if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1461 mutex = pthread->data.mutex;
1463 /* Lock the mutex structure: */
1464 _SPINLOCK(&mutex->lock);
1466 mutex_queue_remove(mutex, pthread);
1468 /* This thread is no longer waiting for the mutex: */
1469 pthread->data.mutex = NULL;
1471 /* Unlock the mutex structure: */
1472 _SPINUNLOCK(&mutex->lock);
1476 * Undefer and handle pending signals, yielding if
1479 _thread_kern_sig_undefer();
1483 * Dequeue a waiting thread from the head of a mutex queue in descending
1486 static inline pthread_t
1487 mutex_queue_deq(pthread_mutex_t mutex)
1491 while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1492 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1493 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1496 * Only exit the loop if the thread hasn't been
1499 if (pthread->interrupted == 0)
1507 * Remove a waiting thread from a mutex queue in descending priority order.
1510 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1512 if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1513 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1514 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1519 * Enqueue a waiting thread to a queue in descending priority order.
1522 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1524 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1526 PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
1528 * For the common case of all threads having equal priority,
1529 * we perform a quick check against the priority of the thread
1530 * at the tail of the queue.
1532 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1533 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1535 tid = TAILQ_FIRST(&mutex->m_queue);
1536 while (pthread->active_priority <= tid->active_priority)
1537 tid = TAILQ_NEXT(tid, sqe);
1538 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1540 pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;