2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include "namespace.h"
36 #include <sys/param.h>
37 #include <sys/queue.h>
39 #include "un-namespace.h"
40 #include "thr_private.h"
/*
 * Invariant-checking helper macros.  When _PTHREADS_INVARIANTS is
 * defined they validate a mutex's membership on its owner's queue
 * (PANIC on inconsistency); otherwise they expand to nothing.
 * MUTEX_DESTROY releases the low-level lock embedded in the mutex.
 * NOTE(review): this listing appears truncated -- the "} while (0)"
 * terminators, the #else and #endif lines, and some definitions are
 * not visible here; confirm against the original file.
 */
42 #if defined(_PTHREADS_INVARIANTS)
43 #define MUTEX_INIT_LINK(m) do { \
44 (m)->m_qe.tqe_prev = NULL; \
45 (m)->m_qe.tqe_next = NULL; \
47 #define MUTEX_ASSERT_IS_OWNED(m) do { \
48 if ((m)->m_qe.tqe_prev == NULL) \
49 PANIC("mutex is not on list"); \
51 #define MUTEX_ASSERT_NOT_OWNED(m) do { \
52 if (((m)->m_qe.tqe_prev != NULL) || \
53 ((m)->m_qe.tqe_next != NULL)) \
54 PANIC("mutex is on list"); \
56 #define THR_ASSERT_NOT_IN_SYNCQ(thr) do { \
57 THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
58 "thread in syncq when it shouldn't be."); \
61 #define MUTEX_INIT_LINK(m)
62 #define MUTEX_ASSERT_IS_OWNED(m)
63 #define MUTEX_ASSERT_NOT_OWNED(m)
64 #define THR_ASSERT_NOT_IN_SYNCQ(thr)
67 #define THR_IN_MUTEXQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
68 #define MUTEX_DESTROY(m) do { \
69 _lock_destroy(&(m)->m_lock); \
/*
 * Prototypes for the file-local helpers that implement queueing,
 * hand-off and priority propagation for this mutex implementation.
 * mutex_handoff returns a kse_mailbox (KSE to notify), used by the
 * unlock paths below.
 */
77 static struct kse_mailbox *mutex_handoff(struct pthread *,
78 struct pthread_mutex *);
79 static inline int mutex_self_trylock(pthread_mutex_t);
80 static inline int mutex_self_lock(struct pthread *, pthread_mutex_t);
81 static int mutex_unlock_common(pthread_mutex_t *, int);
82 static void mutex_priority_adjust(struct pthread *, pthread_mutex_t);
83 static void mutex_rescan_owned (struct pthread *, struct pthread *,
84 struct pthread_mutex *);
85 static inline pthread_t mutex_queue_deq(pthread_mutex_t);
86 static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
87 static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
88 static void mutex_lock_backout(void *arg);
/*
 * Prototypes for the double-underscore (libc-internal / cancellable)
 * entry points and the calloc-callback initializer used by malloc
 * bootstrap (see _pthread_mutex_init_calloc_cb below).
 */
90 int __pthread_mutex_init(pthread_mutex_t *mutex,
91 const pthread_mutexattr_t *mutex_attr);
92 int __pthread_mutex_trylock(pthread_mutex_t *mutex);
93 int __pthread_mutex_lock(pthread_mutex_t *m);
94 int __pthread_mutex_timedlock(pthread_mutex_t *m,
95 const struct timespec *abs_timeout);
96 int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
97 void *(calloc_cb)(size_t, size_t));
/*
 * Attribute object used when initializing mutexes for libc-internal
 * (private, delete-safe) use; static_mattr is the pointer form the
 * pthread_mutexattr_t API expects.
 */
100 static struct pthread_mutex_attr static_mutex_attr =
101 PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
102 static pthread_mutexattr_t static_mattr = &static_mutex_attr;
/*
 * libpthread 1.0 symbol-versioning compatibility declarations:
 * underscore-prefixed names stay private, unprefixed POSIX names get
 * the default version.
 */
104 LT10_COMPAT_PRIVATE(__pthread_mutex_init);
105 LT10_COMPAT_PRIVATE(_pthread_mutex_init);
106 LT10_COMPAT_DEFAULT(pthread_mutex_init);
107 LT10_COMPAT_PRIVATE(__pthread_mutex_lock);
108 LT10_COMPAT_PRIVATE(_pthread_mutex_lock);
109 LT10_COMPAT_DEFAULT(pthread_mutex_lock);
110 LT10_COMPAT_PRIVATE(__pthread_mutex_timedlock);
111 LT10_COMPAT_PRIVATE(_pthread_mutex_timedlock);
112 LT10_COMPAT_DEFAULT(pthread_mutex_timedlock);
113 LT10_COMPAT_PRIVATE(__pthread_mutex_trylock);
114 LT10_COMPAT_PRIVATE(_pthread_mutex_trylock);
115 LT10_COMPAT_DEFAULT(pthread_mutex_trylock);
116 LT10_COMPAT_PRIVATE(_pthread_mutex_destroy);
117 LT10_COMPAT_DEFAULT(pthread_mutex_destroy);
118 LT10_COMPAT_PRIVATE(_pthread_mutex_unlock);
119 LT10_COMPAT_DEFAULT(pthread_mutex_unlock);
/*
 * Weak aliases mapping the implementation names onto the public POSIX
 * names, so libc and applications resolve to the same code.
 */
121 /* Single underscore versions provided for libc internal usage: */
122 __weak_reference(__pthread_mutex_init, pthread_mutex_init);
123 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
124 __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
125 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
127 /* No difference between libc and application usage of these: */
128 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
129 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
/*
 * Common initializer behind __pthread_mutex_init and
 * _pthread_mutex_init_calloc_cb.  Validates the attributes (or applies
 * defaults: error-checking type, PTHREAD_PRIO_NONE protocol), allocates
 * the mutex via the caller-supplied calloc-compatible callback, and
 * initializes all fields.  Taking the allocator as a parameter lets
 * malloc bootstrap its own locks without recursing into malloc.
 * NOTE(review): listing is truncated -- declarations of ret/flags/
 * protocol/ceiling, several error assignments, the switch header and
 * the final return are not visible here; confirm against the original.
 */
132 thr_mutex_init(pthread_mutex_t *mutex,
133 const pthread_mutexattr_t *mutex_attr, void *(calloc_cb)(size_t, size_t))
135 struct pthread_mutex *pmutex;
136 enum pthread_mutextype type;
145 /* Check if default mutex attributes: */
146 else if (mutex_attr == NULL || *mutex_attr == NULL) {
147 /* Default to a (error checking) POSIX mutex: */
148 type = PTHREAD_MUTEX_ERRORCHECK;
149 protocol = PTHREAD_PRIO_NONE;
150 ceiling = THR_MAX_PRIORITY;
154 /* Check mutex type: */
155 else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
156 ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
157 /* Return an invalid argument error: */
160 /* Check mutex protocol: */
/* NOTE(review): the upper bound below compares m_protocol against
 * PTHREAD_MUTEX_RECURSIVE (a mutex *type* constant) rather than a
 * protocol constant -- presumably should be PTHREAD_PRIO_PROTECT;
 * verify against the original source. */
161 else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
162 ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
163 /* Return an invalid argument error: */
167 /* Use the requested mutex type and protocol: */
168 type = (*mutex_attr)->m_type;
169 protocol = (*mutex_attr)->m_protocol;
170 ceiling = (*mutex_attr)->m_ceiling;
171 flags = (*mutex_attr)->m_flags;
174 /* Check no errors so far: */
176 if ((pmutex = (pthread_mutex_t)
177 calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
179 else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
180 _thr_lock_wait, _thr_lock_wakeup, calloc_cb) != 0) {
185 /* Set the mutex flags: */
186 pmutex->m_flags = flags;
188 /* Process according to mutex type: */
190 /* case PTHREAD_MUTEX_DEFAULT: */
191 case PTHREAD_MUTEX_ERRORCHECK:
192 case PTHREAD_MUTEX_NORMAL:
193 case PTHREAD_MUTEX_ADAPTIVE_NP:
194 /* Nothing to do here. */
197 /* Single UNIX Spec 2 recursive mutex: */
198 case PTHREAD_MUTEX_RECURSIVE:
199 /* Reset the mutex count: */
203 /* Trap invalid mutex types: */
205 /* Return an invalid argument error: */
210 /* Initialise the rest of the mutex: */
211 TAILQ_INIT(&pmutex->m_queue);
212 pmutex->m_flags |= MUTEX_FLAGS_INITED;
213 pmutex->m_owner = NULL;
214 pmutex->m_type = type;
215 pmutex->m_protocol = protocol;
216 pmutex->m_refcount = 0;
217 if (protocol == PTHREAD_PRIO_PROTECT)
218 pmutex->m_prio = ceiling;
221 pmutex->m_saved_prio = 0;
222 MUTEX_INIT_LINK(pmutex);
225 /* Free the mutex lock structure: */
226 MUTEX_DESTROY(pmutex);
231 /* Return the completion status: */
/*
 * Public (libc-internal name) pthread_mutex_init: delegate to the
 * common initializer using the real calloc as the allocator.
 */
236 __pthread_mutex_init(pthread_mutex_t *mutex,
237 const pthread_mutexattr_t *mutex_attr)
240 return (thr_mutex_init(mutex, mutex_attr, calloc));
/*
 * Single-underscore variant used inside libc: forces the
 * MUTEX_FLAGS_PRIVATE flag onto a copy of the caller's attributes so
 * the resulting mutex is delete-safe for internal use.
 * NOTE(review): listing is truncated -- the "mattrp = &mattr;"
 * assignment that must precede the final call is not visible here.
 */
244 _pthread_mutex_init(pthread_mutex_t *mutex,
245 const pthread_mutexattr_t *mutex_attr)
247 struct pthread_mutex_attr mattr, *mattrp;
249 if ((mutex_attr == NULL) || (*mutex_attr == NULL))
250 return (__pthread_mutex_init(mutex, &static_mattr));
252 mattr = **mutex_attr;
253 mattr.m_flags |= MUTEX_FLAGS_PRIVATE;
255 return (__pthread_mutex_init(mutex, &mattrp));
/*
 * malloc-bootstrap initializer: builds a plain (normal type, no
 * priority protocol) mutex using the caller-provided calloc-compatible
 * allocator, avoiding recursion into malloc itself.
 */
259 /* This function is used internally by malloc. */
261 _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
262 void *(calloc_cb)(size_t, size_t))
264 static const struct pthread_mutex_attr attr = {
265 .m_type = PTHREAD_MUTEX_NORMAL,
266 .m_protocol = PTHREAD_PRIO_NONE,
270 static const struct pthread_mutex_attr *pattr = &attr;
272 return (thr_mutex_init(mutex, (pthread_mutexattr_t *)&pattr,
/*
 * Reset an existing mutex to its pristine unlocked state: reinitialize
 * the low-level lock and clear owner, counts and priority bookkeeping.
 * Presumably used after fork; confirm against callers.
 */
277 _thr_mutex_reinit(pthread_mutex_t *mutex)
279 _lock_reinit(&(*mutex)->m_lock, LCK_ADAPTIVE,
280 _thr_lock_wait, _thr_lock_wakeup);
281 TAILQ_INIT(&(*mutex)->m_queue);
282 (*mutex)->m_owner = NULL;
283 (*mutex)->m_count = 0;
284 (*mutex)->m_refcount = 0;
285 (*mutex)->m_prio = 0;
286 (*mutex)->m_saved_prio = 0;
/*
 * pthread_mutex_destroy: refuse (EBUSY, per the not-visible return) if
 * the mutex is owned, has waiters, or is referenced by a condition
 * variable; otherwise detach it from the caller's pointer and free it.
 * NOTE(review): listing is truncated -- the EINVAL/EBUSY returns, the
 * "m = *mutex; *mutex = NULL;" save/clear, and the free() call are not
 * visible here.
 */
290 _pthread_mutex_destroy(pthread_mutex_t *mutex)
292 struct pthread *curthread = _get_curthread();
296 if (mutex == NULL || *mutex == NULL)
299 /* Lock the mutex structure: */
300 THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
303 * Check to see if this mutex is in use:
305 if (((*mutex)->m_owner != NULL) ||
306 (!TAILQ_EMPTY(&(*mutex)->m_queue)) ||
307 ((*mutex)->m_refcount != 0)) {
310 /* Unlock the mutex structure: */
311 THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
314 * Save a pointer to the mutex so it can be free'd
315 * and set the caller's pointer to NULL:
320 /* Unlock the mutex structure: */
321 THR_LOCK_RELEASE(curthread, &m->m_lock);
324 * Free the memory allocated for the mutex
327 MUTEX_ASSERT_NOT_OWNED(m);
332 /* Return the completion status: */
/*
 * Lazily initialize a statically-allocated (PTHREAD_MUTEX_INITIALIZER)
 * mutex with default attributes, serialized by _mutex_static_lock so
 * concurrent first-lockers initialize it exactly once.
 */
337 init_static(struct pthread *thread, pthread_mutex_t *mutex)
341 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
344 ret = _pthread_mutex_init(mutex, NULL);
348 THR_LOCK_RELEASE(thread, &_mutex_static_lock);
/*
 * Same as init_static, but marks the mutex private (delete-safe) via
 * static_mattr -- used by the libc-internal entry points.
 */
354 init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
358 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
361 ret = _pthread_mutex_init(mutex, &static_mattr);
365 THR_LOCK_RELEASE(thread, &_mutex_static_lock);
/*
 * Non-blocking acquire shared by both trylock entry points.  Takes the
 * mutex's structure lock, lazily finishes static initialization, then
 * branches on the priority protocol:
 *   PTHREAD_PRIO_NONE    - plain ownership test;
 *   PTHREAD_PRIO_INHERIT - additionally records the locker's priority
 *                          in m_prio/m_saved_prio under the sched lock;
 *   PTHREAD_PRIO_PROTECT - first rejects a ceiling violation, then
 *                          raises the locker to the ceiling priority.
 * Re-acquisition by the owner is delegated to mutex_self_trylock.
 * On success for a private mutex the thread enters a critical region
 * (left again in mutex_unlock_common).
 * NOTE(review): listing is truncated -- declarations of ret/private,
 * the EBUSY/EINVAL assignments, TAILQ_INSERT_TAIL continuation lines,
 * break statements and the final return are not visible here.
 */
371 mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
376 THR_ASSERT((mutex != NULL) && (*mutex != NULL),
377 "Uninitialized mutex in pthread_mutex_trylock_basic");
379 /* Lock the mutex structure: */
380 THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
381 private = (*mutex)->m_flags & MUTEX_FLAGS_PRIVATE;
384 * If the mutex was statically allocated, properly
385 * initialize the tail queue.
387 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
388 TAILQ_INIT(&(*mutex)->m_queue);
389 MUTEX_INIT_LINK(*mutex);
390 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
393 /* Process according to mutex type: */
394 switch ((*mutex)->m_protocol) {
395 /* Default POSIX mutex: */
396 case PTHREAD_PRIO_NONE:
397 /* Check if this mutex is not locked: */
398 if ((*mutex)->m_owner == NULL) {
399 /* Lock the mutex for the running thread: */
400 (*mutex)->m_owner = curthread;
402 /* Add to the list of owned mutexes: */
403 MUTEX_ASSERT_NOT_OWNED(*mutex);
404 TAILQ_INSERT_TAIL(&curthread->mutexq,
406 } else if ((*mutex)->m_owner == curthread)
407 ret = mutex_self_trylock(*mutex);
409 /* Return a busy error: */
413 /* POSIX priority inheritence mutex: */
414 case PTHREAD_PRIO_INHERIT:
415 /* Check if this mutex is not locked: */
416 if ((*mutex)->m_owner == NULL) {
417 /* Lock the mutex for the running thread: */
418 (*mutex)->m_owner = curthread;
420 THR_SCHED_LOCK(curthread, curthread);
421 /* Track number of priority mutexes owned: */
422 curthread->priority_mutex_count++;
425 * The mutex takes on the attributes of the
426 * running thread when there are no waiters.
428 (*mutex)->m_prio = curthread->active_priority;
429 (*mutex)->m_saved_prio =
430 curthread->inherited_priority;
431 curthread->inherited_priority = (*mutex)->m_prio;
432 THR_SCHED_UNLOCK(curthread, curthread);
434 /* Add to the list of owned mutexes: */
435 MUTEX_ASSERT_NOT_OWNED(*mutex);
436 TAILQ_INSERT_TAIL(&curthread->mutexq,
438 } else if ((*mutex)->m_owner == curthread)
439 ret = mutex_self_trylock(*mutex);
441 /* Return a busy error: */
445 /* POSIX priority protection mutex: */
446 case PTHREAD_PRIO_PROTECT:
447 /* Check for a priority ceiling violation: */
448 if (curthread->active_priority > (*mutex)->m_prio)
451 /* Check if this mutex is not locked: */
452 else if ((*mutex)->m_owner == NULL) {
453 /* Lock the mutex for the running thread: */
454 (*mutex)->m_owner = curthread;
456 THR_SCHED_LOCK(curthread, curthread);
457 /* Track number of priority mutexes owned: */
458 curthread->priority_mutex_count++;
461 * The running thread inherits the ceiling
462 * priority of the mutex and executes at that
465 curthread->active_priority = (*mutex)->m_prio;
466 (*mutex)->m_saved_prio =
467 curthread->inherited_priority;
468 curthread->inherited_priority =
470 THR_SCHED_UNLOCK(curthread, curthread);
471 /* Add to the list of owned mutexes: */
472 MUTEX_ASSERT_NOT_OWNED(*mutex);
473 TAILQ_INSERT_TAIL(&curthread->mutexq,
475 } else if ((*mutex)->m_owner == curthread)
476 ret = mutex_self_trylock(*mutex);
478 /* Return a busy error: */
482 /* Trap invalid mutex types: */
484 /* Return an invalid argument error: */
489 if (ret == 0 && private)
490 THR_CRITICAL_ENTER(curthread);
492 /* Unlock the mutex structure: */
493 THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
495 /* Return the completion status: */
/*
 * pthread_mutex_trylock entry point (application-visible): finish any
 * pending static initialization (non-private), then try the lock.
 */
500 __pthread_mutex_trylock(pthread_mutex_t *mutex)
502 struct pthread *curthread = _get_curthread();
509 * If the mutex is statically initialized, perform the dynamic
512 else if ((*mutex != NULL) ||
513 ((ret = init_static(curthread, mutex)) == 0))
514 ret = mutex_trylock_common(curthread, mutex);
/*
 * libc-internal trylock: identical to the application version except
 * static initialization marks the mutex private (delete-safe).
 */
520 _pthread_mutex_trylock(pthread_mutex_t *mutex)
522 struct pthread *curthread = _get_curthread();
529 * If the mutex is statically initialized, perform the dynamic
530 * initialization marking the mutex private (delete safe):
532 else if ((*mutex != NULL) ||
533 ((ret = init_static_private(curthread, mutex)) == 0))
534 ret = mutex_trylock_common(curthread, mutex);
/*
 * Blocking acquire shared by lock and timedlock.  Validates abstime
 * (EINVAL for negative or >=1e9 nsec, per the not-visible return),
 * then loops: under the mutex structure lock, either take ownership
 * (per-protocol bookkeeping as in mutex_trylock_common), detect a
 * self-lock (mutex_self_lock), or enqueue on m_queue, set the
 * mutex_lock_backout signal-backout hook, set PS_MUTEX_WAIT and yield
 * via _thr_sched_switch.  The loop repeats because a signal handler
 * can interrupt the wait and back the thread out of the queue.  After
 * wakeup the thread dequeues itself if still queued, and only then
 * clears data.mutex/sigbackout (ordering matters: the backout handler
 * reads them).  Timeout maps to ETIMEDOUT (not visible); an interrupt
 * runs the pending continuation for async cancellation.
 * NOTE(review): listing is truncated -- declarations of ret/private,
 * several braces, EINVAL/EDEADLK/ETIMEDOUT assignments and the
 * abstime->tv_sec/tv_nsec copies are not visible here.
 */
540 mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
541 const struct timespec * abstime)
546 THR_ASSERT((m != NULL) && (*m != NULL),
/* NOTE(review): message says "trylock" in the lock path -- likely a
 * copy/paste from mutex_trylock_common; harmless but misleading. */
547 "Uninitialized mutex in pthread_mutex_trylock_basic");
549 if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
550 abstime->tv_nsec >= 1000000000))
553 /* Reset the interrupted flag: */
554 curthread->interrupted = 0;
555 curthread->timeout = 0;
556 curthread->wakeup_time.tv_sec = -1;
558 private = (*m)->m_flags & MUTEX_FLAGS_PRIVATE;
561 * Enter a loop waiting to become the mutex owner. We need a
562 * loop in case the waiting thread is interrupted by a signal
563 * to execute a signal handler. It is not (currently) possible
564 * to remain in the waiting queue while running a handler.
565 * Instead, the thread is interrupted and backed out of the
566 * waiting queue prior to executing the signal handler.
569 /* Lock the mutex structure: */
570 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
573 * If the mutex was statically allocated, properly
574 * initialize the tail queue.
576 if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
577 TAILQ_INIT(&(*m)->m_queue);
578 (*m)->m_flags |= MUTEX_FLAGS_INITED;
582 /* Process according to mutex type: */
583 switch ((*m)->m_protocol) {
584 /* Default POSIX mutex: */
585 case PTHREAD_PRIO_NONE:
586 if ((*m)->m_owner == NULL) {
587 /* Lock the mutex for this thread: */
588 (*m)->m_owner = curthread;
590 /* Add to the list of owned mutexes: */
591 MUTEX_ASSERT_NOT_OWNED(*m);
592 TAILQ_INSERT_TAIL(&curthread->mutexq,
595 THR_CRITICAL_ENTER(curthread);
597 /* Unlock the mutex structure: */
598 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
599 } else if ((*m)->m_owner == curthread) {
600 ret = mutex_self_lock(curthread, *m);
602 /* Unlock the mutex structure: */
603 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
606 * Join the queue of threads waiting to lock
607 * the mutex and save a pointer to the mutex.
609 mutex_queue_enq(*m, curthread);
610 curthread->data.mutex = *m;
611 curthread->sigbackout = mutex_lock_backout;
613 * This thread is active and is in a critical
614 * region (holding the mutex lock); we should
615 * be able to safely set the state.
617 THR_SCHED_LOCK(curthread, curthread);
618 /* Set the wakeup time: */
620 curthread->wakeup_time.tv_sec =
622 curthread->wakeup_time.tv_nsec =
626 THR_SET_STATE(curthread, PS_MUTEX_WAIT);
627 THR_SCHED_UNLOCK(curthread, curthread);
629 /* Unlock the mutex structure: */
630 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
632 /* Schedule the next thread: */
633 _thr_sched_switch(curthread);
635 if (THR_IN_MUTEXQ(curthread)) {
636 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
637 mutex_queue_remove(*m, curthread);
638 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
641 * Only clear these after assuring the
642 * thread is dequeued.
644 curthread->data.mutex = NULL;
645 curthread->sigbackout = NULL;
649 /* POSIX priority inheritence mutex: */
650 case PTHREAD_PRIO_INHERIT:
651 /* Check if this mutex is not locked: */
652 if ((*m)->m_owner == NULL) {
653 /* Lock the mutex for this thread: */
654 (*m)->m_owner = curthread;
656 THR_SCHED_LOCK(curthread, curthread);
657 /* Track number of priority mutexes owned: */
658 curthread->priority_mutex_count++;
661 * The mutex takes on attributes of the
662 * running thread when there are no waiters.
663 * Make sure the thread's scheduling lock is
664 * held while priorities are adjusted.
666 (*m)->m_prio = curthread->active_priority;
668 curthread->inherited_priority;
669 curthread->inherited_priority = (*m)->m_prio;
670 THR_SCHED_UNLOCK(curthread, curthread);
672 /* Add to the list of owned mutexes: */
673 MUTEX_ASSERT_NOT_OWNED(*m);
674 TAILQ_INSERT_TAIL(&curthread->mutexq,
677 THR_CRITICAL_ENTER(curthread);
679 /* Unlock the mutex structure: */
680 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
681 } else if ((*m)->m_owner == curthread) {
682 ret = mutex_self_lock(curthread, *m);
684 /* Unlock the mutex structure: */
685 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
688 * Join the queue of threads waiting to lock
689 * the mutex and save a pointer to the mutex.
691 mutex_queue_enq(*m, curthread);
692 curthread->data.mutex = *m;
693 curthread->sigbackout = mutex_lock_backout;
696 * This thread is active and is in a critical
697 * region (holding the mutex lock); we should
698 * be able to safely set the state.
700 if (curthread->active_priority > (*m)->m_prio)
701 /* Adjust priorities: */
702 mutex_priority_adjust(curthread, *m);
704 THR_SCHED_LOCK(curthread, curthread);
705 /* Set the wakeup time: */
707 curthread->wakeup_time.tv_sec =
709 curthread->wakeup_time.tv_nsec =
712 THR_SET_STATE(curthread, PS_MUTEX_WAIT);
713 THR_SCHED_UNLOCK(curthread, curthread);
715 /* Unlock the mutex structure: */
716 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
718 /* Schedule the next thread: */
719 _thr_sched_switch(curthread);
721 if (THR_IN_MUTEXQ(curthread)) {
722 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
723 mutex_queue_remove(*m, curthread);
724 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
727 * Only clear these after assuring the
728 * thread is dequeued.
730 curthread->data.mutex = NULL;
731 curthread->sigbackout = NULL;
735 /* POSIX priority protection mutex: */
736 case PTHREAD_PRIO_PROTECT:
737 /* Check for a priority ceiling violation: */
738 if (curthread->active_priority > (*m)->m_prio) {
739 /* Unlock the mutex structure: */
740 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
743 /* Check if this mutex is not locked: */
744 else if ((*m)->m_owner == NULL) {
746 * Lock the mutex for the running
749 (*m)->m_owner = curthread;
751 THR_SCHED_LOCK(curthread, curthread);
752 /* Track number of priority mutexes owned: */
753 curthread->priority_mutex_count++;
756 * The running thread inherits the ceiling
757 * priority of the mutex and executes at that
758 * priority. Make sure the thread's
759 * scheduling lock is held while priorities
762 curthread->active_priority = (*m)->m_prio;
764 curthread->inherited_priority;
765 curthread->inherited_priority = (*m)->m_prio;
766 THR_SCHED_UNLOCK(curthread, curthread);
768 /* Add to the list of owned mutexes: */
769 MUTEX_ASSERT_NOT_OWNED(*m);
770 TAILQ_INSERT_TAIL(&curthread->mutexq,
773 THR_CRITICAL_ENTER(curthread);
775 /* Unlock the mutex structure: */
776 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
777 } else if ((*m)->m_owner == curthread) {
778 ret = mutex_self_lock(curthread, *m);
780 /* Unlock the mutex structure: */
781 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
784 * Join the queue of threads waiting to lock
785 * the mutex and save a pointer to the mutex.
787 mutex_queue_enq(*m, curthread);
788 curthread->data.mutex = *m;
789 curthread->sigbackout = mutex_lock_backout;
791 /* Clear any previous error: */
792 curthread->error = 0;
795 * This thread is active and is in a critical
796 * region (holding the mutex lock); we should
797 * be able to safely set the state.
800 THR_SCHED_LOCK(curthread, curthread);
801 /* Set the wakeup time: */
803 curthread->wakeup_time.tv_sec =
805 curthread->wakeup_time.tv_nsec =
808 THR_SET_STATE(curthread, PS_MUTEX_WAIT);
809 THR_SCHED_UNLOCK(curthread, curthread);
811 /* Unlock the mutex structure: */
812 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
814 /* Schedule the next thread: */
815 _thr_sched_switch(curthread);
817 if (THR_IN_MUTEXQ(curthread)) {
818 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
819 mutex_queue_remove(*m, curthread);
820 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
823 * Only clear these after assuring the
824 * thread is dequeued.
826 curthread->data.mutex = NULL;
827 curthread->sigbackout = NULL;
830 * The threads priority may have changed while
831 * waiting for the mutex causing a ceiling
834 ret = curthread->error;
835 curthread->error = 0;
839 /* Trap invalid mutex types: */
841 /* Unlock the mutex structure: */
842 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
844 /* Return an invalid argument error: */
849 } while (((*m)->m_owner != curthread) && (ret == 0) &&
850 (curthread->interrupted == 0) && (curthread->timeout == 0));
852 if (ret == 0 && (*m)->m_owner != curthread && curthread->timeout)
856 * Check to see if this thread was interrupted and
857 * is still in the mutex queue of waiting threads:
859 if (curthread->interrupted != 0) {
860 /* Remove this thread from the mutex queue. */
861 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
862 if (THR_IN_SYNCQ(curthread))
863 mutex_queue_remove(*m, curthread);
864 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
866 /* Check for asynchronous cancellation. */
867 if (curthread->continuation != NULL)
868 curthread->continuation((void *) curthread);
871 /* Return the completion status: */
/*
 * pthread_mutex_lock entry point (application-visible): bootstrap the
 * library if needed so a mutex lock can be the first pthread call,
 * finish static initialization, then block in mutex_lock_common with
 * no timeout.  Also exported to libc internals as _thr_mutex_lock.
 */
876 __pthread_mutex_lock(pthread_mutex_t *m)
878 struct pthread *curthread;
881 if (_thr_initial == NULL)
882 _libpthread_init(NULL);
884 curthread = _get_curthread();
889 * If the mutex is statically initialized, perform the dynamic
892 else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
893 ret = mutex_lock_common(curthread, m, NULL);
898 __strong_reference(__pthread_mutex_lock, _thr_mutex_lock);
/*
 * libc-internal lock: same as __pthread_mutex_lock but static
 * initialization marks the mutex private (delete-safe).
 */
901 _pthread_mutex_lock(pthread_mutex_t *m)
903 struct pthread *curthread;
906 if (_thr_initial == NULL)
907 _libpthread_init(NULL);
908 curthread = _get_curthread();
914 * If the mutex is statically initialized, perform the dynamic
915 * initialization marking it private (delete safe):
917 else if ((*m != NULL) ||
918 ((ret = init_static_private(curthread, m)) == 0))
919 ret = mutex_lock_common(curthread, m, NULL);
/*
 * pthread_mutex_timedlock entry point: like __pthread_mutex_lock but
 * passes the caller's absolute timeout through to mutex_lock_common.
 */
925 __pthread_mutex_timedlock(pthread_mutex_t *m,
926 const struct timespec *abs_timeout)
928 struct pthread *curthread;
931 if (_thr_initial == NULL)
932 _libpthread_init(NULL);
934 curthread = _get_curthread();
939 * If the mutex is statically initialized, perform the dynamic
942 else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
943 ret = mutex_lock_common(curthread, m, abs_timeout);
/*
 * libc-internal timedlock: same as the application version but static
 * initialization marks the mutex private (delete-safe).
 */
949 _pthread_mutex_timedlock(pthread_mutex_t *m,
950 const struct timespec *abs_timeout)
952 struct pthread *curthread;
955 if (_thr_initial == NULL)
956 _libpthread_init(NULL);
957 curthread = _get_curthread();
963 * If the mutex is statically initialized, perform the dynamic
964 * initialization marking it private (delete safe):
966 else if ((*m != NULL) ||
967 ((ret = init_static_private(curthread, m)) == 0))
968 ret = mutex_lock_common(curthread, m, abs_timeout);
/*
 * pthread_mutex_unlock: plain unlock, no reference added (contrast
 * _mutex_cv_unlock).  Also exported as _thr_mutex_unlock.
 */
974 _pthread_mutex_unlock(pthread_mutex_t *m)
976 return (mutex_unlock_common(m, /* add reference */ 0));
979 __strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);
/*
 * Condition-variable helper: unlock while bumping m_refcount so the
 * mutex cannot be destroyed while the cv wait is in progress.
 */
982 _mutex_cv_unlock(pthread_mutex_t *m)
984 return (mutex_unlock_common(m, /* add reference */ 1));
/*
 * Condition-variable helper: re-acquire the mutex after a cv wait.
 * NOTE(review): listing is truncated -- the m_refcount decrement that
 * balances _mutex_cv_unlock (presumably between the lock acquire and
 * release below) is not visible here.
 */
988 _mutex_cv_lock(pthread_mutex_t *m)
990 struct pthread *curthread;
993 curthread = _get_curthread();
994 if ((ret = _pthread_mutex_lock(m)) == 0) {
995 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
997 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
/*
 * Handle trylock on a mutex the caller already owns: EBUSY for
 * non-recursive types (returns not visible in this truncated listing),
 * count increment for PTHREAD_MUTEX_RECURSIVE.
 */
1003 mutex_self_trylock(pthread_mutex_t m)
1007 switch (m->m_type) {
1008 /* case PTHREAD_MUTEX_DEFAULT: */
1009 case PTHREAD_MUTEX_ERRORCHECK:
1010 case PTHREAD_MUTEX_NORMAL:
1011 case PTHREAD_MUTEX_ADAPTIVE_NP:
1015 case PTHREAD_MUTEX_RECURSIVE:
1016 /* Increment the lock count: */
1021 /* Trap invalid mutex types; */
/*
 * Handle a blocking lock on a mutex the caller already owns.  Private
 * (libc-internal) mutexes must never recurse -- that's a library bug,
 * so PANIC.  Error-checking/adaptive types return EDEADLK (return not
 * visible in this truncated listing); NORMAL deadlocks on purpose per
 * SUSv2 by parking in PS_DEADLOCK; RECURSIVE bumps the count.
 * Called with the mutex structure lock held; the NORMAL path releases
 * it before switching away.
 */
1029 mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
1034 * Don't allow evil recursive mutexes for private use
1035 * in libc and libpthread.
1037 if (m->m_flags & MUTEX_FLAGS_PRIVATE)
1038 PANIC("Recurse on a private mutex.");
1040 switch (m->m_type) {
1041 /* case PTHREAD_MUTEX_DEFAULT: */
1042 case PTHREAD_MUTEX_ERRORCHECK:
1043 case PTHREAD_MUTEX_ADAPTIVE_NP:
1045 * POSIX specifies that mutexes should return EDEADLK if a
1046 * recursive lock is detected.
1051 case PTHREAD_MUTEX_NORMAL:
1053 * What SS2 define as a 'normal' mutex. Intentionally
1054 * deadlock on attempts to get a lock you already own.
1057 THR_SCHED_LOCK(curthread, curthread);
1058 THR_SET_STATE(curthread, PS_DEADLOCK);
1059 THR_SCHED_UNLOCK(curthread, curthread);
1061 /* Unlock the mutex structure: */
1062 THR_LOCK_RELEASE(curthread, &m->m_lock);
1064 /* Schedule the next thread: */
1065 _thr_sched_switch(curthread);
1068 case PTHREAD_MUTEX_RECURSIVE:
1069 /* Increment the lock count: */
1074 /* Trap invalid mutex types; */
/*
 * Core unlock used by pthread_mutex_unlock and _mutex_cv_unlock.
 * Under the mutex structure lock, per protocol: verify the caller is
 * the owner (EPERM, not visible in this listing), handle recursive
 * counts, restore the owner's inherited/active priority for the two
 * priority protocols, remove the mutex from the owner's queue, and
 * hand it directly to the highest-priority waiter via mutex_handoff
 * (which yields a kse_mailbox -- presumably kicked after the release;
 * the final wakeup is not visible here).  add_reference != 0 bumps
 * m_refcount so a cv wait can pin the mutex.  Private mutexes leave
 * the critical region entered at lock time.
 * NOTE(review): listing is truncated -- declarations, EPERM/EINVAL
 * returns, count decrements/clears, TAILQ_REMOVE continuations,
 * break statements and the kmbx kick are not visible.
 */
1082 mutex_unlock_common(pthread_mutex_t *m, int add_reference)
1084 struct pthread *curthread = _get_curthread();
1085 struct kse_mailbox *kmbx = NULL;
1088 if (m == NULL || *m == NULL)
1091 /* Lock the mutex structure: */
1092 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
1094 /* Process according to mutex type: */
1095 switch ((*m)->m_protocol) {
1096 /* Default POSIX mutex: */
1097 case PTHREAD_PRIO_NONE:
1099 * Check if the running thread is not the owner of the
1102 if ((*m)->m_owner != curthread)
1104 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1105 ((*m)->m_count > 0))
1106 /* Decrement the count: */
1110 * Clear the count in case this is a recursive
1115 /* Remove the mutex from the threads queue. */
1116 MUTEX_ASSERT_IS_OWNED(*m);
1117 TAILQ_REMOVE(&(*m)->m_owner->mutexq,
1119 MUTEX_INIT_LINK(*m);
1122 * Hand off the mutex to the next waiting
1125 kmbx = mutex_handoff(curthread, *m);
1129 /* POSIX priority inheritence mutex: */
1130 case PTHREAD_PRIO_INHERIT:
1132 * Check if the running thread is not the owner of the
1135 if ((*m)->m_owner != curthread)
1137 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1138 ((*m)->m_count > 0))
1139 /* Decrement the count: */
1143 * Clear the count in case this is recursive
1149 * Restore the threads inherited priority and
1150 * recompute the active priority (being careful
1151 * not to override changes in the threads base
1152 * priority subsequent to locking the mutex).
1154 THR_SCHED_LOCK(curthread, curthread);
1155 curthread->inherited_priority =
1157 curthread->active_priority =
1158 MAX(curthread->inherited_priority,
1159 curthread->base_priority);
1162 * This thread now owns one less priority mutex.
1164 curthread->priority_mutex_count--;
1165 THR_SCHED_UNLOCK(curthread, curthread);
1167 /* Remove the mutex from the threads queue. */
1168 MUTEX_ASSERT_IS_OWNED(*m);
1169 TAILQ_REMOVE(&(*m)->m_owner->mutexq,
1171 MUTEX_INIT_LINK(*m);
1174 * Hand off the mutex to the next waiting
1177 kmbx = mutex_handoff(curthread, *m);
1181 /* POSIX priority ceiling mutex: */
1182 case PTHREAD_PRIO_PROTECT:
1184 * Check if the running thread is not the owner of the
1187 if ((*m)->m_owner != curthread)
1189 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1190 ((*m)->m_count > 0))
1191 /* Decrement the count: */
1195 * Clear the count in case this is a recursive
1201 * Restore the threads inherited priority and
1202 * recompute the active priority (being careful
1203 * not to override changes in the threads base
1204 * priority subsequent to locking the mutex).
1206 THR_SCHED_LOCK(curthread, curthread);
1207 curthread->inherited_priority =
1209 curthread->active_priority =
1210 MAX(curthread->inherited_priority,
1211 curthread->base_priority);
1214 * This thread now owns one less priority mutex.
1216 curthread->priority_mutex_count--;
1217 THR_SCHED_UNLOCK(curthread, curthread);
1219 /* Remove the mutex from the threads queue. */
1220 MUTEX_ASSERT_IS_OWNED(*m);
1221 TAILQ_REMOVE(&(*m)->m_owner->mutexq,
1223 MUTEX_INIT_LINK(*m);
1226 * Hand off the mutex to the next waiting
1229 kmbx = mutex_handoff(curthread, *m);
1233 /* Trap invalid mutex types: */
1235 /* Return an invalid argument error: */
1240 if ((ret == 0) && (add_reference != 0))
1241 /* Increment the reference count: */
1244 /* Leave the critical region if this is a private mutex. */
1245 if ((ret == 0) && ((*m)->m_flags & MUTEX_FLAGS_PRIVATE))
1246 THR_CRITICAL_LEAVE(curthread);
1248 /* Unlock the mutex structure: */
1249 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
1255 /* Return the completion status: */
/*
 * React to a base-priority change of `pthread`.  Two phases:
 * (1) if the thread owns priority mutexes, rescan them (under the
 *     first owned mutex's lock, which acts as a barrier against
 *     concurrent release/priority changes) to recompute priorities;
 * (2) if propagate_prio is set and the thread is waiting on a mutex,
 *     re-sort it in that mutex's wait queue and, for PRIO_INHERIT,
 *     propagate the new priority to the owner chain.  The sched lock
 *     and mutex lock are taken alternately, never together, to avoid
 *     lock-order deadlock -- hence the revalidation after reacquire.
 * NOTE(review): listing is truncated -- braces and an else/continue
 * structure around the SYNCQ checks are not visible here.
 */
1261 * This function is called when a change in base priority occurs for
1262 * a thread that is holding or waiting for a priority protection or
1263 * inheritence mutex. A change in a threads base priority can effect
1264 * changes to active priorities of other threads and to the ordering
1265 * of mutex locking by waiting threads.
1267 * This must be called without the target thread's scheduling lock held.
1270 _mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
1273 struct pthread_mutex *m;
1275 /* Adjust the priorites of any owned priority mutexes: */
1276 if (pthread->priority_mutex_count > 0) {
1278 * Rescan the mutexes owned by this thread and correct
1279 * their priorities to account for this threads change
1280 * in priority. This has the side effect of changing
1281 * the threads active priority.
1283 * Be sure to lock the first mutex in the list of owned
1284 * mutexes. This acts as a barrier against another
1285 * simultaneous call to change the threads priority
1286 * and from the owning thread releasing the mutex.
1288 m = TAILQ_FIRST(&pthread->mutexq);
1290 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1292 * Make sure the thread still owns the lock.
1294 if (m == TAILQ_FIRST(&pthread->mutexq))
1295 mutex_rescan_owned(curthread, pthread,
1296 /* rescan all owned */ NULL);
1297 THR_LOCK_RELEASE(curthread, &m->m_lock);
1302 * If this thread is waiting on a priority inheritence mutex,
1303 * check for priority adjustments. A change in priority can
1304 * also cause a ceiling violation(*) for a thread waiting on
1305 * a priority protection mutex; we don't perform the check here
1306 * as it is done in pthread_mutex_unlock.
1308 * (*) It should be noted that a priority change to a thread
1309 * _after_ taking and owning a priority ceiling mutex
1310 * does not affect ownership of that mutex; the ceiling
1311 * priority is only checked before mutex ownership occurs.
1313 if (propagate_prio != 0) {
1315 * Lock the thread's scheduling queue. This is a bit
1316 * convoluted; the "in synchronization queue flag" can
1317 * only be cleared with both the thread's scheduling and
1318 * mutex locks held. The thread's pointer to the wanted
1319 * mutex is guaranteed to be valid during this time.
1321 THR_SCHED_LOCK(curthread, pthread);
1323 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
1324 ((m = pthread->data.mutex) == NULL))
1325 THR_SCHED_UNLOCK(curthread, pthread);
1328 * This thread is currently waiting on a mutex; unlock
1329 * the scheduling queue lock and lock the mutex. We
1330 * can't hold both at the same time because the locking
1331 * order could cause a deadlock.
1333 THR_SCHED_UNLOCK(curthread, pthread);
1334 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1337 * Check to make sure this thread is still in the
1338 * same state (the lock above can yield the CPU to
1339 * another thread or the thread may be running on
1342 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1343 (pthread->data.mutex == m)) {
1345 * Remove and reinsert this thread into
1346 * the list of waiting threads to preserve
1347 * decreasing priority order.
1349 mutex_queue_remove(m, pthread);
1350 mutex_queue_enq(m, pthread);
1352 if (m->m_protocol == PTHREAD_PRIO_INHERIT)
1353 /* Adjust priorities: */
1354 mutex_priority_adjust(curthread, m);
1357 /* Unlock the mutex structure: */
1358 THR_LOCK_RELEASE(curthread, &m->m_lock);
1364 * Called when a new thread is added to the mutex waiting queue, or
1365 * when the priority of a thread already in the waiting queue changes.
1368 * This must be called with the mutex locked by the current thread.
/*
 * Propagate a priority change through a chain of PRIO_INHERIT mutexes.
 *
 * Starting from "mutex" (locked by the caller), recompute the mutex
 * priority as the max of the top waiter's active priority, the saved
 * priority, and the owner's base priority.  If it changed, rescan the
 * owner's later-acquired mutexes (which may change the owner's active
 * priority) and, if the owner is itself blocked on another PI mutex,
 * follow that link and repeat until no further priority change occurs.
 *
 * NOTE(review): lines are elided from this view (braces, "done = 1;",
 * the early "return;", etc.); comments describe visible statements only.
 */
1371 mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
1373 pthread_mutex_t m = mutex;
1374 struct pthread *pthread_next, *pthread = mutex->m_owner;
1375 int done, temp_prio;
1378 * Calculate the mutex priority as the maximum of the highest
1379 * active priority of any waiting threads and the owning threads
1380 * active priority(*).
1382 * (*) Because the owning threads current active priority may
1383 * reflect priority inherited from this mutex (and the mutex
1384 * priority may have changed) we must recalculate the active
1385 * priority based on the threads saved inherited priority
1386 * and its base priority.
1388 pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
1389 temp_prio = MAX(pthread_next->active_priority,
1390 MAX(m->m_saved_prio, pthread->base_priority));
1392 /* See if this mutex really needs adjusting: */
1393 if (temp_prio == m->m_prio)
1394 /* No need to propagate the priority: */
1397 /* Set new priority of the mutex: */
1398 m->m_prio = temp_prio;
1401 * Don't unlock the mutex passed in as an argument. It is
1402 * expected to be locked and unlocked by the caller.
1407 * Save the threads priority before rescanning the
/* Remember the owner's active priority so we can detect a change below. */
1410 temp_prio = pthread->active_priority;
1413 * Fix the priorities for all mutexes held by the owning
1414 * thread since taking this mutex. This also has a
1415 * potential side-effect of changing the threads priority.
1417 * At this point the mutex is locked by the current thread.
1418 * The owning thread can't release the mutex until it is
1419 * unlocked, so we should be able to safely walk its list
1422 mutex_rescan_owned(curthread, pthread, m)
1425 * If this isn't the first time through the loop,
1426 * the current mutex needs to be unlocked.
1429 THR_LOCK_RELEASE(curthread, &m->m_lock);
1431 /* Assume we're done unless told otherwise: */
1435 * If the thread is currently waiting on a mutex, check
1436 * to see if the threads new priority has affected the
1437 * priority of the mutex.
1439 if ((temp_prio != pthread->active_priority) &&
1440 ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1441 ((m = pthread->data.mutex) != NULL) &&
1442 (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
1443 /* Lock the mutex structure: */
1444 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1447 * Make sure the thread is still waiting on the
1450 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1451 (m == pthread->data.mutex)) {
1453 * The priority for this thread has changed.
1454 * Remove and reinsert this thread into the
1455 * list of waiting threads to preserve
1456 * decreasing priority order.
1458 mutex_queue_remove(m, pthread);
1459 mutex_queue_enq(m, pthread);
1462 * Grab the waiting thread with highest
1465 pthread_next = TAILQ_FIRST(&m->m_queue);
1468 * Calculate the mutex priority as the maximum
1469 * of the highest active priority of any
1470 * waiting threads and the owning threads
1473 temp_prio = MAX(pthread_next->active_priority,
1474 MAX(m->m_saved_prio,
1475 m->m_owner->base_priority));
1477 if (temp_prio != m->m_prio) {
1479 * The priority needs to be propagated
1480 * to the mutex this thread is waiting
1481 * on and up to the owner of that mutex.
1483 m->m_prio = temp_prio;
/* Walk up the chain: the next iteration adjusts this owner. */
1484 pthread = m->m_owner;
1486 /* We're not done yet: */
1490 /* Only release the mutex if we're done: */
1492 THR_LOCK_RELEASE(curthread, &m->m_lock);
1494 } while (done == 0);
/*
 * Re-derive the priority of every PRIO_INHERIT mutex a thread owns,
 * then fix the thread's inherited and active priorities to match.
 *
 * If "mutex" is NULL the walk starts at the head of the owned list
 * with no inherited priority; otherwise it starts at the mutex
 * following "mutex" (whose priority is assumed already correct) and
 * inherits from it.  If the resulting active priority differs from
 * the thread's current one, the thread's scheduling queue is locked
 * and its run-queue position is adjusted: POSIX requires head
 * insertion when the priority is being lowered and the thread owns
 * priority mutexes; otherwise it is inserted at the tail.
 *
 * NOTE(review): "else" branches and braces are elided from this view;
 * e.g. the "inherited_prio = 0;"-style initialization implied by the
 * "no inherited priority yet" comment is not visible — confirm against
 * the full source.
 */
1498 mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
1499 struct pthread_mutex *mutex)
1501 struct pthread_mutex *m;
1502 struct pthread *pthread_next;
1503 int active_prio, inherited_prio;
1506 * Start walking the mutexes the thread has taken since
1507 * taking this mutex.
1509 if (mutex == NULL) {
1511 * A null mutex means start at the beginning of the owned
1514 m = TAILQ_FIRST(&pthread->mutexq);
1516 /* There is no inherited priority yet. */
1520 * The caller wants to start after a specific mutex. It
1521 * is assumed that this mutex is a priority inheritence
1522 * mutex and that its priority has been correctly
1525 m = TAILQ_NEXT(mutex, m_qe);
1527 /* Start inheriting priority from the specified mutex. */
1528 inherited_prio = mutex->m_prio;
1530 active_prio = MAX(inherited_prio, pthread->base_priority);
1532 for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
1534 * We only want to deal with priority inheritence
1535 * mutexes. This might be optimized by only placing
1536 * priority inheritence mutexes into the owned mutex
1537 * list, but it may prove to be useful having all
1538 * owned mutexes in this list. Consider a thread
1539 * exiting while holding mutexes...
1541 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1543 * Fix the owners saved (inherited) priority to
1544 * reflect the priority of the previous mutex.
1546 m->m_saved_prio = inherited_prio;
/* A non-empty wait queue raises the mutex to its top waiter. */
1548 if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1549 /* Recalculate the priority of the mutex: */
1550 m->m_prio = MAX(active_prio,
1551 pthread_next->active_priority);
1553 m->m_prio = active_prio;
1555 /* Recalculate new inherited and active priorities: */
1556 inherited_prio = m->m_prio;
1557 active_prio = MAX(m->m_prio, pthread->base_priority);
1562 * Fix the threads inherited priority and recalculate its
1565 pthread->inherited_priority = inherited_prio;
1566 active_prio = MAX(inherited_prio, pthread->base_priority);
1568 if (active_prio != pthread->active_priority) {
1569 /* Lock the thread's scheduling queue: */
1570 THR_SCHED_LOCK(curthread, pthread);
1572 if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
1574 * This thread is not in a run queue. Just set
1575 * its active priority.
1577 pthread->active_priority = active_prio;
1581 * This thread is in a run queue. Remove it from
1582 * the queue before changing its priority:
1584 THR_RUNQ_REMOVE(pthread);
1587 * POSIX states that if the priority is being
1588 * lowered, the thread must be inserted at the
1589 * head of the queue for its priority if it owns
1590 * any priority protection or inheritence mutexes.
1592 if ((active_prio < pthread->active_priority) &&
1593 (pthread->priority_mutex_count > 0)) {
1594 /* Set the new active priority. */
1595 pthread->active_priority = active_prio;
1597 THR_RUNQ_INSERT_HEAD(pthread);
1599 /* Set the new active priority. */
1600 pthread->active_priority = active_prio;
1602 THR_RUNQ_INSERT_TAIL(pthread);
1605 THR_SCHED_UNLOCK(curthread, pthread);
/*
 * Unlock every MUTEX_FLAGS_PRIVATE mutex on the thread's owned list.
 * The next pointer is saved before unlocking because a successful
 * unlock removes the mutex from the owned list, invalidating m's
 * linkage for iteration.
 */
1610 _mutex_unlock_private(pthread_t pthread)
1612 struct pthread_mutex *m, *m_next;
1614 for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1615 m_next = TAILQ_NEXT(m, m_qe);
1616 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1617 _pthread_mutex_unlock(&m);
1622 * This is called by the current thread when it wants to back out of a
1623 * mutex_lock in order to run a signal handler.
/*
 * Signal-handler backout hook: undo a pending mutex_lock so the
 * current thread can run a signal handler.
 *
 * If the thread is still flagged as being in a sync queue, it either
 * (a) already received the mutex via direct handoff — in which case
 * the mutex is simply unlocked again — or (b) is still queued, in
 * which case it is removed from the wait queue and its mutex pointer
 * cleared.  Finally the backout hook is disarmed.
 */
1626 mutex_lock_backout(void *arg)
1628 struct pthread *curthread = (struct pthread *)arg;
1629 struct pthread_mutex *m;
1631 if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1633 * Any other thread may clear the "in sync queue flag",
1634 * but only the current thread can clear the pointer
1635 * to the mutex. So if the flag is set, we can
1636 * guarantee that the pointer to the mutex is valid.
1637 * The only problem may be if the mutex is destroyed
1638 * out from under us, but that should be considered
1639 * an application bug.
1641 m = curthread->data.mutex;
1643 /* Lock the mutex structure: */
1644 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1648 * Check to make sure this thread doesn't already own
1649 * the mutex. Since mutexes are unlocked with direct
1650 * handoffs, it is possible the previous owner gave it
1651 * to us after we checked the sync queue flag and before
1652 * we locked the mutex structure.
1654 if (m->m_owner == curthread) {
1655 THR_LOCK_RELEASE(curthread, &m->m_lock);
1656 mutex_unlock_common(&m, /* add_reference */ 0);
1659 * Remove ourselves from the mutex queue and
1660 * clear the pointer to the mutex. We may no
1661 * longer be in the mutex queue, but the removal
1662 * function will DTRT.
1664 mutex_queue_remove(m, curthread);
1665 curthread->data.mutex = NULL;
1666 THR_LOCK_RELEASE(curthread, &m->m_lock);
1669 /* No need to call this again. */
1670 curthread->sigbackout = NULL;
1674 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order and hand the mutex off to it.
1677 * In order to properly dequeue a thread from the mutex queue and
1678 * make it runnable without the possibility of errant wakeups, it
1679 * is necessary to lock the thread's scheduling queue while also
1680 * holding the mutex lock.
/*
 * Hand a just-released mutex directly to the highest-priority eligible
 * waiter.  Returns the kse mailbox of the thread made runnable (NULL
 * if none), so the caller can wake its KSE after dropping locks.
 *
 * Waiters are dequeued in priority order; per-protocol bookkeeping is
 * applied (NONE: plain ownership; INHERIT: ownership plus priority
 * inheritance accounting; PROTECT: ceiling check — a waiter whose
 * active priority exceeds the ceiling gets EINVAL instead of the
 * mutex).  A dequeued thread is always made runnable; if it did not
 * become the owner, the loop continues with the next waiter.
 *
 * NOTE(review): "break"s, braces, and some assignments (e.g. the
 * "mutex has no priority" reset implied at the end) are elided from
 * this view — confirm against the full source.
 */
1682 static struct kse_mailbox *
1683 mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
1685 struct kse_mailbox *kmbx = NULL;
1686 struct pthread *pthread;
1688 /* Keep dequeueing until we find a valid thread: */
1689 mutex->m_owner = NULL;
1690 pthread = TAILQ_FIRST(&mutex->m_queue);
1691 while (pthread != NULL) {
1692 /* Take the thread's scheduling lock: */
1693 THR_SCHED_LOCK(curthread, pthread);
1695 /* Remove the thread from the mutex queue: */
1696 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1697 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1700 * Only exit the loop if the thread hasn't been
1703 switch (mutex->m_protocol) {
1704 case PTHREAD_PRIO_NONE:
1706 * Assign the new owner and add the mutex to the
1707 * thread's list of owned mutexes.
1709 mutex->m_owner = pthread;
1710 TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
1713 case PTHREAD_PRIO_INHERIT:
1715 * Assign the new owner and add the mutex to the
1716 * thread's list of owned mutexes.
1718 mutex->m_owner = pthread;
1719 TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
1721 /* Track number of priority mutexes owned: */
1722 pthread->priority_mutex_count++;
1725 * Set the priority of the mutex. Since our waiting
1726 * threads are in descending priority order, the
1727 * priority of the mutex becomes the active priority
1728 * of the thread we just dequeued.
1730 mutex->m_prio = pthread->active_priority;
1732 /* Save the owning threads inherited priority: */
1733 mutex->m_saved_prio = pthread->inherited_priority;
1736 * The owning threads inherited priority now becomes
1737 * his active priority (the priority of the mutex).
1739 pthread->inherited_priority = mutex->m_prio;
1742 case PTHREAD_PRIO_PROTECT:
1743 if (pthread->active_priority > mutex->m_prio) {
1745 * Either the mutex ceiling priority has
1746 * been lowered and/or this threads priority
1747 * has been raised subsequent to the thread
1748 * being queued on the waiting list.
/* Ceiling violation: refuse the handoff, wake the waiter with EINVAL. */
1750 pthread->error = EINVAL;
1754 * Assign the new owner and add the mutex
1755 * to the thread's list of owned mutexes.
1757 mutex->m_owner = pthread;
1758 TAILQ_INSERT_TAIL(&pthread->mutexq,
1761 /* Track number of priority mutexes owned: */
1762 pthread->priority_mutex_count++;
1765 * Save the owning threads inherited
1768 mutex->m_saved_prio =
1769 pthread->inherited_priority;
1772 * The owning thread inherits the ceiling
1773 * priority of the mutex and executes at
1776 pthread->inherited_priority = mutex->m_prio;
1777 pthread->active_priority = mutex->m_prio;
1783 /* Make the thread runnable and unlock the scheduling queue: */
1784 kmbx = _thr_setrunnable_unlocked(pthread);
1786 /* Add a preemption point. */
1787 if ((curthread->kseg == pthread->kseg) &&
1788 (pthread->active_priority > curthread->active_priority))
1789 curthread->critical_yield = 1;
1791 if (mutex->m_owner == pthread) {
1792 /* We're done; a valid owner was found. */
1793 if (mutex->m_flags & MUTEX_FLAGS_PRIVATE)
1794 THR_CRITICAL_ENTER(pthread);
1795 THR_SCHED_UNLOCK(curthread, pthread);
1798 THR_SCHED_UNLOCK(curthread, pthread);
1799 /* Get the next thread from the waiting queue: */
1800 pthread = TAILQ_NEXT(pthread, sqe);
1803 if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
1804 /* This mutex has no priority: */
1810 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
/*
 * Pop waiters off the head of the mutex queue until one is found that
 * has not been interrupted; interrupted waiters are dropped (their
 * sync-queue flag is cleared, but they are not returned).
 *
 * NOTE(review): the loop exit and the function's return statement are
 * elided from this view.
 */
1813 static inline pthread_t
1814 mutex_queue_deq(struct pthread_mutex *mutex)
1818 while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1819 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1820 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1823 * Only exit the loop if the thread hasn't been
1826 if (pthread->interrupted == 0)
1834 * Remove a waiting thread from a mutex queue in descending priority order.
/*
 * Remove a thread from a mutex wait queue, if it is actually queued.
 * The sync-queue flag guards the TAILQ_REMOVE, so calling this for a
 * thread that is no longer queued is a harmless no-op.
 */
1837 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1839 if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1840 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1841 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1846 * Enqueue a waiting thread to a queue in descending priority order.
/*
 * Insert a thread into a mutex wait queue, keeping the queue sorted
 * in descending active-priority order (FIFO among equal priorities,
 * since the scan inserts after all threads of >= priority).  Fast
 * path: append at the tail when the new thread's priority does not
 * exceed the current tail's.  The thread must not already be on a
 * sync queue (asserted).
 */
1849 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1851 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1853 THR_ASSERT_NOT_IN_SYNCQ(pthread);
1855 * For the common case of all threads having equal priority,
1856 * we perform a quick check against the priority of the thread
1857 * at the tail of the queue.
1859 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1860 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
/* Otherwise scan from the head for the first lower-priority thread. */
1862 tid = TAILQ_FIRST(&mutex->m_queue);
1863 while (pthread->active_priority <= tid->active_priority)
1864 tid = TAILQ_NEXT(tid, sqe);
1865 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1867 pthread->sflags |= THR_FLAGS_IN_SYNCQ;