2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 #include <sys/param.h>
39 #include <sys/queue.h>
41 #include "thr_private.h"
/*
 * Debug-build invariant helpers.  When _PTHREADS_INVARIANTS is defined,
 * these macros verify the mutex's TAILQ linkage (m_qe) to catch a mutex
 * being double-queued or released while not on an owner's list, and
 * PANIC() on violation; otherwise they compile away to nothing.
 *
 * NOTE(review): the "} while (0)" terminators, the #else, and the #endif
 * appear to be missing from this excerpt of the file -- the macro bodies
 * below are visibly truncated.  Verify against the upstream source before
 * editing; no comments are inserted inside the backslash-continued bodies
 * because doing so would change the (already broken) macro expansions.
 */
43 #if defined(_PTHREADS_INVARIANTS)
44 #define MUTEX_INIT_LINK(m) do { \
45 (m)->m_qe.tqe_prev = NULL; \
46 (m)->m_qe.tqe_next = NULL; \
48 #define MUTEX_ASSERT_IS_OWNED(m) do { \
49 if ((m)->m_qe.tqe_prev == NULL) \
50 PANIC("mutex is not on list"); \
52 #define MUTEX_ASSERT_NOT_OWNED(m) do { \
53 if (((m)->m_qe.tqe_prev != NULL) || \
54 ((m)->m_qe.tqe_next != NULL)) \
55 PANIC("mutex is on list"); \
57 #define THR_ASSERT_NOT_IN_SYNCQ(thr) do { \
58 THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
59 "thread in syncq when it shouldn't be."); \
62 #define MUTEX_INIT_LINK(m)
63 #define MUTEX_ASSERT_IS_OWNED(m)
64 #define MUTEX_ASSERT_NOT_OWNED(m)
65 #define THR_ASSERT_NOT_IN_SYNCQ(thr)
68 #define THR_IN_MUTEXQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
69 #define MUTEX_DESTROY(m) do { \
/*
 * Forward declarations for the file-local helpers implemented below:
 * self-lock handling (deadlock/recursion on an already-owned mutex),
 * the common unlock path, priority propagation for PRIO_INHERIT /
 * PRIO_PROTECT mutexes, and the priority-ordered waiter queue ops.
 * NOTE(review): mutex_queue_deq/remove/enq definitions are not visible
 * in this excerpt; their contracts are inferred from call sites only.
 */
77 static long mutex_handoff(struct pthread *, struct pthread_mutex *);
78 static int mutex_self_trylock(struct pthread *, pthread_mutex_t);
79 static int mutex_self_lock(struct pthread *, pthread_mutex_t,
80 const struct timespec *abstime);
81 static int mutex_unlock_common(pthread_mutex_t *, int);
82 static void mutex_priority_adjust(struct pthread *, pthread_mutex_t);
83 static void mutex_rescan_owned (struct pthread *, struct pthread *,
84 struct pthread_mutex *);
86 static pthread_t mutex_queue_deq(pthread_mutex_t);
88 static void mutex_queue_remove(pthread_mutex_t, pthread_t);
89 static void mutex_queue_enq(pthread_mutex_t, pthread_t);
/*
 * Export the public pthread_mutex_* names as weak aliases.  The
 * double-underscore versions are the application-visible entry points
 * that may differ from libc-internal (single underscore) behavior;
 * init/lock/timedlock/trylock alias the "__" variants, while destroy
 * and unlock have no libc/application distinction and alias "_".
 */
91 __weak_reference(__pthread_mutex_init, pthread_mutex_init);
92 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
93 __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
94 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
96 /* Single underscore versions provided for libc internal usage: */
97 /* No difference between libc and application usage of these: */
98 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
99 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
/*
 * mutex_init -- common back end for the public mutex initializers.
 *
 * Validates the (optional) attribute object, allocates and initializes a
 * struct pthread_mutex, and stores it through *mutex.  A NULL attribute
 * pointer yields the defaults: PTHREAD_MUTEX_ERRORCHECK type,
 * PTHREAD_PRIO_NONE protocol, THR_MAX_PRIORITY ceiling.  "private"
 * nonzero marks the mutex library-private (MUTEX_FLAGS_PRIVATE below).
 *
 * NOTE(review): this excerpt is missing lines (error returns for the
 * validation branches, the switch header, braces) -- verify against the
 * upstream file before modifying.
 */
102 mutex_init(pthread_mutex_t *mutex,
103 const pthread_mutexattr_t *mutex_attr, int private)
105 struct pthread_mutex *pmutex;
106 enum pthread_mutextype type;
112 /* Check if default mutex attributes: */
113 if (mutex_attr == NULL || *mutex_attr == NULL) {
114 /* Default to a (error checking) POSIX mutex: */
115 type = PTHREAD_MUTEX_ERRORCHECK;
116 protocol = PTHREAD_PRIO_NONE;
117 ceiling = THR_MAX_PRIORITY;
121 /* Check mutex type: */
122 else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
123 ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
124 /* Return an invalid argument error: */
127 /* Check mutex protocol: */
128 else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
129 ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
130 /* Return an invalid argument error: */
134 /* Use the requested mutex type and protocol: */
135 type = (*mutex_attr)->m_type;
136 protocol = (*mutex_attr)->m_protocol;
137 ceiling = (*mutex_attr)->m_ceiling;
138 flags = (*mutex_attr)->m_flags;
141 /* Check no errors so far: */
/* Allocation failure path presumably returns ENOMEM -- body truncated here. */
143 if ((pmutex = (pthread_mutex_t)
144 malloc(sizeof(struct pthread_mutex))) == NULL) {
147 _thr_umtx_init(&pmutex->m_lock);
148 /* Set the mutex flags: */
149 pmutex->m_flags = flags;
151 /* Process according to mutex type: */
153 /* case PTHREAD_MUTEX_DEFAULT: */
154 case PTHREAD_MUTEX_ERRORCHECK:
155 case PTHREAD_MUTEX_NORMAL:
156 /* Nothing to do here. */
159 /* Single UNIX Spec 2 recursive mutex: */
160 case PTHREAD_MUTEX_RECURSIVE:
161 /* Reset the mutex count: */
165 /* Trap invalid mutex types: */
167 /* Return an invalid argument error: */
172 /* Initialise the rest of the mutex: */
173 TAILQ_INIT(&pmutex->m_queue);
174 pmutex->m_flags |= MUTEX_FLAGS_INITED;
176 pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
177 pmutex->m_owner = NULL;
178 pmutex->m_type = type;
179 pmutex->m_protocol = protocol;
180 pmutex->m_refcount = 0;
/* Only priority-protect mutexes carry a meaningful ceiling in m_prio. */
181 if (protocol == PTHREAD_PRIO_PROTECT)
182 pmutex->m_prio = ceiling;
185 pmutex->m_saved_prio = 0;
186 MUTEX_INIT_LINK(pmutex);
189 /* Free the mutex lock structure: */
190 MUTEX_DESTROY(pmutex);
195 /* Return the completion status: */
/*
 * init_static -- lazily initialize a statically-allocated mutex
 * (PTHREAD_MUTEX_INITIALIZER) with default attributes, non-private.
 * _mutex_static_lock serializes concurrent first-use initializers.
 */
200 init_static(struct pthread *thread, pthread_mutex_t *mutex)
204 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
/* NOTE(review): the re-check of *mutex under the lock is not visible
 * in this excerpt -- presumably only initializes if still NULL. */
207 ret = mutex_init(mutex, NULL, 0);
211 THR_LOCK_RELEASE(thread, &_mutex_static_lock);
/*
 * init_static_private -- same as init_static() but marks the mutex
 * library-private (delete safe); used by the libc-internal entry points.
 */
217 init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
221 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
224 ret = mutex_init(mutex, NULL, 1);
228 THR_LOCK_RELEASE(thread, &_mutex_static_lock);
/*
 * _pthread_mutex_init -- libc-internal initializer; creates the mutex
 * as private (third argument 1) so it is delete safe.
 */
234 _pthread_mutex_init(pthread_mutex_t *mutex,
235 const pthread_mutexattr_t *mutex_attr)
237 return mutex_init(mutex, mutex_attr, 1);
/*
 * __pthread_mutex_init -- application-visible initializer; creates the
 * mutex as non-private (third argument 0).
 */
241 __pthread_mutex_init(pthread_mutex_t *mutex,
242 const pthread_mutexattr_t *mutex_attr)
244 return mutex_init(mutex, mutex_attr, 0);
/*
 * _mutex_reinit -- reset an existing mutex to its freshly-initialized
 * state in place: re-init the umtx word, empty the waiter queue, clear
 * ownership, recursion count, refcount, and priority bookkeeping.
 * Presumably used after fork() where stale state must be discarded --
 * confirm against callers.
 */
248 _mutex_reinit(pthread_mutex_t *mutex)
250 _thr_umtx_init(&(*mutex)->m_lock);
251 TAILQ_INIT(&(*mutex)->m_queue);
252 MUTEX_INIT_LINK(*mutex);
253 (*mutex)->m_owner = NULL;
254 (*mutex)->m_count = 0;
255 (*mutex)->m_refcount = 0;
256 (*mutex)->m_prio = 0;
257 (*mutex)->m_saved_prio = 0;
/*
 * _mutex_fork -- fix up mutex state in the child after fork().
 * The forking thread is the only thread in the child, so every simple
 * mutex it held is re-stamped with the child's tid, and every priority
 * mutex has its umtx word and waiter queue reset (waiters do not exist
 * in the child).
 */
262 _mutex_fork(struct pthread *curthread)
264 struct pthread_mutex *m;
267 * Fix mutex ownership for child process.
268 * note that process shared mutex should not
269 * be inherited because owner is forking thread
270 * which is in parent process, they should be
271 * removed from the owned mutex list, current,
272 * process shared mutex is not supported, so I
275 TAILQ_FOREACH(m, &curthread->mutexq, m_qe) {
276 m->m_lock = (umtx_t)curthread->tid;
279 /* Clear contender for priority mutexes */
280 TAILQ_FOREACH(m, &curthread->pri_mutexq, m_qe) {
281 /* clear another thread locked us */
282 _thr_umtx_init(&m->m_lock);
283 TAILQ_INIT(&m->m_queue);
/*
 * _pthread_mutex_destroy -- destroy a mutex and free its storage.
 *
 * Tries the structure lock once (a failure means the mutex is in use),
 * then verifies the mutex is idle: no owner, no queued waiters, and no
 * condition-variable references (m_refcount).  A busy mutex is left
 * intact; presumably EBUSY is returned on those paths (the returns are
 * not visible in this excerpt).
 */
288 _pthread_mutex_destroy(pthread_mutex_t *mutex)
290 struct pthread *curthread = _get_curthread();
294 if (mutex == NULL || *mutex == NULL)
298 * Try to lock the mutex structure, we only need to
299 * try once, if failed, the mutex is in used.
301 ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
306 * Check mutex other fields to see if this mutex is
307 * in use. Mostly for prority mutex types, or there
308 * are condition variables referencing it.
310 if (((*mutex)->m_owner != NULL) ||
311 (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
312 ((*mutex)->m_refcount != 0)) {
313 THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
317 * Save a pointer to the mutex so it can be free'd
318 * and set the caller's pointer to NULL:
/* NOTE(review): "m = *mutex; *mutex = NULL;" is missing from this
 * excerpt; "m" below is otherwise undeclared. */
323 /* Unlock the mutex structure: */
324 _thr_umtx_unlock(&m->m_lock, curthread->tid);
327 * Free the memory allocated for the mutex
330 MUTEX_ASSERT_NOT_OWNED(m);
335 /* Return the completion status: */
/*
 * mutex_trylock_common -- non-blocking acquire, shared by the trylock
 * entry points.
 *
 * Fast path: PTHREAD_PRIO_NONE mutexes go straight to the umtx trylock
 * and, on success, onto the thread's simple-mutex list (mutexq).
 * Priority mutexes (PRIO_INHERIT / PRIO_PROTECT) take the structure
 * lock, handle lazy init of statically-allocated mutexes, and update
 * the priority bookkeeping under the thread's scheduling lock.
 * Re-acquisition by the owner is delegated to mutex_self_trylock().
 *
 * NOTE(review): braces, error-code returns (EBUSY/EINVAL paths), and
 * the THR_LOCK calls pairing some THR_UNLOCKs are missing from this
 * excerpt -- verify against upstream before editing.
 */
340 mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
344 THR_ASSERT((mutex != NULL) && (*mutex != NULL),
345 "Uninitialized mutex in mutex_trylock_common");
347 /* Short cut for simple mutex. */
348 if ((*mutex)->m_protocol == PTHREAD_PRIO_NONE) {
349 ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
351 (*mutex)->m_owner = curthread;
352 /* Add to the list of owned mutexes: */
353 MUTEX_ASSERT_NOT_OWNED(*mutex);
354 TAILQ_INSERT_TAIL(&curthread->mutexq,
356 } else if ((*mutex)->m_owner == curthread) {
357 ret = mutex_self_trylock(curthread, *mutex);
363 /* Code for priority mutex */
365 /* Lock the mutex structure: */
366 THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
369 * If the mutex was statically allocated, properly
370 * initialize the tail queue.
372 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
373 TAILQ_INIT(&(*mutex)->m_queue);
374 MUTEX_INIT_LINK(*mutex);
375 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
378 /* Process according to mutex type: */
379 switch ((*mutex)->m_protocol) {
380 /* POSIX priority inheritence mutex: */
381 case PTHREAD_PRIO_INHERIT:
382 /* Check if this mutex is not locked: */
383 if ((*mutex)->m_owner == NULL) {
384 /* Lock the mutex for the running thread: */
385 (*mutex)->m_owner = curthread;
388 /* Track number of priority mutexes owned: */
389 curthread->priority_mutex_count++;
392 * The mutex takes on the attributes of the
393 * running thread when there are no waiters.
395 (*mutex)->m_prio = curthread->active_priority;
396 (*mutex)->m_saved_prio =
397 curthread->inherited_priority;
398 curthread->inherited_priority = (*mutex)->m_prio;
399 THR_UNLOCK(curthread);
401 /* Add to the list of owned mutexes: */
402 MUTEX_ASSERT_NOT_OWNED(*mutex);
403 TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
405 } else if ((*mutex)->m_owner == curthread)
406 ret = mutex_self_trylock(curthread, *mutex);
408 /* Return a busy error: */
412 /* POSIX priority protection mutex: */
413 case PTHREAD_PRIO_PROTECT:
/* A thread running above the ceiling may not take the mutex. */
414 /* Check for a priority ceiling violation: */
415 if (curthread->active_priority > (*mutex)->m_prio)
418 /* Check if this mutex is not locked: */
419 else if ((*mutex)->m_owner == NULL) {
420 /* Lock the mutex for the running thread: */
421 (*mutex)->m_owner = curthread;
424 /* Track number of priority mutexes owned: */
425 curthread->priority_mutex_count++;
428 * The running thread inherits the ceiling
429 * priority of the mutex and executes at that
432 curthread->active_priority = (*mutex)->m_prio;
433 (*mutex)->m_saved_prio =
434 curthread->inherited_priority;
435 curthread->inherited_priority =
437 THR_UNLOCK(curthread);
438 /* Add to the list of owned mutexes: */
439 MUTEX_ASSERT_NOT_OWNED(*mutex);
440 TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
442 } else if ((*mutex)->m_owner == curthread)
443 ret = mutex_self_trylock(curthread, *mutex);
445 /* Return a busy error: */
449 /* Trap invalid mutex types: */
451 /* Return an invalid argument error: */
456 /* Unlock the mutex structure: */
457 THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
459 /* Return the completion status: */
/*
 * __pthread_mutex_trylock -- application entry point.  Lazily performs
 * dynamic initialization of a statically-initialized mutex (non-private)
 * before delegating to mutex_trylock_common().
 */
464 __pthread_mutex_trylock(pthread_mutex_t *mutex)
466 struct pthread *curthread = _get_curthread();
470 * If the mutex is statically initialized, perform the dynamic
473 if ((*mutex != NULL) ||
474 ((ret = init_static(curthread, mutex)) == 0))
475 ret = mutex_trylock_common(curthread, mutex);
/*
 * _pthread_mutex_trylock -- libc-internal entry point.  Identical to the
 * "__" variant except lazy initialization marks the mutex private
 * (delete safe) via init_static_private().
 */
481 _pthread_mutex_trylock(pthread_mutex_t *mutex)
483 struct pthread *curthread = _get_curthread();
487 * If the mutex is statically initialized, perform the dynamic
488 * initialization marking the mutex private (delete safe):
490 if ((*mutex != NULL) ||
491 ((ret = init_static_private(curthread, mutex)) == 0))
492 ret = mutex_trylock_common(curthread, mutex);
/*
 * mutex_lock_common -- blocking acquire shared by lock and timedlock.
 *
 * abstime == NULL means block indefinitely; otherwise abstime is an
 * absolute CLOCK_REALTIME deadline, validated up front (negative fields
 * or tv_nsec >= 1e9 are rejected).
 *
 * Fast path: PRIO_NONE mutexes use the umtx trylock, then either the
 * self-lock handler (owner re-entry) or a umtx (timed)lock computed as
 * a relative timeout from "now".
 *
 * Priority mutexes loop until this thread becomes the owner: enqueue on
 * the mutex's priority-ordered waiter queue, record the wanted mutex in
 * curthread->data.mutex, snapshot curthread->cycle, drop both locks, and
 * sleep in _thr_umtx_wait on the cycle word.  The loop exists because a
 * signal can interrupt the wait; the thread is backed out of the waiter
 * queue before running the handler and must retry.  For PRIO_PROTECT,
 * curthread->error carries a ceiling-violation detected by the unlocker
 * while we waited.
 *
 * NOTE(review): many lines (braces, returns, some THR_LOCK calls, the
 * "do {" opening the retry loop) are missing from this excerpt -- the
 * visible text is not compilable as-is; verify against upstream.
 */
498 mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
499 const struct timespec * abstime)
501 struct timespec ts, ts2;
505 THR_ASSERT((m != NULL) && (*m != NULL),
506 "Uninitialized mutex in mutex_lock_common");
508 if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
509 abstime->tv_nsec >= 1000000000))
512 /* Short cut for simple mutex. */
514 if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
515 /* Default POSIX mutex: */
516 ret = THR_UMTX_TRYLOCK(curthread, &(*m)->m_lock);
518 (*m)->m_owner = curthread;
519 /* Add to the list of owned mutexes: */
520 MUTEX_ASSERT_NOT_OWNED(*m);
521 TAILQ_INSERT_TAIL(&curthread->mutexq,
523 } else if ((*m)->m_owner == curthread) {
524 ret = mutex_self_lock(curthread, *m, abstime);
526 if (abstime == NULL) {
527 THR_UMTX_LOCK(curthread, &(*m)->m_lock);
/* Convert the absolute deadline to a relative timeout for umtx. */
530 clock_gettime(CLOCK_REALTIME, &ts);
531 TIMESPEC_SUB(&ts2, abstime, &ts);
532 ret = THR_UMTX_TIMEDLOCK(curthread,
533 &(*m)->m_lock, &ts2);
535 * Timed out wait is not restarted if
536 * it was interrupted, not worth to do it.
542 (*m)->m_owner = curthread;
543 /* Add to the list of owned mutexes: */
544 MUTEX_ASSERT_NOT_OWNED(*m);
545 TAILQ_INSERT_TAIL(&curthread->mutexq,
552 /* Code for priority mutex */
555 * Enter a loop waiting to become the mutex owner. We need a
556 * loop in case the waiting thread is interrupted by a signal
557 * to execute a signal handler. It is not (currently) possible
558 * to remain in the waiting queue while running a handler.
559 * Instead, the thread is interrupted and backed out of the
560 * waiting queue prior to executing the signal handler.
563 /* Lock the mutex structure: */
564 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
567 * If the mutex was statically allocated, properly
568 * initialize the tail queue.
570 if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
571 TAILQ_INIT(&(*m)->m_queue);
572 (*m)->m_flags |= MUTEX_FLAGS_INITED;
576 /* Process according to mutex type: */
577 switch ((*m)->m_protocol) {
578 /* POSIX priority inheritence mutex: */
579 case PTHREAD_PRIO_INHERIT:
580 /* Check if this mutex is not locked: */
581 if ((*m)->m_owner == NULL) {
582 /* Lock the mutex for this thread: */
583 (*m)->m_owner = curthread;
586 /* Track number of priority mutexes owned: */
587 curthread->priority_mutex_count++;
590 * The mutex takes on attributes of the
591 * running thread when there are no waiters.
592 * Make sure the thread's scheduling lock is
593 * held while priorities are adjusted.
595 (*m)->m_prio = curthread->active_priority;
597 curthread->inherited_priority;
598 curthread->inherited_priority = (*m)->m_prio;
599 THR_UNLOCK(curthread);
601 /* Add to the list of owned mutexes: */
602 MUTEX_ASSERT_NOT_OWNED(*m);
603 TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
606 /* Unlock the mutex structure: */
607 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
608 } else if ((*m)->m_owner == curthread) {
609 ret = mutex_self_lock(curthread, *m, abstime);
611 /* Unlock the mutex structure: */
612 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
615 * Join the queue of threads waiting to lock
616 * the mutex and save a pointer to the mutex.
618 mutex_queue_enq(*m, curthread);
619 curthread->data.mutex = *m;
/* Propagate our priority to the owner if we outrank the mutex. */
621 if (curthread->active_priority > (*m)->m_prio)
622 /* Adjust priorities: */
623 mutex_priority_adjust(curthread, *m);
626 cycle = curthread->cycle;
627 THR_UNLOCK(curthread);
629 /* Unlock the mutex structure: */
630 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
632 clock_gettime(CLOCK_REALTIME, &ts);
633 TIMESPEC_SUB(&ts2, abstime, &ts);
634 ret = _thr_umtx_wait(&curthread->cycle, cycle,
/* If still queued after waking (timeout/interrupt), dequeue
 * ourselves under the mutex structure lock before retrying. */
639 if (THR_IN_MUTEXQ(curthread)) {
640 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
641 mutex_queue_remove(*m, curthread);
642 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
645 * Only clear these after assuring the
646 * thread is dequeued.
648 curthread->data.mutex = NULL;
652 /* POSIX priority protection mutex: */
653 case PTHREAD_PRIO_PROTECT:
654 /* Check for a priority ceiling violation: */
655 if (curthread->active_priority > (*m)->m_prio) {
656 /* Unlock the mutex structure: */
657 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
660 /* Check if this mutex is not locked: */
661 else if ((*m)->m_owner == NULL) {
663 * Lock the mutex for the running
666 (*m)->m_owner = curthread;
669 /* Track number of priority mutexes owned: */
670 curthread->priority_mutex_count++;
673 * The running thread inherits the ceiling
674 * priority of the mutex and executes at that
675 * priority. Make sure the thread's
676 * scheduling lock is held while priorities
679 curthread->active_priority = (*m)->m_prio;
681 curthread->inherited_priority;
682 curthread->inherited_priority = (*m)->m_prio;
683 THR_UNLOCK(curthread);
685 /* Add to the list of owned mutexes: */
686 MUTEX_ASSERT_NOT_OWNED(*m);
687 TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
690 /* Unlock the mutex structure: */
691 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
692 } else if ((*m)->m_owner == curthread) {
693 ret = mutex_self_lock(curthread, *m, abstime);
695 /* Unlock the mutex structure: */
696 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
699 * Join the queue of threads waiting to lock
700 * the mutex and save a pointer to the mutex.
702 mutex_queue_enq(*m, curthread);
703 curthread->data.mutex = *m;
705 /* Clear any previous error: */
706 curthread->error = 0;
709 cycle = curthread->cycle;
710 THR_UNLOCK(curthread);
712 /* Unlock the mutex structure: */
713 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
715 clock_gettime(CLOCK_REALTIME, &ts);
716 TIMESPEC_SUB(&ts2, abstime, &ts);
717 ret = _thr_umtx_wait(&curthread->cycle, cycle,
722 curthread->data.mutex = NULL;
723 if (THR_IN_MUTEXQ(curthread)) {
724 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
725 mutex_queue_remove(*m, curthread);
726 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
729 * Only clear these after assuring the
730 * thread is dequeued.
732 curthread->data.mutex = NULL;
735 * The threads priority may have changed while
736 * waiting for the mutex causing a ceiling
739 ret = curthread->error;
740 curthread->error = 0;
744 /* Trap invalid mutex types: */
746 /* Unlock the mutex structure: */
747 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
749 /* Return an invalid argument error: */
754 } while (((*m)->m_owner != curthread) && (ret == 0));
756 /* Return the completion status: */
/*
 * __pthread_mutex_lock -- application entry point for blocking lock.
 * Lazily initializes a statically-initialized mutex (non-private), then
 * delegates to mutex_lock_common() with no timeout.
 */
761 __pthread_mutex_lock(pthread_mutex_t *m)
763 struct pthread *curthread;
768 curthread = _get_curthread();
771 * If the mutex is statically initialized, perform the dynamic
774 if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
775 ret = mutex_lock_common(curthread, m, NULL);
/*
 * _pthread_mutex_lock -- libc-internal blocking lock; lazy init marks
 * the mutex private (delete safe).  Otherwise identical to the "__"
 * variant above.
 */
781 _pthread_mutex_lock(pthread_mutex_t *m)
783 struct pthread *curthread;
788 curthread = _get_curthread();
791 * If the mutex is statically initialized, perform the dynamic
792 * initialization marking it private (delete safe):
795 ((ret = init_static_private(curthread, m)) == 0))
796 ret = mutex_lock_common(curthread, m, NULL);
/*
 * __pthread_mutex_timedlock -- application entry point; blocking lock
 * with an absolute CLOCK_REALTIME deadline, passed straight through to
 * mutex_lock_common().
 */
802 __pthread_mutex_timedlock(pthread_mutex_t *m,
803 const struct timespec *abs_timeout)
805 struct pthread *curthread;
810 curthread = _get_curthread();
813 * If the mutex is statically initialized, perform the dynamic
816 if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
817 ret = mutex_lock_common(curthread, m, abs_timeout);
/*
 * _pthread_mutex_timedlock -- libc-internal timed lock; lazy init marks
 * the mutex private (delete safe).
 */
823 _pthread_mutex_timedlock(pthread_mutex_t *m,
824 const struct timespec *abs_timeout)
826 struct pthread *curthread;
831 curthread = _get_curthread();
834 * If the mutex is statically initialized, perform the dynamic
835 * initialization marking it private (delete safe):
838 ((ret = init_static_private(curthread, m)) == 0))
839 ret = mutex_lock_common(curthread, m, abs_timeout);
/*
 * _pthread_mutex_unlock -- release a mutex; no condvar reference is
 * added (second argument 0).
 */
845 _pthread_mutex_unlock(pthread_mutex_t *m)
847 return (mutex_unlock_common(m, /* add reference */ 0));
/*
 * _mutex_cv_unlock -- condvar-internal release: unlocks the mutex and
 * bumps m_refcount (add_reference 1) so the mutex cannot be destroyed
 * while a condition wait references it.
 */
851 _mutex_cv_unlock(pthread_mutex_t *m)
853 return (mutex_unlock_common(m, /* add reference */ 1));
/*
 * _mutex_cv_lock -- condvar-internal reacquire after a condition wait;
 * blocking lock with no timeout.  NOTE(review): the refcount decrement
 * balancing _mutex_cv_unlock() is not visible in this excerpt.
 */
857 _mutex_cv_lock(pthread_mutex_t *m)
861 ret = mutex_lock_common(_get_curthread(), m, NULL);
/*
 * mutex_self_trylock -- trylock on a mutex the calling thread already
 * owns.  ERRORCHECK/NORMAL types fail (presumably EBUSY -- the return
 * lines are missing from this excerpt); RECURSIVE bumps m_count after
 * an overflow guard.
 */
868 mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
873 /* case PTHREAD_MUTEX_DEFAULT: */
874 case PTHREAD_MUTEX_ERRORCHECK:
875 case PTHREAD_MUTEX_NORMAL:
879 case PTHREAD_MUTEX_RECURSIVE:
880 /* Increment the lock count: */
/* Guard against m_count overflow before incrementing. */
881 if (m->m_count + 1 > 0) {
889 /* Trap invalid mutex types; */
/*
 * mutex_self_lock -- blocking lock on a mutex the calling thread
 * already owns.
 *
 * ERRORCHECK: with a deadline, sleeps out the remaining time and
 * presumably returns ETIMEDOUT; without one, returns EDEADLK per POSIX
 * (returns not visible in this excerpt).  NORMAL: deliberately
 * deadlocks -- with a deadline it sleeps out the timeout (dropping the
 * structure lock first for priority mutexes), without one it sleeps
 * forever in a nanosleep loop.  RECURSIVE: bumps m_count with an
 * overflow guard.
 */
897 mutex_self_lock(struct pthread *curthread, pthread_mutex_t m,
898 const struct timespec *abstime)
900 struct timespec ts1, ts2;
904 /* case PTHREAD_MUTEX_DEFAULT: */
905 case PTHREAD_MUTEX_ERRORCHECK:
907 clock_gettime(CLOCK_REALTIME, &ts1);
908 TIMESPEC_SUB(&ts2, abstime, &ts1);
909 __sys_nanosleep(&ts2, NULL);
913 * POSIX specifies that mutexes should return
914 * EDEADLK if a recursive lock is detected.
920 case PTHREAD_MUTEX_NORMAL:
922 * What SS2 define as a 'normal' mutex. Intentionally
923 * deadlock on attempts to get a lock you already own.
926 if (m->m_protocol != PTHREAD_PRIO_NONE) {
927 /* Unlock the mutex structure: */
928 THR_LOCK_RELEASE(curthread, &m->m_lock);
931 clock_gettime(CLOCK_REALTIME, &ts1);
932 TIMESPEC_SUB(&ts2, abstime, &ts1);
933 __sys_nanosleep(&ts2, NULL);
/* No deadline: sleep forever (intentional self-deadlock). */
939 __sys_nanosleep(&ts1, NULL);
943 case PTHREAD_MUTEX_RECURSIVE:
944 /* Increment the lock count: */
945 if (m->m_count + 1 > 0) {
953 /* Trap invalid mutex types; */
/*
 * mutex_unlock_common -- release path shared by pthread_mutex_unlock
 * and the condvar code.  add_reference nonzero bumps m_refcount after a
 * successful unlock (condvar keeps the mutex alive).
 *
 * Fast path: PRIO_NONE verifies ownership, unwinds one recursion level
 * if applicable, otherwise clears the owner, removes the mutex from the
 * thread's mutexq, and releases the umtx word (waking a kernel waiter).
 *
 * Priority mutexes additionally restore the unlocking thread's
 * inherited/active priority under its scheduling lock (careful not to
 * clobber base-priority changes made while the mutex was held),
 * decrement priority_mutex_count, unlink the mutex from pri_mutexq, and
 * hand ownership directly to the highest-priority waiter via
 * mutex_handoff().
 *
 * NOTE(review): braces, "ret = EPERM/EINVAL" assignments, m_count
 * decrements, and the m_saved_prio reads feeding inherited_priority are
 * missing from this excerpt -- verify against upstream before editing.
 */
961 mutex_unlock_common(pthread_mutex_t *m, int add_reference)
963 struct pthread *curthread = _get_curthread();
967 if (m == NULL || *m == NULL)
970 /* Short cut for simple mutex. */
972 if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
974 * Check if the running thread is not the owner of the
977 if (__predict_false((*m)->m_owner != curthread)) {
979 } else if (__predict_false(
980 (*m)->m_type == PTHREAD_MUTEX_RECURSIVE &&
981 (*m)->m_count > 0)) {
982 /* Decrement the count: */
988 * Clear the count in case this is a recursive
992 (*m)->m_owner = NULL;
993 /* Remove the mutex from the threads queue. */
994 MUTEX_ASSERT_IS_OWNED(*m);
995 TAILQ_REMOVE(&curthread->mutexq, (*m), m_qe);
1000 * Hand off the mutex to the next waiting
1003 _thr_umtx_unlock(&(*m)->m_lock, curthread->tid);
1008 /* Code for priority mutex */
1010 /* Lock the mutex structure: */
1011 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
1013 /* Process according to mutex type: */
1014 switch ((*m)->m_protocol) {
1015 /* POSIX priority inheritence mutex: */
1016 case PTHREAD_PRIO_INHERIT:
1018 * Check if the running thread is not the owner of the
1021 if ((*m)->m_owner != curthread)
1023 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1024 ((*m)->m_count > 0))
1025 /* Decrement the count: */
1029 * Clear the count in case this is recursive
1035 * Restore the threads inherited priority and
1036 * recompute the active priority (being careful
1037 * not to override changes in the threads base
1038 * priority subsequent to locking the mutex).
1040 THR_LOCK(curthread);
1041 curthread->inherited_priority =
1043 curthread->active_priority =
1044 MAX(curthread->inherited_priority,
1045 curthread->base_priority);
1048 * This thread now owns one less priority mutex.
1050 curthread->priority_mutex_count--;
1051 THR_UNLOCK(curthread);
1053 /* Remove the mutex from the threads queue. */
1054 MUTEX_ASSERT_IS_OWNED(*m);
1055 TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
1057 MUTEX_INIT_LINK(*m);
1060 * Hand off the mutex to the next waiting
1063 tid = mutex_handoff(curthread, *m);
1067 /* POSIX priority ceiling mutex: */
1068 case PTHREAD_PRIO_PROTECT:
1070 * Check if the running thread is not the owner of the
1073 if ((*m)->m_owner != curthread)
1075 else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1076 ((*m)->m_count > 0))
1077 /* Decrement the count: */
1081 * Clear the count in case this is a recursive
1087 * Restore the threads inherited priority and
1088 * recompute the active priority (being careful
1089 * not to override changes in the threads base
1090 * priority subsequent to locking the mutex).
1092 THR_LOCK(curthread);
1093 curthread->inherited_priority =
1095 curthread->active_priority =
1096 MAX(curthread->inherited_priority,
1097 curthread->base_priority);
1100 * This thread now owns one less priority mutex.
1102 curthread->priority_mutex_count--;
1103 THR_UNLOCK(curthread);
1105 /* Remove the mutex from the threads queue. */
1106 MUTEX_ASSERT_IS_OWNED(*m);
1107 TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
1109 MUTEX_INIT_LINK(*m);
1112 * Hand off the mutex to the next waiting
1115 tid = mutex_handoff(curthread, *m);
1119 /* Trap invalid mutex types: */
1121 /* Return an invalid argument error: */
1126 if ((ret == 0) && (add_reference != 0))
1127 /* Increment the reference count: */
1130 /* Unlock the mutex structure: */
1131 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
1134 /* Return the completion status: */
1140 * This function is called when a change in base priority occurs for
1141 * a thread that is holding or waiting for a priority protection or
1142 * inheritence mutex. A change in a threads base priority can effect
1143 * changes to active priorities of other threads and to the ordering
1144 * of mutex locking by waiting threads.
1146 * This must be called without the target thread's scheduling lock held.
/*
 * _mutex_notify_priochange(curthread, pthread, propagate_prio):
 * rescans the target thread's owned priority mutexes (locking the first
 * one as a barrier against concurrent priority changes / release), and,
 * if propagate_prio is set and the thread is blocked on a mutex,
 * repositions it in that mutex's waiter queue and propagates priority
 * for PRIO_INHERIT.  Note the lock-ordering dance: the thread lock and
 * the mutex lock are never held simultaneously here.
 */
1149 _mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
1152 struct pthread_mutex *m;
1154 /* Adjust the priorites of any owned priority mutexes: */
1155 if (pthread->priority_mutex_count > 0) {
1157 * Rescan the mutexes owned by this thread and correct
1158 * their priorities to account for this threads change
1159 * in priority. This has the side effect of changing
1160 * the threads active priority.
1162 * Be sure to lock the first mutex in the list of owned
1163 * mutexes. This acts as a barrier against another
1164 * simultaneous call to change the threads priority
1165 * and from the owning thread releasing the mutex.
1167 m = TAILQ_FIRST(&pthread->pri_mutexq);
1169 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1171 * Make sure the thread still owns the lock.
1173 if (m == TAILQ_FIRST(&pthread->pri_mutexq))
1174 mutex_rescan_owned(curthread, pthread,
1175 /* rescan all owned */ NULL);
1176 THR_LOCK_RELEASE(curthread, &m->m_lock);
1181 * If this thread is waiting on a priority inheritence mutex,
1182 * check for priority adjustments. A change in priority can
1183 * also cause a ceiling violation(*) for a thread waiting on
1184 * a priority protection mutex; we don't perform the check here
1185 * as it is done in pthread_mutex_unlock.
1187 * (*) It should be noted that a priority change to a thread
1188 * _after_ taking and owning a priority ceiling mutex
1189 * does not affect ownership of that mutex; the ceiling
1190 * priority is only checked before mutex ownership occurs.
1192 if (propagate_prio != 0) {
1194 * Lock the thread's scheduling queue. This is a bit
1195 * convoluted; the "in synchronization queue flag" can
1196 * only be cleared with both the thread's scheduling and
1197 * mutex locks held. The thread's pointer to the wanted
1198 * mutex is guaranteed to be valid during this time.
1200 THR_THREAD_LOCK(curthread, pthread);
1202 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
1203 ((m = pthread->data.mutex) == NULL))
1204 THR_THREAD_UNLOCK(curthread, pthread);
1207 * This thread is currently waiting on a mutex; unlock
1208 * the scheduling queue lock and lock the mutex. We
1209 * can't hold both at the same time because the locking
1210 * order could cause a deadlock.
1212 THR_THREAD_UNLOCK(curthread, pthread);
1213 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1216 * Check to make sure this thread is still in the
1217 * same state (the lock above can yield the CPU to
1218 * another thread or the thread may be running on
1221 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1222 (pthread->data.mutex == m)) {
1224 * Remove and reinsert this thread into
1225 * the list of waiting threads to preserve
1226 * decreasing priority order.
1228 mutex_queue_remove(m, pthread);
1229 mutex_queue_enq(m, pthread);
1231 if (m->m_protocol == PTHREAD_PRIO_INHERIT)
1232 /* Adjust priorities: */
1233 mutex_priority_adjust(curthread, m);
1236 /* Unlock the mutex structure: */
1237 THR_LOCK_RELEASE(curthread, &m->m_lock);
1243 * Called when a new thread is added to the mutex waiting queue or
1244 * when a threads priority changes that is already in the mutex
1247 * This must be called with the mutex locked by the current thread.
/*
 * mutex_priority_adjust -- propagate priority along a chain of
 * PRIO_INHERIT mutexes.  Recomputes the mutex priority as the max of
 * the highest waiter's active priority and the owner's (recalculated)
 * priority, then walks owner -> mutex-it-waits-on -> that owner, ...
 * until no further priority change is needed.  The mutex passed in is
 * locked/unlocked by the caller; mutexes reached during the walk are
 * locked here and released at the top of the next iteration or on exit.
 */
1250 mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
1252 pthread_mutex_t m = mutex;
1253 struct pthread *pthread_next, *pthread = mutex->m_owner;
1254 int done, temp_prio;
1257 * Calculate the mutex priority as the maximum of the highest
1258 * active priority of any waiting threads and the owning threads
1259 * active priority(*).
1261 * (*) Because the owning threads current active priority may
1262 * reflect priority inherited from this mutex (and the mutex
1263 * priority may have changed) we must recalculate the active
1264 * priority based on the threads saved inherited priority
1265 * and its base priority.
1267 pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
1268 temp_prio = MAX(pthread_next->active_priority,
1269 MAX(m->m_saved_prio, pthread->base_priority));
1271 /* See if this mutex really needs adjusting: */
1272 if (temp_prio == m->m_prio)
1273 /* No need to propagate the priority: */
1276 /* Set new priority of the mutex: */
1277 m->m_prio = temp_prio;
1280 * Don't unlock the mutex passed in as an argument. It is
1281 * expected to be locked and unlocked by the caller.
1286 * Save the threads priority before rescanning the
1289 temp_prio = pthread->active_priority;
1292 * Fix the priorities for all mutexes held by the owning
1293 * thread since taking this mutex. This also has a
1294 * potential side-effect of changing the threads priority.
1296 * At this point the mutex is locked by the current thread.
1297 * The owning thread can't release the mutex until it is
1298 * unlocked, so we should be able to safely walk its list
1301 mutex_rescan_owned(curthread, pthread, m);
1304 * If this isn't the first time through the loop,
1305 * the current mutex needs to be unlocked.
1308 THR_LOCK_RELEASE(curthread, &m->m_lock);
1310 /* Assume we're done unless told otherwise: */
1314 * If the thread is currently waiting on a mutex, check
1315 * to see if the threads new priority has affected the
1316 * priority of the mutex.
/* Follow the chain only if the owner's priority actually changed
 * and it is itself blocked on another PRIO_INHERIT mutex. */
1318 if ((temp_prio != pthread->active_priority) &&
1319 ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1320 ((m = pthread->data.mutex) != NULL) &&
1321 (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
1322 /* Lock the mutex structure: */
1323 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1326 * Make sure the thread is still waiting on the
1329 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1330 (m == pthread->data.mutex)) {
1332 * The priority for this thread has changed.
1333 * Remove and reinsert this thread into the
1334 * list of waiting threads to preserve
1335 * decreasing priority order.
1337 mutex_queue_remove(m, pthread);
1338 mutex_queue_enq(m, pthread);
1341 * Grab the waiting thread with highest
1344 pthread_next = TAILQ_FIRST(&m->m_queue);
1347 * Calculate the mutex priority as the maximum
1348 * of the highest active priority of any
1349 * waiting threads and the owning threads
1352 temp_prio = MAX(pthread_next->active_priority,
1353 MAX(m->m_saved_prio,
1354 m->m_owner->base_priority));
1356 if (temp_prio != m->m_prio) {
1358 * The priority needs to be propagated
1359 * to the mutex this thread is waiting
1360 * on and up to the owner of that mutex.
1362 m->m_prio = temp_prio;
1363 pthread = m->m_owner;
1365 /* We're not done yet: */
1369 /* Only release the mutex if we're done: */
1371 THR_LOCK_RELEASE(curthread, &m->m_lock);
1373 } while (done == 0);
/*
 * Walk the mutexes owned by `pthread' and recompute priorities.
 *
 * If `mutex' is NULL the walk starts at the head of the thread's owned
 * list (pri_mutexq) with no inherited priority; otherwise it starts at
 * the mutex *after* `mutex', seeding inherited_prio from mutex->m_prio
 * (the caller is assumed to have already fixed that mutex's priority).
 * Only PTHREAD_PRIO_INHERIT mutexes participate in the recalculation;
 * other protocols are skipped as the list is traversed.  Afterwards the
 * thread's inherited_priority and active_priority are updated under its
 * scheduling lock.
 *
 * NOTE(review): the run-queue remove/insert macros below are commented
 * out in this version, so only the priority fields are updated here —
 * presumably the kernel/umtx layer handles requeueing; confirm.
 */
1377 mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
1378 struct pthread_mutex *mutex)
1380 struct pthread_mutex *m;
1381 struct pthread *pthread_next;
1382 int active_prio, inherited_prio;
1385 * Start walking the mutexes the thread has taken since
1386 * taking this mutex.
1388 if (mutex == NULL) {
1390 * A null mutex means start at the beginning of the owned
1393 m = TAILQ_FIRST(&pthread->pri_mutexq);
1395 /* There is no inherited priority yet. */
1399 * The caller wants to start after a specific mutex. It
1400 * is assumed that this mutex is a priority inheritence
1401 * mutex and that its priority has been correctly
1404 m = TAILQ_NEXT(mutex, m_qe);
1406 /* Start inheriting priority from the specified mutex. */
1407 inherited_prio = mutex->m_prio;
1409 active_prio = MAX(inherited_prio, pthread->base_priority);
/*
 * Propagate priority along the remainder of the owned-mutex list.
 * Each PRIO_INHERIT mutex's priority becomes the max of the priority
 * carried in so far (active_prio) and its highest-priority waiter,
 * and that result feeds the next iteration.
 */
1411 for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
1413 * We only want to deal with priority inheritence
1414 * mutexes. This might be optimized by only placing
1415 * priority inheritence mutexes into the owned mutex
1416 * list, but it may prove to be useful having all
1417 * owned mutexes in this list. Consider a thread
1418 * exiting while holding mutexes...
1420 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1422 * Fix the owners saved (inherited) priority to
1423 * reflect the priority of the previous mutex.
1425 m->m_saved_prio = inherited_prio;
/* Waiters are kept in priority order, so the first is the highest. */
1427 if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1428 /* Recalculate the priority of the mutex: */
1429 m->m_prio = MAX(active_prio,
1430 pthread_next->active_priority);
1432 m->m_prio = active_prio;
1434 /* Recalculate new inherited and active priorities: */
1435 inherited_prio = m->m_prio;
1436 active_prio = MAX(m->m_prio, pthread->base_priority);
1441 * Fix the threads inherited priority and recalculate its
1444 pthread->inherited_priority = inherited_prio;
1445 active_prio = MAX(inherited_prio, pthread->base_priority);
/* Only touch the thread's scheduling state if the priority changed. */
1447 if (active_prio != pthread->active_priority) {
1448 /* Lock the thread's scheduling queue: */
1449 THR_THREAD_LOCK(curthread, pthread);
1451 /* if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) */
1454 * This thread is not in a run queue. Just set
1455 * its active priority.
1457 pthread->active_priority = active_prio;
1461 * This thread is in a run queue. Remove it from
1462 * the queue before changing its priority:
1464 /* THR_RUNQ_REMOVE(pthread);*/
1466 * POSIX states that if the priority is being
1467 * lowered, the thread must be inserted at the
1468 * head of the queue for its priority if it owns
1469 * any priority protection or inheritence mutexes.
1471 if ((active_prio < pthread->active_priority) &&
1472 (pthread->priority_mutex_count > 0)) {
1473 /* Set the new active priority. */
1474 pthread->active_priority = active_prio;
1475 /* THR_RUNQ_INSERT_HEAD(pthread); */
1477 /* Set the new active priority. */
1478 pthread->active_priority = active_prio;
1479 /* THR_RUNQ_INSERT_TAIL(pthread);*/
1482 THR_THREAD_UNLOCK(curthread, pthread);
/*
 * Unlock every mutex on `pthread's owned-mutex list that was created
 * with MUTEX_FLAGS_PRIVATE.  Used during thread teardown (caller
 * context not visible here — TODO confirm).
 *
 * m_next is captured before the unlock because pthread_mutex_unlock
 * presumably removes the mutex from pri_mutexq, invalidating m's
 * m_qe linkage — the classic safe-removal traversal.
 */
1487 _mutex_unlock_private(pthread_t pthread)
1489 struct pthread_mutex *m, *m_next;
1491 for (m = TAILQ_FIRST(&pthread->pri_mutexq); m != NULL; m = m_next) {
1492 m_next = TAILQ_NEXT(m, m_qe);
1493 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
/*
 * In this library pthread_mutex_t appears to be a pointer to
 * struct pthread_mutex, hence passing &m (pointer-to-pointer).
 */
1494 pthread_mutex_unlock(&m);
1499 * Dequeue a waiting thread from the head of a mutex queue in descending
1502 * In order to properly dequeue a thread from the mutex queue and
1503 * make it runnable without the possibility of errant wakeups, it
1504 * is necessary to lock the thread's scheduling queue while also
1505 * holding the mutex lock.
/*
 * Hand the mutex directly to the highest-priority eligible waiter.
 *
 * Clears m_owner, then walks the wait queue from the head (descending
 * priority order).  For each candidate, its scheduling lock is taken
 * while the mutex lock is still held — per the comment above, this
 * pairing is what prevents errant wakeups.  Ownership is assigned
 * according to the mutex protocol; the thread is then woken via
 * _thr_umtx_wake and the loop exits once a valid owner was installed
 * (mutex->m_owner == pthread).  A PRIO_PROTECT waiter whose active
 * priority exceeds the ceiling is woken with pthread->error = EINVAL
 * instead of being given the mutex, and the scan continues with the
 * next waiter.
 */
1508 mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
1510 struct pthread *pthread;
1513 /* Keep dequeueing until we find a valid thread: */
1514 mutex->m_owner = NULL;
1515 pthread = TAILQ_FIRST(&mutex->m_queue);
1516 while (pthread != NULL) {
1517 /* Take the thread's scheduling lock: */
1518 THR_THREAD_LOCK(curthread, pthread);
1520 /* Remove the thread from the mutex queue: */
1521 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1522 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1525 * Only exit the loop if the thread hasn't been
1528 switch (mutex->m_protocol) {
1529 case PTHREAD_PRIO_NONE:
1531 * Assign the new owner and add the mutex to the
1532 * thread's list of owned mutexes.
1534 mutex->m_owner = pthread;
1535 TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
1538 case PTHREAD_PRIO_INHERIT:
1540 * Assign the new owner and add the mutex to the
1541 * thread's list of owned mutexes.
1543 mutex->m_owner = pthread;
1544 TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
1546 /* Track number of priority mutexes owned: */
1547 pthread->priority_mutex_count++;
1550 * Set the priority of the mutex. Since our waiting
1551 * threads are in descending priority order, the
1552 * priority of the mutex becomes the active priority
1553 * of the thread we just dequeued.
1555 mutex->m_prio = pthread->active_priority;
1557 /* Save the owning threads inherited priority: */
1558 mutex->m_saved_prio = pthread->inherited_priority;
1561 * The owning threads inherited priority now becomes
1562 * his active priority (the priority of the mutex).
1564 pthread->inherited_priority = mutex->m_prio;
1567 case PTHREAD_PRIO_PROTECT:
/* Ceiling violation: wake the waiter with EINVAL, don't hand off. */
1568 if (pthread->active_priority > mutex->m_prio) {
1570 * Either the mutex ceiling priority has
1571 * been lowered and/or this threads priority
1572 * has been raised subsequent to the thread
1573 * being queued on the waiting list.
1575 pthread->error = EINVAL;
1579 * Assign the new owner and add the mutex
1580 * to the thread's list of owned mutexes.
1582 mutex->m_owner = pthread;
1583 TAILQ_INSERT_TAIL(&pthread->pri_mutexq,
1586 /* Track number of priority mutexes owned: */
1587 pthread->priority_mutex_count++;
1590 * Save the owning threads inherited
1593 mutex->m_saved_prio =
1594 pthread->inherited_priority;
1597 * The owning thread inherits the ceiling
1598 * priority of the mutex and executes at
1601 pthread->inherited_priority = mutex->m_prio;
1602 pthread->active_priority = mutex->m_prio;
1608 /* Make the thread runnable and unlock the scheduling queue: */
1610 _thr_umtx_wake(&pthread->cycle, 1);
1612 THR_THREAD_UNLOCK(curthread, pthread);
/* Handoff succeeded only if this thread was actually made owner. */
1613 if (mutex->m_owner == pthread)
1614 /* We're done; a valid owner was found. */
1617 /* Get the next thread from the waiting queue: */
1618 pthread = TAILQ_NEXT(pthread, sqe);
/* Queue drained with no new owner: reset the PI mutex's priority. */
1621 if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
1622 /* This mutex has no priority: */
1629 * Dequeue a waiting thread from the head of a mutex queue in descending
/*
 * Pop waiters from the head of the mutex wait queue (highest priority
 * first), clearing each thread's in-syncq flag as it is removed.
 * NOTE(review): the loop's exit condition / selection of which thread
 * to return is not visible in this view — presumably it skips threads
 * already interrupted or timed out; confirm against the full source.
 */
1633 mutex_queue_deq(struct pthread_mutex *mutex)
1637 while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1638 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1639 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1647 * Remove a waiting thread from a mutex queue in descending priority order.
/*
 * Remove `pthread' from the mutex wait queue, if it is queued there.
 * The THR_FLAGS_IN_SYNCQ flag guards the TAILQ_REMOVE so that a thread
 * not currently on any sync queue is left untouched, and the flag is
 * cleared to record the removal.
 */
1650 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1652 if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1653 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1654 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1659 * Enqueue a waiting thread to a queue in descending priority order.
/*
 * Insert `pthread' into the mutex wait queue, keeping the queue in
 * descending active-priority order.  Because the comparison is <=,
 * a thread of equal priority goes behind existing equal-priority
 * waiters — FIFO among equals.  Sets THR_FLAGS_IN_SYNCQ so the thread
 * can later be recognized (and removed) by mutex_queue_remove().
 */
1662 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1664 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1666 THR_ASSERT_NOT_IN_SYNCQ(pthread);
1668 * For the common case of all threads having equal priority,
1669 * we perform a quick check against the priority of the thread
1670 * at the tail of the queue.
/* Fast path: empty queue, or no higher priority than the tail. */
1672 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1673 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe)
/*
 * Slow path: scan from the head for the first waiter with strictly
 * lower priority and insert in front of it.  The tail check above
 * guarantees such a waiter exists, so tid cannot run off the list.
 */
1675 tid = TAILQ_FIRST(&mutex->m_queue);
1676 while (pthread->active_priority <= tid->active_priority)
1677 tid = TAILQ_NEXT(tid, sqe);
1678 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1680 pthread->sflags |= THR_FLAGS_IN_SYNCQ;