/*
2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
32 #include "namespace.h"
37 #include "un-namespace.h"
38 #include "thr_private.h"
/*
 * Condition-queue membership helpers: test/set/clear the
 * THR_FLAGS_IN_SYNCQ bit in a thread's sflags word, which marks
 * whether the thread is currently linked on a cv wait queue.
 */
40 #define THR_IN_CONDQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
41 #define THR_CONDQ_SET(thr) (thr)->sflags |= THR_FLAGS_IN_SYNCQ
42 #define THR_CONDQ_CLEAR(thr) (thr)->sflags &= ~THR_FLAGS_IN_SYNCQ
/*
 * Forward declarations of the static queue-management helpers
 * defined at the bottom of this file.
 */
47 static inline struct pthread *cond_queue_deq(pthread_cond_t);
48 static inline void cond_queue_remove(pthread_cond_t, pthread_t);
49 static inline void cond_queue_enq(pthread_cond_t, pthread_t);
50 static void cond_wait_backout(void *);
51 static inline void check_continuation(struct pthread *,
52 struct pthread_cond *, pthread_mutex_t *);
/*
 * Prototypes for the cancellation-point (double-underscore) wrappers
 * implemented later in this file.
 */
54 int __pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
55 int __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
56 const struct timespec *abstime);
/*
59 * Double underscore versions are cancellation points. Single underscore
60 * versions are not and are provided for libc internal usage (which
61 * shouldn't introduce cancellation points).
 */
/* Wait entry points: public names bind to the cancellation-point variants. */
63 __weak_reference(__pthread_cond_wait, pthread_cond_wait);
64 __weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
/* Non-waiting entry points are not cancellation points. */
66 __weak_reference(_pthread_cond_init, pthread_cond_init);
67 __weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
68 __weak_reference(_pthread_cond_signal, pthread_cond_signal);
69 __weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
/*
 * _pthread_cond_init -- allocate and initialize a condition variable.
 *
 * The type and flags are taken from *cond_attr when the caller supplies
 * an attribute object; otherwise the type defaults to COND_TYPE_FAST.
 * On success a struct pthread_cond is malloc'd, its adaptive lock is
 * set up with _lock_init(), its wait queue is TAILQ_INIT'd, and the
 * handle is presumably stored through *cond (the assignment is not
 * visible in this excerpt).
 *
 * NOTE(review): this excerpt is missing lines (the opening brace, the
 * declarations of flags/pcond/rval, the switch/case skeleton, and the
 * return statement) -- confirm against the complete source file.
 */
73 _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
75 enum pthread_cond_type type;
84 * Check if a pointer to a condition variable attribute
85 * structure was passed by the caller:
87 if (cond_attr != NULL && *cond_attr != NULL) {
88 /* Default to a fast condition variable: */
89 type = (*cond_attr)->c_type;
90 flags = (*cond_attr)->c_flags;
92 /* Default to a fast condition variable: */
93 type = COND_TYPE_FAST;
97 /* Process according to condition variable type: */
99 /* Fast condition variable: */
101 /* Nothing to do here. */
104 /* Trap invalid condition variable types: */
106 /* Return an invalid argument error: */
111 /* Check for no errors: */
113 if ((pcond = (pthread_cond_t)
114 malloc(sizeof(struct pthread_cond))) == NULL) {
116 } else if (_lock_init(&pcond->c_lock, LCK_ADAPTIVE,
117 _thr_lock_wait, _thr_lock_wakeup, calloc) != 0) {
122 * Initialise the condition variable
125 TAILQ_INIT(&pcond->c_queue);
126 pcond->c_flags = COND_FLAGS_INITED;
127 pcond->c_type = type;
128 pcond->c_mutex = NULL;
134 /* Return the completion status: */
/*
 * _pthread_cond_destroy -- tear down a condition variable.
 *
 * Rejects a NULL or already-destroyed handle, then with the cv lock
 * held NULLs the caller's pointer, releases and destroys the cv lock,
 * and frees the cv storage.
 *
 * NOTE(review): lines appear to be missing from this excerpt (the
 * assignment of cv, the "*cond = NULL" store, the free() call, and the
 * return) -- confirm against the complete source file.
 */
139 _pthread_cond_destroy(pthread_cond_t *cond)
141 struct pthread_cond *cv;
142 struct pthread *curthread = _get_curthread();
145 if (cond == NULL || *cond == NULL)
148 /* Lock the condition variable structure: */
149 THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
152 * NULL the caller's pointer now that the condition
153 * variable has been destroyed:
158 /* Unlock the condition variable structure: */
159 THR_LOCK_RELEASE(curthread, &cv->c_lock);
161 /* Free the cond lock structure: */
162 _lock_destroy(&cv->c_lock);
165 * Free the memory allocated for the condition
166 * variable structure:
171 /* Return the completion status: */
/*
 * _pthread_cond_wait -- block the calling thread on *cond until it is
 * signalled or broadcast, releasing *mutex while waiting.
 *
 * A statically-initialized condvar is dynamically initialized on first
 * use.  The wait runs in a do/while loop because a signal handler can
 * interrupt the wait and back the thread out of the cv queue (see
 * cond_wait_backout()); the loop re-queues the thread until the cv
 * sequence number (c_seqno) changes or an error occurs.  The mutex is
 * re-acquired with _mutex_cv_lock() before returning.
 *
 * NOTE(review): numerous lines are missing from this excerpt (variable
 * declarations for rval/done/seqno, braces, case labels, break/return
 * statements) -- confirm the control flow against the complete source.
 */
176 _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
178 struct pthread *curthread = _get_curthread();
181 int mutex_locked = 1;
188 * If the condition variable is statically initialized,
189 * perform the dynamic initialization:
192 (rval = _pthread_cond_init(cond, NULL)) != 0)
195 if (!_kse_isthreaded())
199 * Enter a loop waiting for a condition signal or broadcast
200 * to wake up this thread. A loop is needed in case the waiting
201 * thread is interrupted by a signal to execute a signal handler.
202 * It is not (currently) possible to remain in the waiting queue
203 * while running a handler. Instead, the thread is interrupted
204 * and backed out of the waiting queue prior to executing the
208 /* Lock the condition variable structure: */
209 THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
210 seqno = (*cond)->c_seqno;
213 * If the condvar was statically allocated, properly
214 * initialize the tail queue.
216 if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
217 TAILQ_INIT(&(*cond)->c_queue);
218 (*cond)->c_flags |= COND_FLAGS_INITED;
221 /* Process according to condition variable type: */
222 switch ((*cond)->c_type) {
223 /* Fast condition variable: */
225 if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
226 ((*cond)->c_mutex != *mutex))) {
227 /* Return invalid argument error: */
230 /* Reset the timeout and interrupted flags: */
231 curthread->timeout = 0;
232 curthread->interrupted = 0;
235 * Queue the running thread for the condition
238 cond_queue_enq(*cond, curthread);
/* tv_sec == -1 means wait with no timeout (untimed variant). */
241 curthread->wakeup_time.tv_sec = -1;
243 /* Unlock the mutex: */
245 ((rval = _mutex_cv_unlock(mutex)) != 0)) {
247 * Cannot unlock the mutex, so remove
248 * the running thread from the condition
251 cond_queue_remove(*cond, curthread);
254 /* Remember the mutex: */
255 (*cond)->c_mutex = *mutex;
258 * Don't unlock the mutex the next
259 * time through the loop (if the
260 * thread has to be requeued after
261 * handling a signal).
266 * This thread is active and is in a
267 * critical region (holding the cv
268 * lock); we should be able to safely
271 THR_SCHED_LOCK(curthread, curthread);
272 THR_SET_STATE(curthread, PS_COND_WAIT);
274 /* Remember the CV: */
275 curthread->data.cond = *cond;
276 curthread->sigbackout = cond_wait_backout;
277 THR_SCHED_UNLOCK(curthread, curthread);
279 /* Unlock the CV structure: */
280 THR_LOCK_RELEASE(curthread,
283 /* Schedule the next thread: */
284 _thr_sched_switch(curthread);
287 * XXX - This really isn't a good check
288 * since there can be more than one
289 * thread waiting on the CV. Signals
290 * sent to threads waiting on mutexes
291 * or CVs should really be deferred
292 * until the threads are no longer
293 * waiting, but POSIX says that signals
294 * should be sent "as soon as possible".
296 done = (seqno != (*cond)->c_seqno);
297 if (done && !THR_IN_CONDQ(curthread)) {
299 * The thread is dequeued, so
300 * it is safe to clear these.
302 curthread->data.cond = NULL;
303 curthread->sigbackout = NULL;
304 check_continuation(curthread,
/* Fast path: awakened and dequeued -- just retake the mutex. */
306 return (_mutex_cv_lock(mutex));
309 /* Relock the CV structure: */
310 THR_LOCK_ACQUIRE(curthread,
314 * Clear these after taking the lock to
315 * prevent a race condition where a
316 * signal can arrive before dequeueing
319 curthread->data.cond = NULL;
320 curthread->sigbackout = NULL;
321 done = (seqno != (*cond)->c_seqno);
323 if (THR_IN_CONDQ(curthread)) {
324 cond_queue_remove(*cond,
327 /* Check for no more waiters: */
328 if (TAILQ_EMPTY(&(*cond)->c_queue))
329 (*cond)->c_mutex = NULL;
335 /* Trap invalid condition variable types: */
337 /* Return an invalid argument error: */
342 check_continuation(curthread, *cond,
343 mutex_locked ? NULL : mutex);
344 } while ((done == 0) && (rval == 0));
346 /* Unlock the condition variable structure: */
347 THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
349 if (mutex_locked == 0)
350 _mutex_cv_lock(mutex);
352 /* Return the completion status: */
/* libc-internal (non-cancellation-point) alias. */
356 __strong_reference(_pthread_cond_wait, _thr_cond_wait);
/*
 * __pthread_cond_wait -- cancellation-point wrapper: brackets the real
 * wait in _thr_cancel_enter()/_thr_cancel_leave() so pending
 * cancellation requests are acted on around the blocking call.
 *
 * NOTE(review): the declaration of ret, braces, and the return are not
 * visible in this excerpt.
 */
359 __pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
361 struct pthread *curthread = _get_curthread();
364 _thr_cancel_enter(curthread);
365 ret = _pthread_cond_wait(cond, mutex);
366 _thr_cancel_leave(curthread, 1);
/*
 * _pthread_cond_timedwait -- like _pthread_cond_wait(), but gives up
 * when the absolute time *abstime passes (curthread->timeout is set by
 * the scheduler in that case and mapped to a timeout error).
 *
 * Validates abstime up front: NULL, negative fields, or
 * tv_nsec >= 1000000000 are rejected.  Otherwise the structure mirrors
 * _pthread_cond_wait(): dynamic init of static condvars, then a
 * re-queueing loop keyed off the cv sequence number, with the wakeup
 * time loaded into curthread->wakeup_time before blocking.
 *
 * NOTE(review): numerous lines are missing from this excerpt (variable
 * declarations, braces, case labels, the abstime field assignments,
 * break/return statements) -- confirm against the complete source.
 */
371 _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
372 const struct timespec * abstime)
374 struct pthread *curthread = _get_curthread();
377 int mutex_locked = 1;
380 THR_ASSERT(curthread->locklevel == 0,
381 "cv_timedwait: locklevel is not zero!");
383 if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
384 abstime->tv_nsec >= 1000000000)
387 * If the condition variable is statically initialized, perform dynamic
390 if (*cond == NULL && (rval = _pthread_cond_init(cond, NULL)) != 0)
393 if (!_kse_isthreaded())
397 * Enter a loop waiting for a condition signal or broadcast
398 * to wake up this thread. A loop is needed in case the waiting
399 * thread is interrupted by a signal to execute a signal handler.
400 * It is not (currently) possible to remain in the waiting queue
401 * while running a handler. Instead, the thread is interrupted
402 * and backed out of the waiting queue prior to executing the
406 /* Lock the condition variable structure: */
407 THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
408 seqno = (*cond)->c_seqno;
411 * If the condvar was statically allocated, properly
412 * initialize the tail queue.
414 if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
415 TAILQ_INIT(&(*cond)->c_queue);
416 (*cond)->c_flags |= COND_FLAGS_INITED;
419 /* Process according to condition variable type: */
420 switch ((*cond)->c_type) {
421 /* Fast condition variable: */
423 if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
424 ((*cond)->c_mutex != *mutex))) {
425 /* Return invalid argument error: */
428 /* Reset the timeout and interrupted flags: */
429 curthread->timeout = 0;
430 curthread->interrupted = 0;
433 * Queue the running thread for the condition
436 cond_queue_enq(*cond, curthread);
438 /* Unlock the mutex: */
440 ((rval = _mutex_cv_unlock(mutex)) != 0)) {
442 * Cannot unlock the mutex; remove the
443 * running thread from the condition
446 cond_queue_remove(*cond, curthread);
448 /* Remember the mutex: */
449 (*cond)->c_mutex = *mutex;
452 * Don't unlock the mutex the next
453 * time through the loop (if the
454 * thread has to be requeued after
455 * handling a signal).
460 * This thread is active and is in a
461 * critical region (holding the cv
462 * lock); we should be able to safely
465 THR_SCHED_LOCK(curthread, curthread);
466 /* Set the wakeup time: */
467 curthread->wakeup_time.tv_sec =
469 curthread->wakeup_time.tv_nsec =
471 THR_SET_STATE(curthread, PS_COND_WAIT);
473 /* Remember the CV: */
474 curthread->data.cond = *cond;
475 curthread->sigbackout = cond_wait_backout;
476 THR_SCHED_UNLOCK(curthread, curthread);
478 /* Unlock the CV structure: */
479 THR_LOCK_RELEASE(curthread,
482 /* Schedule the next thread: */
483 _thr_sched_switch(curthread);
486 * XXX - This really isn't a good check
487 * since there can be more than one
488 * thread waiting on the CV. Signals
489 * sent to threads waiting on mutexes
490 * or CVs should really be deferred
491 * until the threads are no longer
492 * waiting, but POSIX says that signals
493 * should be sent "as soon as possible".
495 done = (seqno != (*cond)->c_seqno);
496 if (done && !THR_IN_CONDQ(curthread)) {
498 * The thread is dequeued, so
499 * it is safe to clear these.
501 curthread->data.cond = NULL;
502 curthread->sigbackout = NULL;
503 check_continuation(curthread,
/* Fast path: awakened and dequeued -- just retake the mutex. */
505 return (_mutex_cv_lock(mutex));
508 /* Relock the CV structure: */
509 THR_LOCK_ACQUIRE(curthread,
513 * Clear these after taking the lock to
514 * prevent a race condition where a
515 * signal can arrive before dequeueing
518 curthread->data.cond = NULL;
519 curthread->sigbackout = NULL;
521 done = (seqno != (*cond)->c_seqno);
523 if (THR_IN_CONDQ(curthread)) {
524 cond_queue_remove(*cond,
527 /* Check for no more waiters: */
528 if (TAILQ_EMPTY(&(*cond)->c_queue))
529 (*cond)->c_mutex = NULL;
/* Only this timed variant inspects the scheduler's timeout flag. */
532 if (curthread->timeout != 0) {
533 /* The wait timedout. */
540 /* Trap invalid condition variable types: */
542 /* Return an invalid argument error: */
547 check_continuation(curthread, *cond,
548 mutex_locked ? NULL : mutex);
549 } while ((done == 0) && (rval == 0));
551 /* Unlock the condition variable structure: */
552 THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
554 if (mutex_locked == 0)
555 _mutex_cv_lock(mutex);
557 /* Return the completion status: */
/*
 * __pthread_cond_timedwait -- cancellation-point wrapper around
 * _pthread_cond_timedwait(); brackets the blocking call with
 * _thr_cancel_enter()/_thr_cancel_leave().
 *
 * NOTE(review): the declaration of ret, braces, and the return are not
 * visible in this excerpt.
 */
562 __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
563 const struct timespec *abstime)
565 struct pthread *curthread = _get_curthread();
568 _thr_cancel_enter(curthread);
569 ret = _pthread_cond_timedwait(cond, mutex, abstime);
570 _thr_cancel_leave(curthread, 1);
/*
 * _pthread_cond_signal -- wake one waiter (the head of the priority-
 * ordered queue) on *cond.
 *
 * The wakeup is performed with the cv lock held to close the race
 * described inline (a timing-out waiter migrating to another blocking
 * state).  If the awakened thread shares the caller's KSE group and
 * has higher active priority, critical_yield is set so the caller
 * yields soon.
 *
 * NOTE(review): the THR_ASSERT message says "cv_timedwait" -- appears
 * to be a copy/paste slip carried over from the timedwait function.
 * Several lines are also missing from this excerpt (NULL-handle check,
 * c_seqno increment, kmbx wakeup call, braces, return).
 */
576 _pthread_cond_signal(pthread_cond_t * cond)
578 struct pthread *curthread = _get_curthread();
579 struct pthread *pthread;
580 struct kse_mailbox *kmbx;
583 THR_ASSERT(curthread->locklevel == 0,
584 "cv_timedwait: locklevel is not zero!");
588 * If the condition variable is statically initialized, perform dynamic
591 else if (*cond != NULL || (rval = _pthread_cond_init(cond, NULL)) == 0) {
592 /* Lock the condition variable structure: */
593 THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
595 /* Process according to condition variable type: */
596 switch ((*cond)->c_type) {
597 /* Fast condition variable: */
599 /* Increment the sequence number: */
603 * Wakeups have to be done with the CV lock held;
604 * otherwise there is a race condition where the
605 * thread can timeout, run on another KSE, and enter
606 * another blocking state (including blocking on a CV).
608 if ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
610 THR_SCHED_LOCK(curthread, pthread);
611 cond_queue_remove(*cond, pthread);
/* Dequeued under the lock; backout callback is no longer needed. */
612 pthread->sigbackout = NULL;
613 if ((pthread->kseg == curthread->kseg) &&
614 (pthread->active_priority >
615 curthread->active_priority))
616 curthread->critical_yield = 1;
617 kmbx = _thr_setrunnable_unlocked(pthread);
618 THR_SCHED_UNLOCK(curthread, pthread);
622 /* Check for no more waiters: */
623 if (TAILQ_EMPTY(&(*cond)->c_queue))
624 (*cond)->c_mutex = NULL;
627 /* Trap invalid condition variable types: */
629 /* Return an invalid argument error: */
634 /* Unlock the condition variable structure: */
635 THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
638 /* Return the completion status: */
/* libc-internal (non-cancellation-point) alias. */
642 __strong_reference(_pthread_cond_signal, _thr_cond_signal);
/*
 * _pthread_cond_broadcast -- wake every thread waiting on *cond.
 *
 * Same structure as _pthread_cond_signal(), but loops until the wait
 * queue is empty, then clears c_mutex since no waiters remain.
 *
 * NOTE(review): the THR_ASSERT message says "cv_timedwait" -- appears
 * to be a copy/paste slip.  Several lines are also missing from this
 * excerpt (NULL-handle check, c_seqno increment, kmbx wakeup call,
 * braces, return).
 */
645 _pthread_cond_broadcast(pthread_cond_t * cond)
647 struct pthread *curthread = _get_curthread();
648 struct pthread *pthread;
649 struct kse_mailbox *kmbx;
652 THR_ASSERT(curthread->locklevel == 0,
653 "cv_timedwait: locklevel is not zero!");
657 * If the condition variable is statically initialized, perform dynamic
660 else if (*cond != NULL || (rval = _pthread_cond_init(cond, NULL)) == 0) {
661 /* Lock the condition variable structure: */
662 THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
664 /* Process according to condition variable type: */
665 switch ((*cond)->c_type) {
666 /* Fast condition variable: */
668 /* Increment the sequence number: */
672 * Enter a loop to bring all threads off the
675 while ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
677 THR_SCHED_LOCK(curthread, pthread);
678 cond_queue_remove(*cond, pthread);
/* Dequeued under the lock; backout callback is no longer needed. */
679 pthread->sigbackout = NULL;
680 if ((pthread->kseg == curthread->kseg) &&
681 (pthread->active_priority >
682 curthread->active_priority))
683 curthread->critical_yield = 1;
684 kmbx = _thr_setrunnable_unlocked(pthread);
685 THR_SCHED_UNLOCK(curthread, pthread);
690 /* There are no more waiting threads: */
691 (*cond)->c_mutex = NULL;
694 /* Trap invalid condition variable types: */
696 /* Return an invalid argument error: */
701 /* Unlock the condition variable structure: */
702 THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
705 /* Return the completion status: */
/* libc-internal (non-cancellation-point) alias. */
709 __strong_reference(_pthread_cond_broadcast, _thr_cond_broadcast);
/*
 * check_continuation -- if the thread was interrupted (e.g. canceled)
 * and has a pending continuation, release the cv lock, reacquire the
 * mutex when one is passed (POSIX requires the mutex to be reacquired
 * before cancellation handling), and invoke the continuation.  The
 * continuation must not return; doing so is a PANIC.
 *
 * A NULL mutex argument means the caller still holds the mutex, so no
 * reacquisition is needed (see the mutex_locked ? NULL : mutex call
 * sites).
 */
712 check_continuation(struct pthread *curthread, struct pthread_cond *cond,
713 pthread_mutex_t *mutex)
715 if ((curthread->interrupted != 0) &&
716 (curthread->continuation != NULL)) {
718 /* Unlock the condition variable structure: */
719 THR_LOCK_RELEASE(curthread, &cond->c_lock);
721 * Note that even though this thread may have been
722 * canceled, POSIX requires that the mutex be
723 * reaquired prior to cancellation.
726 _mutex_cv_lock(mutex);
727 curthread->continuation((void *) curthread);
728 PANIC("continuation returned in pthread_cond_wait.\n");
/*
 * cond_wait_backout -- sigbackout callback installed by the wait
 * functions: removes the interrupted thread from the cv wait queue
 * before its signal handler runs (a thread cannot remain queued while
 * handling a signal).  Clears curthread->sigbackout when done so it is
 * not invoked again.
 *
 * NOTE(review): braces, case/default labels, and break statements are
 * missing from this excerpt.
 */
733 cond_wait_backout(void *arg)
735 struct pthread *curthread = (struct pthread *)arg;
738 cond = curthread->data.cond;
740 /* Lock the condition variable structure: */
741 THR_LOCK_ACQUIRE(curthread, &cond->c_lock);
743 /* Process according to condition variable type: */
744 switch (cond->c_type) {
745 /* Fast condition variable: */
747 cond_queue_remove(cond, curthread);
749 /* Check for no more waiters: */
750 if (TAILQ_EMPTY(&cond->c_queue))
751 cond->c_mutex = NULL;
758 /* Unlock the condition variable structure: */
759 THR_LOCK_RELEASE(curthread, &cond->c_lock);
761 /* No need to call this again. */
762 curthread->sigbackout = NULL;
/*
766 * Dequeue a waiting thread from the head of a condition queue in
767 * descending priority order.
 *
 * Threads whose timeout or interrupted flag is set are popped and
 * discarded (they are already running); the loop stops at the first
 * thread that still needs a state change, which is returned.
 *
 * NOTE(review): the loop's break, closing braces, and the final return
 * are missing from this excerpt.
 */
769 static inline struct pthread *
770 cond_queue_deq(pthread_cond_t cond)
772 struct pthread *pthread;
774 while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
775 TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
776 THR_CONDQ_CLEAR(pthread);
777 if ((pthread->timeout == 0) && (pthread->interrupted == 0))
779 * Only exit the loop when we find a thread
780 * that hasn't timed out or been canceled;
781 * those threads are already running and don't
782 * need their run state changed.
/*
791 * Remove a waiting thread from a condition queue in descending priority
 * order.
 *
 * Safe to call on a thread that is not queued: the THR_IN_CONDQ guard
 * makes removal a no-op in that case (needed because a timed wait can
 * race with a signal from another thread).
 */
795 cond_queue_remove(pthread_cond_t cond, struct pthread *pthread)
798 * Because pthread_cond_timedwait() can timeout as well
799 * as be signaled by another thread, it is necessary to
800 * guard against removing the thread from the queue if
801 * it isn't in the queue.
803 if (THR_IN_CONDQ(pthread)) {
804 TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
805 THR_CONDQ_CLEAR(pthread);
/*
810 * Enqueue a waiting thread to a condition queue in descending priority
 * order.
 *
 * Fast path: equal/lower priority than the current tail inserts at the
 * tail (covers the common all-equal-priority case).  Otherwise a
 * linear scan from the head finds the first lower-priority thread and
 * inserts before it.  Marks the thread queued and records the cv in
 * its data.cond.
 *
 * NOTE(review): a closing brace for the else branch and the function's
 * final brace appear to be missing from this excerpt.
 */
814 cond_queue_enq(pthread_cond_t cond, struct pthread *pthread)
816 struct pthread *tid = TAILQ_LAST(&cond->c_queue, cond_head);
818 THR_ASSERT(!THR_IN_SYNCQ(pthread),
819 "cond_queue_enq: thread already queued!");
822 * For the common case of all threads having equal priority,
823 * we perform a quick check against the priority of the thread
824 * at the tail of the queue.
826 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
827 TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe);
829 tid = TAILQ_FIRST(&cond->c_queue);
830 while (pthread->active_priority <= tid->active_priority)
831 tid = TAILQ_NEXT(tid, sqe);
832 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
834 THR_CONDQ_SET(pthread);
835 pthread->data.cond = cond;