2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include "thr_private.h"
/*
 * Symbol-versioning table: the double- and single-underscore entry points
 * stay private to the library, while the unprefixed POSIX names are the
 * default exported versions.  NOTE(review): the LT10_COMPAT_* macros come
 * from "thr_private.h" — presumably libpthread-1.0 binary-compat plumbing;
 * confirm against that header.
 */
37 LT10_COMPAT_PRIVATE(__pthread_cond_wait);
38 LT10_COMPAT_PRIVATE(_pthread_cond_wait);
39 LT10_COMPAT_DEFAULT(pthread_cond_wait);
40 LT10_COMPAT_PRIVATE(__pthread_cond_timedwait);
41 LT10_COMPAT_PRIVATE(_pthread_cond_timedwait);
42 LT10_COMPAT_DEFAULT(pthread_cond_timedwait);
43 LT10_COMPAT_PRIVATE(_pthread_cond_init);
44 LT10_COMPAT_DEFAULT(pthread_cond_init);
45 LT10_COMPAT_PRIVATE(_pthread_cond_destroy);
46 LT10_COMPAT_DEFAULT(pthread_cond_destroy);
47 LT10_COMPAT_PRIVATE(_pthread_cond_signal);
48 LT10_COMPAT_DEFAULT(pthread_cond_signal);
49 LT10_COMPAT_PRIVATE(_pthread_cond_broadcast);
50 LT10_COMPAT_DEFAULT(pthread_cond_broadcast);
/*
 * A thread's membership in a CV wait queue is tracked via the
 * THR_FLAGS_IN_SYNCQ bit in thr->sflags; these macros test, set and
 * clear that bit.
 */
52 #define THR_IN_CONDQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
53 #define THR_CONDQ_SET(thr) (thr)->sflags |= THR_FLAGS_IN_SYNCQ
54 #define THR_CONDQ_CLEAR(thr) (thr)->sflags &= ~THR_FLAGS_IN_SYNCQ
/* Forward declarations for the static queue/backout helpers defined below. */
59 static inline struct pthread *cond_queue_deq(pthread_cond_t);
60 static inline void cond_queue_remove(pthread_cond_t, pthread_t);
61 static inline void cond_queue_enq(pthread_cond_t, pthread_t);
62 static void cond_wait_backout(void *);
63 static inline void check_continuation(struct pthread *,
64 struct pthread_cond *, pthread_mutex_t *);
67 * Double underscore versions are cancellation points. Single underscore
68 * versions are not and are provided for libc internal usage (which
69 * shouldn't introduce cancellation points).
/* Public POSIX names resolve (weakly) to the implementations above. */
71 __weak_reference(__pthread_cond_wait, pthread_cond_wait);
72 __weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
74 __weak_reference(_pthread_cond_init, pthread_cond_init);
75 __weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
76 __weak_reference(_pthread_cond_signal, pthread_cond_signal);
77 __weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
/*
 * _pthread_cond_init(): allocate and initialise a condition variable,
 * honouring an optional attribute object.  On success *cond points at a
 * freshly malloc'd struct pthread_cond with an adaptive lock and an
 * empty wait queue.
 *
 * NOTE(review): this extract is missing lines (the embedded original
 * line numbers skip), so the braces, `case` labels, error returns and
 * the final `*cond = pcond` assignment of the full function are not
 * visible here.
 */
81 _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
83 enum pthread_cond_type type;
92 * Check if a pointer to a condition variable attribute
93 * structure was passed by the caller:
95 if (cond_attr != NULL && *cond_attr != NULL) {
/* Attribute supplied: take type and flags from it. */
96 /* Default to a fast condition variable: */
97 type = (*cond_attr)->c_type;
98 flags = (*cond_attr)->c_flags;
100 /* Default to a fast condition variable: */
101 type = COND_TYPE_FAST;
105 /* Process according to condition variable type: */
107 /* Fast condition variable: */
109 /* Nothing to do here. */
112 /* Trap invalid condition variable types: */
114 /* Return an invalid argument error: */
119 /* Check for no errors: */
121 if ((pcond = (pthread_cond_t)
122 malloc(sizeof(struct pthread_cond))) == NULL) {
/* Allocation failed (ENOMEM path — return not visible in extract). */
124 } else if (_lock_init(&pcond->c_lock, LCK_ADAPTIVE,
125 _thr_lock_wait, _thr_lock_wakeup, calloc) != 0) {
/* Lock init failed; presumably the malloc'd block is freed here. */
130 * Initialise the condition variable
133 TAILQ_INIT(&pcond->c_queue);
134 pcond->c_flags = COND_FLAGS_INITED;
135 pcond->c_type = type;
136 pcond->c_mutex = NULL;
142 /* Return the completion status: */
/*
 * _pthread_cond_destroy(): tear down a condition variable — lock it,
 * detach it from the caller's pointer, destroy its lock and free the
 * structure.  NOTE(review): lines are missing from this extract; the
 * `cv = *cond; *cond = NULL;` assignments and the EINVAL/return paths
 * are implied by the surviving comments but not visible.
 */
147 _pthread_cond_destroy(pthread_cond_t *cond)
149 struct pthread_cond *cv;
150 struct pthread *curthread = _get_curthread();
/* Guard against a NULL or never-initialised CV. */
153 if (cond == NULL || *cond == NULL)
156 /* Lock the condition variable structure: */
157 THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
160 * NULL the caller's pointer now that the condition
161 * variable has been destroyed:
166 /* Unlock the condition variable structure: */
167 THR_LOCK_RELEASE(curthread, &cv->c_lock);
169 /* Free the cond lock structure: */
170 _lock_destroy(&cv->c_lock);
173 * Free the memory allocated for the condition
174 * variable structure:
179 /* Return the completion status: */
/*
 * _pthread_cond_wait(): block on *cond until signalled/broadcast.
 * Atomically (under the CV lock) enqueues the calling thread, releases
 * the mutex, and switches away in PS_COND_WAIT; the loop re-checks the
 * CV sequence number because a signal handler can back the thread out
 * of the queue before a real wakeup.  Not a cancellation point — see
 * __pthread_cond_wait below for that.
 *
 * NOTE(review): this extract is missing lines (original line numbers
 * skip): `int rval = 0; int done = 0;`, the `case COND_TYPE_FAST:` /
 * `default:` labels, several closing braces and return statements are
 * not visible here.
 */
184 _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
186 struct pthread *curthread = _get_curthread();
189 int mutex_locked = 1;
196 * If the condition variable is statically initialized,
197 * perform the dynamic initialization:
200 (rval = pthread_cond_init(cond, NULL)) != 0)
/* Single-threaded process: fall back to the non-KSE wait path. */
203 if (!_kse_isthreaded())
207 * Enter a loop waiting for a condition signal or broadcast
208 * to wake up this thread. A loop is needed in case the waiting
209 * thread is interrupted by a signal to execute a signal handler.
210 * It is not (currently) possible to remain in the waiting queue
211 * while running a handler. Instead, the thread is interrupted
212 * and backed out of the waiting queue prior to executing the
216 /* Lock the condition variable structure: */
217 THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
/* Snapshot the sequence number; a later change means we were woken. */
218 seqno = (*cond)->c_seqno;
221 * If the condvar was statically allocated, properly
222 * initialize the tail queue.
224 if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
225 TAILQ_INIT(&(*cond)->c_queue);
226 (*cond)->c_flags |= COND_FLAGS_INITED;
229 /* Process according to condition variable type: */
230 switch ((*cond)->c_type) {
231 /* Fast condition variable: */
/* A CV may only be bound to one mutex at a time (EINVAL otherwise). */
233 if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
234 ((*cond)->c_mutex != *mutex))) {
235 /* Return invalid argument error: */
238 /* Reset the timeout and interrupted flags: */
239 curthread->timeout = 0;
240 curthread->interrupted = 0;
243 * Queue the running thread for the condition
246 cond_queue_enq(*cond, curthread);
/* tv_sec == -1 means "no timeout" for the untimed wait. */
249 curthread->wakeup_time.tv_sec = -1;
251 /* Unlock the mutex: */
253 ((rval = _mutex_cv_unlock(mutex)) != 0)) {
255 * Cannot unlock the mutex, so remove
256 * the running thread from the condition
259 cond_queue_remove(*cond, curthread);
262 /* Remember the mutex: */
263 (*cond)->c_mutex = *mutex;
266 * Don't unlock the mutex the next
267 * time through the loop (if the
268 * thread has to be requeued after
269 * handling a signal).
274 * This thread is active and is in a
275 * critical region (holding the cv
276 * lock); we should be able to safely
279 THR_SCHED_LOCK(curthread, curthread);
280 THR_SET_STATE(curthread, PS_COND_WAIT);
282 /* Remember the CV: */
283 curthread->data.cond = *cond;
/* Backout hook: a signal handler can dequeue us via cond_wait_backout. */
284 curthread->sigbackout = cond_wait_backout;
285 THR_SCHED_UNLOCK(curthread, curthread);
287 /* Unlock the CV structure: */
288 THR_LOCK_RELEASE(curthread,
291 /* Schedule the next thread: */
292 _thr_sched_switch(curthread);
295 * XXX - This really isn't a good check
296 * since there can be more than one
297 * thread waiting on the CV. Signals
298 * sent to threads waiting on mutexes
299 * or CVs should really be deferred
300 * until the threads are no longer
301 * waiting, but POSIX says that signals
302 * should be sent "as soon as possible".
304 done = (seqno != (*cond)->c_seqno);
/* Fast path: genuinely woken and already dequeued — no CV relock needed. */
305 if (done && !THR_IN_CONDQ(curthread)) {
307 * The thread is dequeued, so
308 * it is safe to clear these.
310 curthread->data.cond = NULL;
311 curthread->sigbackout = NULL;
312 check_continuation(curthread,
/* Early return: re-acquire the mutex before returning to the caller. */
314 return (_mutex_cv_lock(mutex));
317 /* Relock the CV structure: */
318 THR_LOCK_ACQUIRE(curthread,
322 * Clear these after taking the lock to
323 * prevent a race condition where a
324 * signal can arrive before dequeueing
327 curthread->data.cond = NULL;
328 curthread->sigbackout = NULL;
329 done = (seqno != (*cond)->c_seqno);
/* Still queued (e.g. interrupted): back ourselves out under the lock. */
331 if (THR_IN_CONDQ(curthread)) {
332 cond_queue_remove(*cond,
335 /* Check for no more waiters: */
336 if (TAILQ_EMPTY(&(*cond)->c_queue))
337 (*cond)->c_mutex = NULL;
343 /* Trap invalid condition variable types: */
345 /* Return an invalid argument error: */
/* Dispatch a pending cancellation continuation, if any. */
350 check_continuation(curthread, *cond,
351 mutex_locked ? NULL : mutex);
352 } while ((done == 0) && (rval == 0));
354 /* Unlock the condition variable structure: */
355 THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
/* Ensure the caller gets the mutex back even on the error paths. */
357 if (mutex_locked == 0)
358 _mutex_cv_lock(mutex);
360 /* Return the completion status: */
/* Internal libc alias for the non-cancellation-point wait. */
364 __strong_reference(_pthread_cond_wait, _thr_cond_wait);
/*
 * __pthread_cond_wait(): cancellation-point wrapper around
 * _pthread_cond_wait() — brackets the wait with cancel enter/leave.
 * (NOTE(review): the `int ret;` declaration and `return (ret);` of the
 * full function are not visible in this extract.)
 */
367 __pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
369 struct pthread *curthread = _get_curthread();
372 _thr_cancel_enter(curthread);
373 ret = _pthread_cond_wait(cond, mutex);
374 _thr_cancel_leave(curthread, 1);
/*
 * _pthread_cond_timedwait(): like _pthread_cond_wait() but with an
 * absolute deadline; validates abstime per POSIX (tv_nsec in
 * [0, 1e9)), sets the thread's wakeup_time before switching away, and
 * reports ETIMEDOUT when the scheduler marks curthread->timeout.
 * Not a cancellation point — see __pthread_cond_timedwait below.
 *
 * NOTE(review): this extract is missing lines (original line numbers
 * skip): `int rval = 0; int done = 0;`, the `case COND_TYPE_FAST:` /
 * `default:` labels, the EINVAL/ETIMEDOUT return statements and
 * several closing braces are not visible here.
 */
379 _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
380 const struct timespec * abstime)
382 struct pthread *curthread = _get_curthread();
385 int mutex_locked = 1;
388 THR_ASSERT(curthread->locklevel == 0,
389 "cv_timedwait: locklevel is not zero!");
/* POSIX deadline validation: reject NULL or out-of-range timespec. */
391 if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
392 abstime->tv_nsec >= 1000000000)
395 * If the condition variable is statically initialized, perform dynamic
398 if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0)
/* Single-threaded process: fall back to the non-KSE wait path. */
401 if (!_kse_isthreaded())
405 * Enter a loop waiting for a condition signal or broadcast
406 * to wake up this thread. A loop is needed in case the waiting
407 * thread is interrupted by a signal to execute a signal handler.
408 * It is not (currently) possible to remain in the waiting queue
409 * while running a handler. Instead, the thread is interrupted
410 * and backed out of the waiting queue prior to executing the
414 /* Lock the condition variable structure: */
415 THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
/* Snapshot the sequence number; a later change means we were woken. */
416 seqno = (*cond)->c_seqno;
419 * If the condvar was statically allocated, properly
420 * initialize the tail queue.
422 if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
423 TAILQ_INIT(&(*cond)->c_queue);
424 (*cond)->c_flags |= COND_FLAGS_INITED;
427 /* Process according to condition variable type: */
428 switch ((*cond)->c_type) {
429 /* Fast condition variable: */
/* A CV may only be bound to one mutex at a time (EINVAL otherwise). */
431 if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
432 ((*cond)->c_mutex != *mutex))) {
433 /* Return invalid argument error: */
436 /* Reset the timeout and interrupted flags: */
437 curthread->timeout = 0;
438 curthread->interrupted = 0;
441 * Queue the running thread for the condition
444 cond_queue_enq(*cond, curthread);
446 /* Unlock the mutex: */
448 ((rval = _mutex_cv_unlock(mutex)) != 0)) {
450 * Cannot unlock the mutex; remove the
451 * running thread from the condition
454 cond_queue_remove(*cond, curthread);
456 /* Remember the mutex: */
457 (*cond)->c_mutex = *mutex;
460 * Don't unlock the mutex the next
461 * time through the loop (if the
462 * thread has to be requeued after
463 * handling a signal).
468 * This thread is active and is in a
469 * critical region (holding the cv
470 * lock); we should be able to safely
473 THR_SCHED_LOCK(curthread, curthread);
474 /* Set the wakeup time: */
/* Deadline copied from abstime (RHS values dropped by the extract). */
475 curthread->wakeup_time.tv_sec =
477 curthread->wakeup_time.tv_nsec =
479 THR_SET_STATE(curthread, PS_COND_WAIT);
481 /* Remember the CV: */
482 curthread->data.cond = *cond;
/* Backout hook: a signal handler can dequeue us via cond_wait_backout. */
483 curthread->sigbackout = cond_wait_backout;
484 THR_SCHED_UNLOCK(curthread, curthread);
486 /* Unlock the CV structure: */
487 THR_LOCK_RELEASE(curthread,
490 /* Schedule the next thread: */
491 _thr_sched_switch(curthread);
494 * XXX - This really isn't a good check
495 * since there can be more than one
496 * thread waiting on the CV. Signals
497 * sent to threads waiting on mutexes
498 * or CVs should really be deferred
499 * until the threads are no longer
500 * waiting, but POSIX says that signals
501 * should be sent "as soon as possible".
503 done = (seqno != (*cond)->c_seqno);
/* Fast path: genuinely woken and already dequeued — no CV relock needed. */
504 if (done && !THR_IN_CONDQ(curthread)) {
506 * The thread is dequeued, so
507 * it is safe to clear these.
509 curthread->data.cond = NULL;
510 curthread->sigbackout = NULL;
511 check_continuation(curthread,
/* Early return: re-acquire the mutex before returning to the caller. */
513 return (_mutex_cv_lock(mutex));
516 /* Relock the CV structure: */
517 THR_LOCK_ACQUIRE(curthread,
521 * Clear these after taking the lock to
522 * prevent a race condition where a
523 * signal can arrive before dequeueing
526 curthread->data.cond = NULL;
527 curthread->sigbackout = NULL;
529 done = (seqno != (*cond)->c_seqno);
/* Still queued (e.g. interrupted or timed out): back ourselves out. */
531 if (THR_IN_CONDQ(curthread)) {
532 cond_queue_remove(*cond,
535 /* Check for no more waiters: */
536 if (TAILQ_EMPTY(&(*cond)->c_queue))
537 (*cond)->c_mutex = NULL;
/* Scheduler set curthread->timeout: report ETIMEDOUT (return not visible). */
540 if (curthread->timeout != 0) {
541 /* The wait timedout. */
548 /* Trap invalid condition variable types: */
550 /* Return an invalid argument error: */
/* Dispatch a pending cancellation continuation, if any. */
555 check_continuation(curthread, *cond,
556 mutex_locked ? NULL : mutex);
557 } while ((done == 0) && (rval == 0));
559 /* Unlock the condition variable structure: */
560 THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
/* Ensure the caller gets the mutex back even on the error paths. */
562 if (mutex_locked == 0)
563 _mutex_cv_lock(mutex);
565 /* Return the completion status: */
/*
 * __pthread_cond_timedwait(): cancellation-point wrapper around
 * _pthread_cond_timedwait() — brackets the wait with cancel
 * enter/leave.  (NOTE(review): the `int ret;` declaration and
 * `return (ret);` of the full function are not visible in this
 * extract.)
 */
570 __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
571 const struct timespec *abstime)
573 struct pthread *curthread = _get_curthread();
576 _thr_cancel_enter(curthread);
577 ret = _pthread_cond_timedwait(cond, mutex, abstime);
578 _thr_cancel_leave(curthread, 1);
/*
 * _pthread_cond_signal(): wake the highest-priority waiter.  Bumps the
 * CV sequence number, dequeues the head waiter under both the CV lock
 * and that thread's scheduler lock, and makes it runnable; requests a
 * yield if the woken thread outranks the caller on the same KSE group.
 *
 * NOTE(review): lines are missing from this extract — the
 * `int rval = 0;` declaration, `case`/`default` labels, the c_seqno
 * increment, the `!= NULL` tail of the TAILQ_FIRST condition, the
 * kse_wakeup(kmbx) call and the return statement are not visible.
 */
584 _pthread_cond_signal(pthread_cond_t * cond)
586 struct pthread *curthread = _get_curthread();
587 struct pthread *pthread;
588 struct kse_mailbox *kmbx;
/* NOTE(review): assert message says "cv_timedwait" — looks like a
 * copy/paste from the timedwait path; the text is misleading here. */
591 THR_ASSERT(curthread->locklevel == 0,
592 "cv_timedwait: locklevel is not zero!");
596 * If the condition variable is statically initialized, perform dynamic
599 else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
600 /* Lock the condition variable structure: */
601 THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
603 /* Process according to condition variable type: */
604 switch ((*cond)->c_type) {
605 /* Fast condition variable: */
607 /* Increment the sequence number: */
611 * Wakeups have to be done with the CV lock held;
612 * otherwise there is a race condition where the
613 * thread can timeout, run on another KSE, and enter
614 * another blocking state (including blocking on a CV).
616 if ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
618 THR_SCHED_LOCK(curthread, pthread);
619 cond_queue_remove(*cond, pthread);
/* Dequeued under the lock, so the backout hook is no longer needed. */
620 pthread->sigbackout = NULL;
621 if ((pthread->kseg == curthread->kseg) &&
622 (pthread->active_priority >
623 curthread->active_priority))
624 curthread->critical_yield = 1;
625 kmbx = _thr_setrunnable_unlocked(pthread);
626 THR_SCHED_UNLOCK(curthread, pthread);
630 /* Check for no more waiters: */
631 if (TAILQ_EMPTY(&(*cond)->c_queue))
632 (*cond)->c_mutex = NULL;
635 /* Trap invalid condition variable types: */
637 /* Return an invalid argument error: */
642 /* Unlock the condition variable structure: */
643 THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
646 /* Return the completion status: */
/* Internal libc alias. */
650 __strong_reference(_pthread_cond_signal, _thr_cond_signal);
/*
 * _pthread_cond_broadcast(): wake every waiter.  Same per-thread wakeup
 * protocol as _pthread_cond_signal(), but loops until the wait queue is
 * drained, then clears the CV's mutex binding.
 *
 * NOTE(review): lines are missing from this extract — the
 * `int rval = 0;` declaration, `case`/`default` labels, the c_seqno
 * increment, the `!= NULL` tail of the TAILQ_FIRST condition, the
 * kse_wakeup(kmbx) call and the return statement are not visible.
 */
653 _pthread_cond_broadcast(pthread_cond_t * cond)
655 struct pthread *curthread = _get_curthread();
656 struct pthread *pthread;
657 struct kse_mailbox *kmbx;
/* NOTE(review): assert message says "cv_timedwait" — looks like a
 * copy/paste from the timedwait path; the text is misleading here. */
660 THR_ASSERT(curthread->locklevel == 0,
661 "cv_timedwait: locklevel is not zero!");
665 * If the condition variable is statically initialized, perform dynamic
668 else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
669 /* Lock the condition variable structure: */
670 THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
672 /* Process according to condition variable type: */
673 switch ((*cond)->c_type) {
674 /* Fast condition variable: */
676 /* Increment the sequence number: */
680 * Enter a loop to bring all threads off the
683 while ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
685 THR_SCHED_LOCK(curthread, pthread);
686 cond_queue_remove(*cond, pthread);
/* Dequeued under the lock, so the backout hook is no longer needed. */
687 pthread->sigbackout = NULL;
688 if ((pthread->kseg == curthread->kseg) &&
689 (pthread->active_priority >
690 curthread->active_priority))
691 curthread->critical_yield = 1;
692 kmbx = _thr_setrunnable_unlocked(pthread);
693 THR_SCHED_UNLOCK(curthread, pthread);
698 /* There are no more waiting threads: */
699 (*cond)->c_mutex = NULL;
702 /* Trap invalid condition variable types: */
704 /* Return an invalid argument error: */
709 /* Unlock the condition variable structure: */
710 THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
713 /* Return the completion status: */
/* Internal libc alias. */
717 __strong_reference(_pthread_cond_broadcast, _thr_cond_broadcast);
/*
 * check_continuation(): if the thread was interrupted and has a pending
 * continuation (i.e. a deferred cancellation handler), release the CV
 * lock, re-acquire the mutex as POSIX requires, and jump into the
 * continuation — which must never return.  Called with the CV lock
 * held; `mutex` is NULL when the caller still holds its mutex.
 * (NOTE(review): the mutex != NULL guard around _mutex_cv_lock is
 * presumably on a line this extract dropped — confirm.)
 */
720 check_continuation(struct pthread *curthread, struct pthread_cond *cond,
721 pthread_mutex_t *mutex)
723 if ((curthread->interrupted != 0) &&
724 (curthread->continuation != NULL)) {
726 /* Unlock the condition variable structure: */
727 THR_LOCK_RELEASE(curthread, &cond->c_lock);
729 * Note that even though this thread may have been
730 * canceled, POSIX requires that the mutex be
731 * reaquired prior to cancellation.
734 _mutex_cv_lock(mutex);
735 curthread->continuation((void *) curthread);
/* The continuation must not return; reaching here is fatal. */
736 PANIC("continuation returned in pthread_cond_wait.\n");
/*
 * cond_wait_backout(): sigbackout hook installed by the wait paths.
 * Invoked (with arg == the waiting thread) when a signal forces the
 * thread out of its wait: removes it from the CV queue under the CV
 * lock, clears the mutex binding when the queue drains, and disarms
 * the hook.
 */
741 cond_wait_backout(void *arg)
743 struct pthread *curthread = (struct pthread *)arg;
746 cond = curthread->data.cond;
748 /* Lock the condition variable structure: */
749 THR_LOCK_ACQUIRE(curthread, &cond->c_lock);
751 /* Process according to condition variable type: */
752 switch (cond->c_type) {
753 /* Fast condition variable: */
755 cond_queue_remove(cond, curthread);
757 /* Check for no more waiters: */
758 if (TAILQ_EMPTY(&cond->c_queue))
759 cond->c_mutex = NULL;
766 /* Unlock the condition variable structure: */
767 THR_LOCK_RELEASE(curthread, &cond->c_lock);
769 /* No need to call this again. */
770 curthread->sigbackout = NULL;
774 * Dequeue a waiting thread from the head of a condition queue in
775 * descending priority order.
/*
 * Pops entries until one is found that neither timed out nor was
 * interrupted (those are already running); returns that thread, or
 * NULL if the queue empties first.  Caller must hold the CV lock.
 * (NOTE(review): the `break` terminating the loop and the final
 * `return (pthread);` are on lines this extract dropped.)
 */
777 static inline struct pthread *
778 cond_queue_deq(pthread_cond_t cond)
780 struct pthread *pthread;
782 while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
783 TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
784 THR_CONDQ_CLEAR(pthread);
785 if ((pthread->timeout == 0) && (pthread->interrupted == 0))
787 * Only exit the loop when we find a thread
788 * that hasn't timed out or been canceled;
789 * those threads are already running and don't
790 * need their run state changed.
799 * Remove a waiting thread from a condition queue in descending priority
/*
 * Idempotent removal: unlinks `pthread` from `cond`'s wait queue and
 * clears its in-queue flag, but only if the flag shows it is actually
 * queued.  Caller must hold the CV lock.
 */
803 cond_queue_remove(pthread_cond_t cond, struct pthread *pthread)
806 * Because pthread_cond_timedwait() can timeout as well
807 * as be signaled by another thread, it is necessary to
808 * guard against removing the thread from the queue if
809 * it isn't in the queue.
811 if (THR_IN_CONDQ(pthread)) {
812 TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
813 THR_CONDQ_CLEAR(pthread);
818 * Enqueue a waiting thread to a condition queue in descending priority
/*
 * Priority-ordered insert: equal-priority threads queue FIFO (<= test),
 * so the common all-equal case is an O(1) tail append; otherwise a
 * linear scan from the head finds the first lower-priority entry.
 * Marks the thread as queued and records the CV it waits on.  Caller
 * must hold the CV lock.
 */
822 cond_queue_enq(pthread_cond_t cond, struct pthread *pthread)
824 struct pthread *tid = TAILQ_LAST(&cond->c_queue, cond_head);
826 THR_ASSERT(!THR_IN_SYNCQ(pthread),
827 "cond_queue_enq: thread already queued!");
830 * For the common case of all threads having equal priority,
831 * we perform a quick check against the priority of the thread
832 * at the tail of the queue.
834 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
835 TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe);
/* Higher priority than the tail: scan for the insertion point. */
837 tid = TAILQ_FIRST(&cond->c_queue);
838 while (pthread->active_priority <= tid->active_priority)
839 tid = TAILQ_NEXT(tid, sqe);
840 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
842 THR_CONDQ_SET(pthread);
843 pthread->data.cond = cond;