/* lib/libkse/thread/thr_cond.c — FreeBSD releng/10.0 */
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31
32 #include "namespace.h"
33 #include <stdlib.h>
34 #include <errno.h>
35 #include <string.h>
36 #include <pthread.h>
37 #include "un-namespace.h"
38 #include "thr_private.h"
39
40 #define THR_IN_CONDQ(thr)       (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
41 #define THR_CONDQ_SET(thr)      (thr)->sflags |= THR_FLAGS_IN_SYNCQ
42 #define THR_CONDQ_CLEAR(thr)    (thr)->sflags &= ~THR_FLAGS_IN_SYNCQ
43
44 /*
45  * Prototypes
46  */
47 static inline struct pthread    *cond_queue_deq(pthread_cond_t);
48 static inline void              cond_queue_remove(pthread_cond_t, pthread_t);
49 static inline void              cond_queue_enq(pthread_cond_t, pthread_t);
50 static void                     cond_wait_backout(void *);
51 static inline void              check_continuation(struct pthread *,
52                                     struct pthread_cond *, pthread_mutex_t *);
53
54 int __pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
55 int __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
56                        const struct timespec *abstime);
57
58 /*
59  * Double underscore versions are cancellation points.  Single underscore
60  * versions are not and are provided for libc internal usage (which
61  * shouldn't introduce cancellation points).
62  */
63 __weak_reference(__pthread_cond_wait, pthread_cond_wait);
64 __weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
65
66 __weak_reference(_pthread_cond_init, pthread_cond_init);
67 __weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
68 __weak_reference(_pthread_cond_signal, pthread_cond_signal);
69 __weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
70
71
72 int
73 _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
74 {
75         enum pthread_cond_type type;
76         pthread_cond_t  pcond;
77         int             flags;
78         int             rval = 0;
79
80         if (cond == NULL)
81                 rval = EINVAL;
82         else {
83                 /*
84                  * Check if a pointer to a condition variable attribute
85                  * structure was passed by the caller: 
86                  */
87                 if (cond_attr != NULL && *cond_attr != NULL) {
88                         /* Default to a fast condition variable: */
89                         type = (*cond_attr)->c_type;
90                         flags = (*cond_attr)->c_flags;
91                 } else {
92                         /* Default to a fast condition variable: */
93                         type = COND_TYPE_FAST;
94                         flags = 0;
95                 }
96
97                 /* Process according to condition variable type: */
98                 switch (type) {
99                 /* Fast condition variable: */
100                 case COND_TYPE_FAST:
101                         /* Nothing to do here. */
102                         break;
103
104                 /* Trap invalid condition variable types: */
105                 default:
106                         /* Return an invalid argument error: */
107                         rval = EINVAL;
108                         break;
109                 }
110
111                 /* Check for no errors: */
112                 if (rval == 0) {
113                         if ((pcond = (pthread_cond_t)
114                             malloc(sizeof(struct pthread_cond))) == NULL) {
115                                 rval = ENOMEM;
116                         } else if (_lock_init(&pcond->c_lock, LCK_ADAPTIVE,
117                             _thr_lock_wait, _thr_lock_wakeup, calloc) != 0) {
118                                 free(pcond);
119                                 rval = ENOMEM;
120                         } else {
121                                 /*
122                                  * Initialise the condition variable
123                                  * structure:
124                                  */
125                                 TAILQ_INIT(&pcond->c_queue);
126                                 pcond->c_flags = COND_FLAGS_INITED;
127                                 pcond->c_type = type;
128                                 pcond->c_mutex = NULL;
129                                 pcond->c_seqno = 0;
130                                 *cond = pcond;
131                         }
132                 }
133         }
134         /* Return the completion status: */
135         return (rval);
136 }
137
/*
 * Destroy a condition variable, releasing its lock and freeing its
 * storage.  The caller's pointer is NULLed under the cv lock so that
 * it cannot be reused after destruction begins.
 *
 * Returns 0 on success or EINVAL if cond (or *cond) is NULL.
 *
 * NOTE(review): no check is made here for threads still queued on the
 * cv; destroying a cv with active waiters appears to be treated as
 * undefined behavior (no EBUSY path) — confirm against callers.
 */
int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond	*cv;
	struct pthread		*curthread = _get_curthread();
	int			rval = 0;

	if (cond == NULL || *cond == NULL)
		rval = EINVAL;
	else {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/*
		 * NULL the caller's pointer now that the condition
		 * variable has been destroyed:
		 */
		cv = *cond;
		*cond = NULL;

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &cv->c_lock);

		/* Free the cond lock structure: */
		_lock_destroy(&cv->c_lock);

		/*
		 * Free the memory allocated for the condition
		 * variable structure:
		 */
		free(cv);

	}
	/* Return the completion status: */
	return (rval);
}
174
/*
 * Wait (with no timeout) on a condition variable.
 *
 * On entry the caller must hold *mutex.  The thread is queued on the
 * cv, the mutex is released, and the thread blocks until a signal or
 * broadcast bumps the cv's sequence number (c_seqno), which is how a
 * genuine wakeup is distinguished from a signal-handler interruption.
 * On return the mutex is re-acquired (even on error paths after the
 * mutex was dropped).
 *
 * Returns 0 on success, EINVAL for a NULL/ mismatched cond or mutex,
 * or the error from the mutex lock/unlock operations.
 */
int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	rval = 0;
	int	done = 0;
	/* Tracks whether *mutex is still held by this thread. */
	int	mutex_locked = 1;
	/* Snapshot of c_seqno; a later change means we were signaled. */
	int	seqno;

	if (cond == NULL)
		return (EINVAL);

	/*
	 * If the condition variable is statically initialized,
	 * perform the dynamic initialization:
	 */
	if (*cond == NULL &&
	    (rval = _pthread_cond_init(cond, NULL)) != 0)
		return (rval);

	if (!_kse_isthreaded())
		_kse_setthreaded(1);

	/*
	 * Enter a loop waiting for a condition signal or broadcast
	 * to wake up this thread.  A loop is needed in case the waiting
	 * thread is interrupted by a signal to execute a signal handler.
	 * It is not (currently) possible to remain in the waiting queue
	 * while running a handler.  Instead, the thread is interrupted
	 * and backed out of the waiting queue prior to executing the
	 * signal handler.
	 */

	/* Lock the condition variable structure: */
	THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
	seqno = (*cond)->c_seqno;
	do {
		/*
		 * If the condvar was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*cond)->c_queue);
			(*cond)->c_flags |= COND_FLAGS_INITED;
		}

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			/*
			 * A cv may only be associated with one mutex at
			 * a time; reject a wait using a different mutex.
			 */
			if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
			    ((*cond)->c_mutex != *mutex))) {
				/* Return invalid argument error: */
				rval = EINVAL;
			} else {
				/* Reset the timeout and interrupted flags: */
				curthread->timeout = 0;
				curthread->interrupted = 0;

				/*
				 * Queue the running thread for the condition
				 * variable:
				 */
				cond_queue_enq(*cond, curthread);

				/* Wait forever: */
				curthread->wakeup_time.tv_sec = -1;

				/* Unlock the mutex: */
				if (mutex_locked &&
				    ((rval = _mutex_cv_unlock(mutex)) != 0)) {
					/*
					 * Cannot unlock the mutex, so remove
					 * the running thread from the condition
					 * variable queue:
					 */
					cond_queue_remove(*cond, curthread);
				}
				else {
					/* Remember the mutex: */
					(*cond)->c_mutex = *mutex;

					/*
					 * Don't unlock the mutex the next
					 * time through the loop (if the
					 * thread has to be requeued after
					 * handling a signal).
					 */
					mutex_locked = 0;

					/*
					 * This thread is active and is in a
					 * critical region (holding the cv
					 * lock); we should be able to safely
					 * set the state.
					 */
					THR_SCHED_LOCK(curthread, curthread);
					THR_SET_STATE(curthread, PS_COND_WAIT);

					/* Remember the CV: */
					curthread->data.cond = *cond;
					curthread->sigbackout = cond_wait_backout;
					THR_SCHED_UNLOCK(curthread, curthread);

					/* Unlock the CV structure: */
					THR_LOCK_RELEASE(curthread,
					    &(*cond)->c_lock);

					/* Schedule the next thread: */
					_thr_sched_switch(curthread);

					/*
					 * XXX - This really isn't a good check
					 * since there can be more than one
					 * thread waiting on the CV.  Signals
					 * sent to threads waiting on mutexes
					 * or CVs should really be deferred
					 * until the threads are no longer
					 * waiting, but POSIX says that signals
					 * should be sent "as soon as possible".
					 */
					done = (seqno != (*cond)->c_seqno);
					if (done && !THR_IN_CONDQ(curthread)) {
						/*
						 * The thread is dequeued, so
						 * it is safe to clear these.
						 */
						curthread->data.cond = NULL;
						curthread->sigbackout = NULL;
						check_continuation(curthread,
						    NULL, mutex);
						return (_mutex_cv_lock(mutex));
					}

					/* Relock the CV structure: */
					THR_LOCK_ACQUIRE(curthread,
					    &(*cond)->c_lock);

					/*
					 * Clear these after taking the lock to
					 * prevent a race condition where a
					 * signal can arrive before dequeueing
					 * the thread.
					 */
					curthread->data.cond = NULL;
					curthread->sigbackout = NULL;
					done = (seqno != (*cond)->c_seqno);

					if (THR_IN_CONDQ(curthread)) {
						cond_queue_remove(*cond,
						    curthread);

						/* Check for no more waiters: */
						if (TAILQ_EMPTY(&(*cond)->c_queue))
							(*cond)->c_mutex = NULL;
					}
				}
			}
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		check_continuation(curthread, *cond,
		    mutex_locked ? NULL : mutex);
	} while ((done == 0) && (rval == 0));

	/* Unlock the condition variable structure: */
	THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

	/* Re-acquire the mutex if it was dropped while waiting. */
	if (mutex_locked == 0)
		_mutex_cv_lock(mutex);

	/* Return the completion status: */
	return (rval);
}
355
356 __strong_reference(_pthread_cond_wait, _thr_cond_wait);
357
/*
 * Cancellation-point wrapper around _pthread_cond_wait(); this is the
 * version exported as pthread_cond_wait (see the weak references above).
 */
int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	struct pthread *curthread;
	int rval;

	curthread = _get_curthread();
	_thr_cancel_enter(curthread);
	rval = _pthread_cond_wait(cond, mutex);
	_thr_cancel_leave(curthread, 1);
	return (rval);
}
369
370 int
371 _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
372                        const struct timespec * abstime)
373 {
374         struct pthread  *curthread = _get_curthread();
375         int     rval = 0;
376         int     done = 0;
377         int     mutex_locked = 1;
378         int     seqno;
379
380         THR_ASSERT(curthread->locklevel == 0,
381             "cv_timedwait: locklevel is not zero!");
382
383         if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
384             abstime->tv_nsec >= 1000000000)
385                 return (EINVAL);
386         /*
387          * If the condition variable is statically initialized, perform dynamic
388          * initialization.
389          */
390         if (*cond == NULL && (rval = _pthread_cond_init(cond, NULL)) != 0)
391                 return (rval);
392
393         if (!_kse_isthreaded())
394                 _kse_setthreaded(1);
395
396         /*
397          * Enter a loop waiting for a condition signal or broadcast
398          * to wake up this thread.  A loop is needed in case the waiting
399          * thread is interrupted by a signal to execute a signal handler.
400          * It is not (currently) possible to remain in the waiting queue
401          * while running a handler.  Instead, the thread is interrupted
402          * and backed out of the waiting queue prior to executing the
403          * signal handler.
404          */
405
406         /* Lock the condition variable structure: */
407         THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
408         seqno = (*cond)->c_seqno;
409         do {
410                 /*
411                  * If the condvar was statically allocated, properly
412                  * initialize the tail queue.
413                  */
414                 if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
415                         TAILQ_INIT(&(*cond)->c_queue);
416                         (*cond)->c_flags |= COND_FLAGS_INITED;
417                 }
418
419                 /* Process according to condition variable type: */
420                 switch ((*cond)->c_type) {
421                 /* Fast condition variable: */
422                 case COND_TYPE_FAST:
423                         if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
424                             ((*cond)->c_mutex != *mutex))) {
425                                 /* Return invalid argument error: */
426                                 rval = EINVAL;
427                         } else {
428                                 /* Reset the timeout and interrupted flags: */
429                                 curthread->timeout = 0;
430                                 curthread->interrupted = 0;
431
432                                 /*
433                                  * Queue the running thread for the condition
434                                  * variable:
435                                  */
436                                 cond_queue_enq(*cond, curthread);
437
438                                 /* Unlock the mutex: */
439                                 if (mutex_locked &&
440                                    ((rval = _mutex_cv_unlock(mutex)) != 0)) {
441                                         /*
442                                          * Cannot unlock the mutex; remove the
443                                          * running thread from the condition
444                                          * variable queue: 
445                                          */
446                                         cond_queue_remove(*cond, curthread);
447                                 } else {
448                                         /* Remember the mutex: */
449                                         (*cond)->c_mutex = *mutex;
450
451                                         /*
452                                          * Don't unlock the mutex the next
453                                          * time through the loop (if the
454                                          * thread has to be requeued after
455                                          * handling a signal).
456                                          */
457                                         mutex_locked = 0;
458
459                                         /*
460                                          * This thread is active and is in a
461                                          * critical region (holding the cv
462                                          * lock); we should be able to safely
463                                          * set the state.
464                                          */
465                                         THR_SCHED_LOCK(curthread, curthread);
466                                         /* Set the wakeup time: */
467                                         curthread->wakeup_time.tv_sec =
468                                             abstime->tv_sec;
469                                         curthread->wakeup_time.tv_nsec =
470                                             abstime->tv_nsec;
471                                         THR_SET_STATE(curthread, PS_COND_WAIT);
472
473                                         /* Remember the CV: */
474                                         curthread->data.cond = *cond;
475                                         curthread->sigbackout = cond_wait_backout;
476                                         THR_SCHED_UNLOCK(curthread, curthread);
477
478                                         /* Unlock the CV structure: */
479                                         THR_LOCK_RELEASE(curthread,
480                                             &(*cond)->c_lock);
481
482                                         /* Schedule the next thread: */
483                                         _thr_sched_switch(curthread);
484
485                                         /*
486                                          * XXX - This really isn't a good check
487                                          * since there can be more than one
488                                          * thread waiting on the CV.  Signals
489                                          * sent to threads waiting on mutexes
490                                          * or CVs should really be deferred
491                                          * until the threads are no longer
492                                          * waiting, but POSIX says that signals
493                                          * should be sent "as soon as possible".
494                                          */
495                                         done = (seqno != (*cond)->c_seqno);
496                                         if (done && !THR_IN_CONDQ(curthread)) {
497                                                 /*
498                                                  * The thread is dequeued, so
499                                                  * it is safe to clear these.
500                                                  */
501                                                 curthread->data.cond = NULL;
502                                                 curthread->sigbackout = NULL;
503                                                 check_continuation(curthread,
504                                                     NULL, mutex);
505                                                 return (_mutex_cv_lock(mutex));
506                                         }
507
508                                         /* Relock the CV structure: */
509                                         THR_LOCK_ACQUIRE(curthread,
510                                             &(*cond)->c_lock);
511
512                                         /*
513                                          * Clear these after taking the lock to
514                                          * prevent a race condition where a
515                                          * signal can arrive before dequeueing
516                                          * the thread.
517                                          */
518                                         curthread->data.cond = NULL;
519                                         curthread->sigbackout = NULL;
520
521                                         done = (seqno != (*cond)->c_seqno);
522
523                                         if (THR_IN_CONDQ(curthread)) {
524                                                 cond_queue_remove(*cond,
525                                                     curthread);
526
527                                                 /* Check for no more waiters: */
528                                                 if (TAILQ_EMPTY(&(*cond)->c_queue))
529                                                         (*cond)->c_mutex = NULL;
530                                         }
531
532                                         if (curthread->timeout != 0) {
533                                                 /* The wait timedout. */
534                                                 rval = ETIMEDOUT;
535                                         }
536                                 }
537                         }
538                         break;
539
540                 /* Trap invalid condition variable types: */
541                 default:
542                         /* Return an invalid argument error: */
543                         rval = EINVAL;
544                         break;
545                 }
546
547                 check_continuation(curthread, *cond,
548                     mutex_locked ? NULL : mutex);
549         } while ((done == 0) && (rval == 0));
550
551         /* Unlock the condition variable structure: */
552         THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
553
554         if (mutex_locked == 0)
555                 _mutex_cv_lock(mutex);
556
557         /* Return the completion status: */
558         return (rval);
559 }
560
/*
 * Cancellation-point wrapper around _pthread_cond_timedwait(); this is
 * the version exported as pthread_cond_timedwait (see the weak
 * references above).
 */
int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec *abstime)
{
	struct pthread *curthread;
	int rval;

	curthread = _get_curthread();
	_thr_cancel_enter(curthread);
	rval = _pthread_cond_timedwait(cond, mutex, abstime);
	_thr_cancel_leave(curthread, 1);
	return (rval);
}
573
574
/*
 * Wake one thread waiting on the condition variable: bump c_seqno and
 * make the first queued waiter runnable.  Safe to call with no
 * waiters.  Returns 0 on success or EINVAL for a bad cv.
 *
 * NOTE(review): the assertion message says "cv_timedwait" — it looks
 * copy-pasted from the timedwait path.
 */
int
_pthread_cond_signal(pthread_cond_t * cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread	*pthread;
	struct kse_mailbox *kmbx;
	int		rval = 0;

	THR_ASSERT(curthread->locklevel == 0,
	    "cv_timedwait: locklevel is not zero!");
	if (cond == NULL)
		rval = EINVAL;
       /*
	* If the condition variable is statically initialized, perform dynamic
	* initialization.
	*/
	else if (*cond != NULL || (rval = _pthread_cond_init(cond, NULL)) == 0) {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			/* Increment the sequence number: */
			(*cond)->c_seqno++;

			/*
			 * Wakeups have to be done with the CV lock held;
			 * otherwise there is a race condition where the
			 * thread can timeout, run on another KSE, and enter
			 * another blocking state (including blocking on a CV).
			 */
			if ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
			    != NULL) {
				THR_SCHED_LOCK(curthread, pthread);
				cond_queue_remove(*cond, pthread);
				pthread->sigbackout = NULL;
				/*
				 * If the woken thread shares this KSE group
				 * and outranks us, yield at the next
				 * opportunity so it can run promptly.
				 */
				if ((pthread->kseg == curthread->kseg) &&
				    (pthread->active_priority >
				    curthread->active_priority))
					curthread->critical_yield = 1;
				kmbx = _thr_setrunnable_unlocked(pthread);
				THR_SCHED_UNLOCK(curthread, pthread);
				if (kmbx != NULL)
					kse_wakeup(kmbx);
			}
			/* Check for no more waiters: */
			if (TAILQ_EMPTY(&(*cond)->c_queue))
				(*cond)->c_mutex = NULL;
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
	}

	/* Return the completion status: */
	return (rval);
}
641
642 __strong_reference(_pthread_cond_signal, _thr_cond_signal);
643
/*
 * Wake every thread waiting on the condition variable: bump c_seqno
 * and drain the wait queue, making each waiter runnable in turn.
 * Safe to call with no waiters.  Returns 0 on success or EINVAL for a
 * bad cv.
 *
 * NOTE(review): the assertion message says "cv_timedwait" — it looks
 * copy-pasted from the timedwait path.
 */
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread	*pthread;
	struct kse_mailbox *kmbx;
	int		rval = 0;

	THR_ASSERT(curthread->locklevel == 0,
	    "cv_timedwait: locklevel is not zero!");
	if (cond == NULL)
		rval = EINVAL;
       /*
	* If the condition variable is statically initialized, perform dynamic
	* initialization.
	*/
	else if (*cond != NULL || (rval = _pthread_cond_init(cond, NULL)) == 0) {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			/* Increment the sequence number: */
			(*cond)->c_seqno++;

			/*
			 * Enter a loop to bring all threads off the
			 * condition queue:
			 */
			while ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
			    != NULL) {
				THR_SCHED_LOCK(curthread, pthread);
				cond_queue_remove(*cond, pthread);
				pthread->sigbackout = NULL;
				/*
				 * If a woken thread shares this KSE group
				 * and outranks us, yield at the next
				 * opportunity so it can run promptly.
				 */
				if ((pthread->kseg == curthread->kseg) &&
				    (pthread->active_priority >
				    curthread->active_priority))
					curthread->critical_yield = 1;
				kmbx = _thr_setrunnable_unlocked(pthread);
				THR_SCHED_UNLOCK(curthread, pthread);
				if (kmbx != NULL)
					kse_wakeup(kmbx);
			}

			/* There are no more waiting threads: */
			(*cond)->c_mutex = NULL;
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
	}

	/* Return the completion status: */
	return (rval);
}
708
709 __strong_reference(_pthread_cond_broadcast, _thr_cond_broadcast);
710
711 static inline void
712 check_continuation(struct pthread *curthread, struct pthread_cond *cond,
713     pthread_mutex_t *mutex)
714 {
715         if ((curthread->interrupted != 0) &&
716             (curthread->continuation != NULL)) {
717                 if (cond != NULL)
718                         /* Unlock the condition variable structure: */
719                         THR_LOCK_RELEASE(curthread, &cond->c_lock);
720                 /*
721                  * Note that even though this thread may have been
722                  * canceled, POSIX requires that the mutex be
723                  * reaquired prior to cancellation.
724                  */
725                 if (mutex != NULL)
726                         _mutex_cv_lock(mutex);
727                 curthread->continuation((void *) curthread);
728                 PANIC("continuation returned in pthread_cond_wait.\n");
729         }
730 }
731
732 static void
733 cond_wait_backout(void *arg)
734 {
735         struct pthread *curthread = (struct pthread *)arg;
736         pthread_cond_t  cond;
737
738         cond = curthread->data.cond;
739         if (cond != NULL) {
740                 /* Lock the condition variable structure: */
741                 THR_LOCK_ACQUIRE(curthread, &cond->c_lock);
742
743                 /* Process according to condition variable type: */
744                 switch (cond->c_type) {
745                 /* Fast condition variable: */
746                 case COND_TYPE_FAST:
747                         cond_queue_remove(cond, curthread);
748
749                         /* Check for no more waiters: */
750                         if (TAILQ_EMPTY(&cond->c_queue))
751                                 cond->c_mutex = NULL;
752                         break;
753
754                 default:
755                         break;
756                 }
757
758                 /* Unlock the condition variable structure: */
759                 THR_LOCK_RELEASE(curthread, &cond->c_lock);
760         }
761         /* No need to call this again. */
762         curthread->sigbackout = NULL;
763 }
764
765 /*
766  * Dequeue a waiting thread from the head of a condition queue in
767  * descending priority order.
768  */
769 static inline struct pthread *
770 cond_queue_deq(pthread_cond_t cond)
771 {
772         struct pthread  *pthread;
773
774         while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
775                 TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
776                 THR_CONDQ_CLEAR(pthread);
777                 if ((pthread->timeout == 0) && (pthread->interrupted == 0))
778                         /*
779                          * Only exit the loop when we find a thread
780                          * that hasn't timed out or been canceled;
781                          * those threads are already running and don't
782                          * need their run state changed.
783                          */
784                         break;
785         }
786
787         return (pthread);
788 }
789
790 /*
791  * Remove a waiting thread from a condition queue in descending priority
792  * order.
793  */
794 static inline void
795 cond_queue_remove(pthread_cond_t cond, struct pthread *pthread)
796 {
797         /*
798          * Because pthread_cond_timedwait() can timeout as well
799          * as be signaled by another thread, it is necessary to
800          * guard against removing the thread from the queue if
801          * it isn't in the queue.
802          */
803         if (THR_IN_CONDQ(pthread)) {
804                 TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
805                 THR_CONDQ_CLEAR(pthread);
806         }
807 }
808
809 /*
810  * Enqueue a waiting thread to a condition queue in descending priority
811  * order.
812  */
813 static inline void
814 cond_queue_enq(pthread_cond_t cond, struct pthread *pthread)
815 {
816         struct pthread *tid = TAILQ_LAST(&cond->c_queue, cond_head);
817
818         THR_ASSERT(!THR_IN_SYNCQ(pthread),
819             "cond_queue_enq: thread already queued!");
820
821         /*
822          * For the common case of all threads having equal priority,
823          * we perform a quick check against the priority of the thread
824          * at the tail of the queue.
825          */
826         if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
827                 TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe);
828         else {
829                 tid = TAILQ_FIRST(&cond->c_queue);
830                 while (pthread->active_priority <= tid->active_priority)
831                         tid = TAILQ_NEXT(tid, sqe);
832                 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
833         }
834         THR_CONDQ_SET(pthread);
835         pthread->data.cond = cond;
836 }