/* lib/libkse/thread/thr_cond.c (FreeBSD releng/7.2) */
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include "thr_private.h"

LT10_COMPAT_PRIVATE(__pthread_cond_wait);
LT10_COMPAT_PRIVATE(_pthread_cond_wait);
LT10_COMPAT_DEFAULT(pthread_cond_wait);
LT10_COMPAT_PRIVATE(__pthread_cond_timedwait);
LT10_COMPAT_PRIVATE(_pthread_cond_timedwait);
LT10_COMPAT_DEFAULT(pthread_cond_timedwait);
LT10_COMPAT_PRIVATE(_pthread_cond_init);
LT10_COMPAT_DEFAULT(pthread_cond_init);
LT10_COMPAT_PRIVATE(_pthread_cond_destroy);
LT10_COMPAT_DEFAULT(pthread_cond_destroy);
LT10_COMPAT_PRIVATE(_pthread_cond_signal);
LT10_COMPAT_DEFAULT(pthread_cond_signal);
LT10_COMPAT_PRIVATE(_pthread_cond_broadcast);
LT10_COMPAT_DEFAULT(pthread_cond_broadcast);

#define THR_IN_CONDQ(thr)       (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define THR_CONDQ_SET(thr)      (thr)->sflags |= THR_FLAGS_IN_SYNCQ
#define THR_CONDQ_CLEAR(thr)    (thr)->sflags &= ~THR_FLAGS_IN_SYNCQ

/*
 * Prototypes
 */
static inline struct pthread    *cond_queue_deq(pthread_cond_t);
static inline void              cond_queue_remove(pthread_cond_t, pthread_t);
static inline void              cond_queue_enq(pthread_cond_t, pthread_t);
static void                     cond_wait_backout(void *);
static inline void              check_continuation(struct pthread *,
                                    struct pthread_cond *, pthread_mutex_t *);

/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);

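/*
 * Illustrative caller-side sketch (not part of the original source; the
 * predicate, mutex "m", and condvar "cv" names are hypothetical).  The
 * exported functions above are meant to be used with the standard
 * predicate loop, since a waiter can be woken without its predicate
 * being true:
 *
 *      pthread_mutex_lock(&m);
 *      while (!predicate)
 *              pthread_cond_wait(&cv, &m);
 *      ... consume the predicate ...
 *      pthread_mutex_unlock(&m);
 */
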
int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
        enum pthread_cond_type type;
        pthread_cond_t  pcond;
        int             flags;
        int             rval = 0;

        if (cond == NULL)
                rval = EINVAL;
        else {
                /*
                 * Check if a pointer to a condition variable attribute
                 * structure was passed by the caller:
                 */
                if (cond_attr != NULL && *cond_attr != NULL) {
                        /* Use the caller-supplied attributes: */
                        type = (*cond_attr)->c_type;
                        flags = (*cond_attr)->c_flags;
                } else {
                        /* Default to a fast condition variable: */
                        type = COND_TYPE_FAST;
                        flags = 0;
                }

                /* Process according to condition variable type: */
                switch (type) {
                /* Fast condition variable: */
                case COND_TYPE_FAST:
                        /* Nothing to do here. */
                        break;

                /* Trap invalid condition variable types: */
                default:
                        /* Return an invalid argument error: */
                        rval = EINVAL;
                        break;
                }

                /* Check for no errors: */
                if (rval == 0) {
                        if ((pcond = (pthread_cond_t)
                            malloc(sizeof(struct pthread_cond))) == NULL) {
                                rval = ENOMEM;
                        } else if (_lock_init(&pcond->c_lock, LCK_ADAPTIVE,
                            _thr_lock_wait, _thr_lock_wakeup, calloc) != 0) {
                                free(pcond);
                                rval = ENOMEM;
                        } else {
                                /*
                                 * Initialise the condition variable
                                 * structure:
                                 */
                                TAILQ_INIT(&pcond->c_queue);
                                pcond->c_flags = COND_FLAGS_INITED;
                                pcond->c_type = type;
                                pcond->c_mutex = NULL;
                                pcond->c_seqno = 0;
                                *cond = pcond;
                        }
                }
        }
        /* Return the completion status: */
        return (rval);
}

int
_pthread_cond_destroy(pthread_cond_t *cond)
{
        struct pthread_cond     *cv;
        struct pthread          *curthread = _get_curthread();
        int                     rval = 0;

        if (cond == NULL || *cond == NULL)
                rval = EINVAL;
        else {
                /* Lock the condition variable structure: */
                THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

                /*
                 * NULL the caller's pointer now that the condition
                 * variable has been destroyed:
                 */
                cv = *cond;
                *cond = NULL;

                /* Unlock the condition variable structure: */
                THR_LOCK_RELEASE(curthread, &cv->c_lock);

                /* Free the cond lock structure: */
                _lock_destroy(&cv->c_lock);

                /*
                 * Free the memory allocated for the condition
                 * variable structure:
                 */
                free(cv);

        }
        /* Return the completion status: */
        return (rval);
}

int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
        struct pthread  *curthread = _get_curthread();
        int     rval = 0;
        int     done = 0;
        int     mutex_locked = 1;
        int     seqno;

        if (cond == NULL)
                return (EINVAL);

        /*
         * If the condition variable is statically initialized,
         * perform the dynamic initialization:
         */
        if (*cond == NULL &&
            (rval = pthread_cond_init(cond, NULL)) != 0)
                return (rval);

        if (!_kse_isthreaded())
                _kse_setthreaded(1);

        /*
         * Enter a loop waiting for a condition signal or broadcast
         * to wake up this thread.  A loop is needed in case the waiting
         * thread is interrupted by a signal to execute a signal handler.
         * It is not (currently) possible to remain in the waiting queue
         * while running a handler.  Instead, the thread is interrupted
         * and backed out of the waiting queue prior to executing the
         * signal handler.
         */

        /* Lock the condition variable structure: */
        THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
        seqno = (*cond)->c_seqno;
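        /*
         * c_seqno is bumped by pthread_cond_signal() and
         * pthread_cond_broadcast(); comparing the value sampled above
         * against the current value after returning from the scheduler
         * tells whether a wakeup really occurred or whether this thread
         * was merely backed out of the queue to run a signal handler.
         */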
        do {
                /*
                 * If the condvar was statically allocated, properly
                 * initialize the tail queue.
                 */
                if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
                        TAILQ_INIT(&(*cond)->c_queue);
                        (*cond)->c_flags |= COND_FLAGS_INITED;
                }

                /* Process according to condition variable type: */
                switch ((*cond)->c_type) {
                /* Fast condition variable: */
                case COND_TYPE_FAST:
                        if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
                            ((*cond)->c_mutex != *mutex))) {
                                /* Return invalid argument error: */
                                rval = EINVAL;
                        } else {
                                /* Reset the timeout and interrupted flags: */
                                curthread->timeout = 0;
                                curthread->interrupted = 0;

                                /*
                                 * Queue the running thread for the condition
                                 * variable:
                                 */
                                cond_queue_enq(*cond, curthread);

                                /* Wait forever: */
                                curthread->wakeup_time.tv_sec = -1;

                                /* Unlock the mutex: */
                                if (mutex_locked &&
                                    ((rval = _mutex_cv_unlock(mutex)) != 0)) {
                                        /*
                                         * Cannot unlock the mutex, so remove
                                         * the running thread from the condition
                                         * variable queue:
                                         */
                                        cond_queue_remove(*cond, curthread);
                                }
                                else {
                                        /* Remember the mutex: */
                                        (*cond)->c_mutex = *mutex;

                                        /*
                                         * Don't unlock the mutex the next
                                         * time through the loop (if the
                                         * thread has to be requeued after
                                         * handling a signal).
                                         */
                                        mutex_locked = 0;

                                        /*
                                         * This thread is active and is in a
                                         * critical region (holding the cv
                                         * lock); we should be able to safely
                                         * set the state.
                                         */
                                        THR_SCHED_LOCK(curthread, curthread);
                                        THR_SET_STATE(curthread, PS_COND_WAIT);

                                        /* Remember the CV: */
                                        curthread->data.cond = *cond;
                                        curthread->sigbackout = cond_wait_backout;
                                        THR_SCHED_UNLOCK(curthread, curthread);

                                        /* Unlock the CV structure: */
                                        THR_LOCK_RELEASE(curthread,
                                            &(*cond)->c_lock);

                                        /* Schedule the next thread: */
                                        _thr_sched_switch(curthread);

                                        /*
                                         * XXX - This really isn't a good check
                                         * since there can be more than one
                                         * thread waiting on the CV.  Signals
                                         * sent to threads waiting on mutexes
                                         * or CVs should really be deferred
                                         * until the threads are no longer
                                         * waiting, but POSIX says that signals
                                         * should be sent "as soon as possible".
                                         */
                                        done = (seqno != (*cond)->c_seqno);
                                        if (done && !THR_IN_CONDQ(curthread)) {
                                                /*
                                                 * The thread is dequeued, so
                                                 * it is safe to clear these.
                                                 */
                                                curthread->data.cond = NULL;
                                                curthread->sigbackout = NULL;
                                                check_continuation(curthread,
                                                    NULL, mutex);
                                                return (_mutex_cv_lock(mutex));
                                        }

                                        /* Relock the CV structure: */
                                        THR_LOCK_ACQUIRE(curthread,
                                            &(*cond)->c_lock);

                                        /*
                                         * Clear these after taking the lock to
                                         * prevent a race condition where a
                                         * signal can arrive before dequeueing
                                         * the thread.
                                         */
                                        curthread->data.cond = NULL;
                                        curthread->sigbackout = NULL;
                                        done = (seqno != (*cond)->c_seqno);

                                        if (THR_IN_CONDQ(curthread)) {
                                                cond_queue_remove(*cond,
                                                    curthread);

                                                /* Check for no more waiters: */
                                                if (TAILQ_EMPTY(&(*cond)->c_queue))
                                                        (*cond)->c_mutex = NULL;
                                        }
                                }
                        }
                        break;

                /* Trap invalid condition variable types: */
                default:
                        /* Return an invalid argument error: */
                        rval = EINVAL;
                        break;
                }

                check_continuation(curthread, *cond,
                    mutex_locked ? NULL : mutex);
        } while ((done == 0) && (rval == 0));

        /* Unlock the condition variable structure: */
        THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

        if (mutex_locked == 0)
                _mutex_cv_lock(mutex);

        /* Return the completion status: */
        return (rval);
}

__strong_reference(_pthread_cond_wait, _thr_cond_wait);

int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
        struct pthread *curthread = _get_curthread();
        int ret;

        _thr_cancel_enter(curthread);
        ret = _pthread_cond_wait(cond, mutex);
        _thr_cancel_leave(curthread, 1);
        return (ret);
}

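/*
 * Illustrative caller-side sketch (not part of the original source;
 * "predicate", "m", and "cv" are hypothetical names).
 * pthread_cond_timedwait() takes an absolute timeout, so a relative
 * delay is normally added to the current CLOCK_REALTIME value first:
 *
 *      struct timespec ts;
 *      clock_gettime(CLOCK_REALTIME, &ts);
 *      ts.tv_sec += 5;         (* give up after roughly five seconds *)
 *      pthread_mutex_lock(&m);
 *      while (!predicate) {
 *              if (pthread_cond_timedwait(&cv, &m, &ts) == ETIMEDOUT)
 *                      break;
 *      }
 *      pthread_mutex_unlock(&m);
 */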
int
_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
                       const struct timespec * abstime)
{
        struct pthread  *curthread = _get_curthread();
        int     rval = 0;
        int     done = 0;
        int     mutex_locked = 1;
        int     seqno;

        THR_ASSERT(curthread->locklevel == 0,
            "cv_timedwait: locklevel is not zero!");

        if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
            abstime->tv_nsec >= 1000000000)
                return (EINVAL);
        /*
         * If the condition variable is statically initialized, perform dynamic
         * initialization.
         */
        if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0)
                return (rval);

        if (!_kse_isthreaded())
                _kse_setthreaded(1);

        /*
         * Enter a loop waiting for a condition signal or broadcast
         * to wake up this thread.  A loop is needed in case the waiting
         * thread is interrupted by a signal to execute a signal handler.
         * It is not (currently) possible to remain in the waiting queue
         * while running a handler.  Instead, the thread is interrupted
         * and backed out of the waiting queue prior to executing the
         * signal handler.
         */

        /* Lock the condition variable structure: */
        THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
        seqno = (*cond)->c_seqno;
        do {
                /*
                 * If the condvar was statically allocated, properly
                 * initialize the tail queue.
                 */
                if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
                        TAILQ_INIT(&(*cond)->c_queue);
                        (*cond)->c_flags |= COND_FLAGS_INITED;
                }

                /* Process according to condition variable type: */
                switch ((*cond)->c_type) {
                /* Fast condition variable: */
                case COND_TYPE_FAST:
                        if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
                            ((*cond)->c_mutex != *mutex))) {
                                /* Return invalid argument error: */
                                rval = EINVAL;
                        } else {
                                /* Reset the timeout and interrupted flags: */
                                curthread->timeout = 0;
                                curthread->interrupted = 0;

                                /*
                                 * Queue the running thread for the condition
                                 * variable:
                                 */
                                cond_queue_enq(*cond, curthread);

                                /* Unlock the mutex: */
                                if (mutex_locked &&
                                   ((rval = _mutex_cv_unlock(mutex)) != 0)) {
                                        /*
                                         * Cannot unlock the mutex; remove the
                                         * running thread from the condition
                                         * variable queue:
                                         */
                                        cond_queue_remove(*cond, curthread);
                                } else {
                                        /* Remember the mutex: */
                                        (*cond)->c_mutex = *mutex;

                                        /*
                                         * Don't unlock the mutex the next
                                         * time through the loop (if the
                                         * thread has to be requeued after
                                         * handling a signal).
                                         */
                                        mutex_locked = 0;

                                        /*
                                         * This thread is active and is in a
                                         * critical region (holding the cv
                                         * lock); we should be able to safely
                                         * set the state.
                                         */
                                        THR_SCHED_LOCK(curthread, curthread);
                                        /* Set the wakeup time: */
                                        curthread->wakeup_time.tv_sec =
                                            abstime->tv_sec;
                                        curthread->wakeup_time.tv_nsec =
                                            abstime->tv_nsec;
                                        THR_SET_STATE(curthread, PS_COND_WAIT);

                                        /* Remember the CV: */
                                        curthread->data.cond = *cond;
                                        curthread->sigbackout = cond_wait_backout;
                                        THR_SCHED_UNLOCK(curthread, curthread);

                                        /* Unlock the CV structure: */
                                        THR_LOCK_RELEASE(curthread,
                                            &(*cond)->c_lock);

                                        /* Schedule the next thread: */
                                        _thr_sched_switch(curthread);

                                        /*
                                         * XXX - This really isn't a good check
                                         * since there can be more than one
                                         * thread waiting on the CV.  Signals
                                         * sent to threads waiting on mutexes
                                         * or CVs should really be deferred
                                         * until the threads are no longer
                                         * waiting, but POSIX says that signals
                                         * should be sent "as soon as possible".
                                         */
                                        done = (seqno != (*cond)->c_seqno);
                                        if (done && !THR_IN_CONDQ(curthread)) {
                                                /*
                                                 * The thread is dequeued, so
                                                 * it is safe to clear these.
                                                 */
                                                curthread->data.cond = NULL;
                                                curthread->sigbackout = NULL;
                                                check_continuation(curthread,
                                                    NULL, mutex);
                                                return (_mutex_cv_lock(mutex));
                                        }

                                        /* Relock the CV structure: */
                                        THR_LOCK_ACQUIRE(curthread,
                                            &(*cond)->c_lock);

                                        /*
                                         * Clear these after taking the lock to
                                         * prevent a race condition where a
                                         * signal can arrive before dequeueing
                                         * the thread.
                                         */
                                        curthread->data.cond = NULL;
                                        curthread->sigbackout = NULL;

                                        done = (seqno != (*cond)->c_seqno);

                                        if (THR_IN_CONDQ(curthread)) {
                                                cond_queue_remove(*cond,
                                                    curthread);

                                                /* Check for no more waiters: */
                                                if (TAILQ_EMPTY(&(*cond)->c_queue))
                                                        (*cond)->c_mutex = NULL;
                                        }

                                        if (curthread->timeout != 0) {
                                                /* The wait timed out. */
                                                rval = ETIMEDOUT;
                                        }
                                }
                        }
                        break;

                /* Trap invalid condition variable types: */
                default:
                        /* Return an invalid argument error: */
                        rval = EINVAL;
                        break;
                }

                check_continuation(curthread, *cond,
                    mutex_locked ? NULL : mutex);
        } while ((done == 0) && (rval == 0));

        /* Unlock the condition variable structure: */
        THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

        if (mutex_locked == 0)
                _mutex_cv_lock(mutex);

        /* Return the completion status: */
        return (rval);
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
                       const struct timespec *abstime)
{
        struct pthread *curthread = _get_curthread();
        int ret;

        _thr_cancel_enter(curthread);
        ret = _pthread_cond_timedwait(cond, mutex, abstime);
        _thr_cancel_leave(curthread, 1);
        return (ret);
}


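/*
 * Illustrative signal-side sketch (not part of the original source; the
 * names are hypothetical).  The predicate is updated and the waiter woken
 * while holding the same mutex the waiters pass to pthread_cond_wait():
 *
 *      pthread_mutex_lock(&m);
 *      predicate = 1;
 *      pthread_cond_signal(&cv);       (* or pthread_cond_broadcast(&cv) *)
 *      pthread_mutex_unlock(&m);
 */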
int
_pthread_cond_signal(pthread_cond_t * cond)
{
        struct pthread  *curthread = _get_curthread();
        struct pthread  *pthread;
        struct kse_mailbox *kmbx;
        int             rval = 0;

        THR_ASSERT(curthread->locklevel == 0,
            "cv_signal: locklevel is not zero!");
        if (cond == NULL)
                rval = EINVAL;
        /*
         * If the condition variable is statically initialized, perform dynamic
         * initialization.
         */
        else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
                /* Lock the condition variable structure: */
                THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

                /* Process according to condition variable type: */
                switch ((*cond)->c_type) {
                /* Fast condition variable: */
                case COND_TYPE_FAST:
                        /* Increment the sequence number: */
                        (*cond)->c_seqno++;

                        /*
                         * Wakeups have to be done with the CV lock held;
                         * otherwise there is a race condition where the
                         * thread can timeout, run on another KSE, and enter
                         * another blocking state (including blocking on a CV).
                         */
                        if ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
                            != NULL) {
                                THR_SCHED_LOCK(curthread, pthread);
                                cond_queue_remove(*cond, pthread);
                                pthread->sigbackout = NULL;
                                if ((pthread->kseg == curthread->kseg) &&
                                    (pthread->active_priority >
                                    curthread->active_priority))
                                        curthread->critical_yield = 1;
                                kmbx = _thr_setrunnable_unlocked(pthread);
                                THR_SCHED_UNLOCK(curthread, pthread);
                                if (kmbx != NULL)
                                        kse_wakeup(kmbx);
                        }
                        /* Check for no more waiters: */
                        if (TAILQ_EMPTY(&(*cond)->c_queue))
                                (*cond)->c_mutex = NULL;
                        break;

                /* Trap invalid condition variable types: */
                default:
                        /* Return an invalid argument error: */
                        rval = EINVAL;
                        break;
                }

                /* Unlock the condition variable structure: */
                THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
        }

        /* Return the completion status: */
        return (rval);
}

__strong_reference(_pthread_cond_signal, _thr_cond_signal);

int
_pthread_cond_broadcast(pthread_cond_t * cond)
{
        struct pthread  *curthread = _get_curthread();
        struct pthread  *pthread;
        struct kse_mailbox *kmbx;
        int             rval = 0;

        THR_ASSERT(curthread->locklevel == 0,
            "cv_broadcast: locklevel is not zero!");
        if (cond == NULL)
                rval = EINVAL;
        /*
         * If the condition variable is statically initialized, perform dynamic
         * initialization.
         */
        else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
                /* Lock the condition variable structure: */
                THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

                /* Process according to condition variable type: */
                switch ((*cond)->c_type) {
                /* Fast condition variable: */
                case COND_TYPE_FAST:
                        /* Increment the sequence number: */
                        (*cond)->c_seqno++;

                        /*
                         * Enter a loop to bring all threads off the
                         * condition queue:
                         */
                        while ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
                            != NULL) {
                                THR_SCHED_LOCK(curthread, pthread);
                                cond_queue_remove(*cond, pthread);
                                pthread->sigbackout = NULL;
                                if ((pthread->kseg == curthread->kseg) &&
                                    (pthread->active_priority >
                                    curthread->active_priority))
                                        curthread->critical_yield = 1;
                                kmbx = _thr_setrunnable_unlocked(pthread);
                                THR_SCHED_UNLOCK(curthread, pthread);
                                if (kmbx != NULL)
                                        kse_wakeup(kmbx);
                        }

                        /* There are no more waiting threads: */
                        (*cond)->c_mutex = NULL;
                        break;

                /* Trap invalid condition variable types: */
                default:
                        /* Return an invalid argument error: */
                        rval = EINVAL;
                        break;
                }

                /* Unlock the condition variable structure: */
                THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
        }

        /* Return the completion status: */
        return (rval);
}

__strong_reference(_pthread_cond_broadcast, _thr_cond_broadcast);

static inline void
check_continuation(struct pthread *curthread, struct pthread_cond *cond,
    pthread_mutex_t *mutex)
{
        if ((curthread->interrupted != 0) &&
            (curthread->continuation != NULL)) {
                if (cond != NULL)
                        /* Unlock the condition variable structure: */
                        THR_LOCK_RELEASE(curthread, &cond->c_lock);
                /*
                 * Note that even though this thread may have been
                 * canceled, POSIX requires that the mutex be
                 * reacquired prior to cancellation.
                 */
                if (mutex != NULL)
                        _mutex_cv_lock(mutex);
                curthread->continuation((void *) curthread);
                PANIC("continuation returned in pthread_cond_wait.\n");
        }
}

static void
cond_wait_backout(void *arg)
{
        struct pthread *curthread = (struct pthread *)arg;
        pthread_cond_t  cond;

        cond = curthread->data.cond;
        if (cond != NULL) {
                /* Lock the condition variable structure: */
                THR_LOCK_ACQUIRE(curthread, &cond->c_lock);

                /* Process according to condition variable type: */
                switch (cond->c_type) {
                /* Fast condition variable: */
                case COND_TYPE_FAST:
                        cond_queue_remove(cond, curthread);

                        /* Check for no more waiters: */
                        if (TAILQ_EMPTY(&cond->c_queue))
                                cond->c_mutex = NULL;
                        break;

                default:
                        break;
                }

                /* Unlock the condition variable structure: */
                THR_LOCK_RELEASE(curthread, &cond->c_lock);
        }
        /* No need to call this again. */
        curthread->sigbackout = NULL;
}

/*
 * Dequeue a waiting thread from the head of a condition queue in
 * descending priority order.
 */
static inline struct pthread *
cond_queue_deq(pthread_cond_t cond)
{
        struct pthread  *pthread;

        while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
                TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
                THR_CONDQ_CLEAR(pthread);
                if ((pthread->timeout == 0) && (pthread->interrupted == 0))
                        /*
                         * Only exit the loop when we find a thread
                         * that hasn't timed out or been canceled;
                         * those threads are already running and don't
                         * need their run state changed.
                         */
                        break;
        }

        return (pthread);
}

/*
 * Remove a waiting thread from a condition queue in descending priority
 * order.
 */
static inline void
cond_queue_remove(pthread_cond_t cond, struct pthread *pthread)
{
        /*
         * Because pthread_cond_timedwait() can timeout as well
         * as be signaled by another thread, it is necessary to
         * guard against removing the thread from the queue if
         * it isn't in the queue.
         */
        if (THR_IN_CONDQ(pthread)) {
                TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
                THR_CONDQ_CLEAR(pthread);
        }
}

/*
 * Enqueue a waiting thread to a condition queue in descending priority
 * order.
 */
static inline void
cond_queue_enq(pthread_cond_t cond, struct pthread *pthread)
{
        struct pthread *tid = TAILQ_LAST(&cond->c_queue, cond_head);

        THR_ASSERT(!THR_IN_SYNCQ(pthread),
            "cond_queue_enq: thread already queued!");

        /*
         * For the common case of all threads having equal priority,
         * we perform a quick check against the priority of the thread
         * at the tail of the queue.
         */
        if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
                TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe);
        else {
                tid = TAILQ_FIRST(&cond->c_queue);
                while (pthread->active_priority <= tid->active_priority)
                        tid = TAILQ_NEXT(tid, sqe);
                TAILQ_INSERT_BEFORE(tid, pthread, sqe);
        }
        THR_CONDQ_SET(pthread);
        pthread->data.cond = cond;
}
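
/*
 * Worked example of the insertion order above (hypothetical priorities):
 * with a queue holding active priorities [10, 7, 7, 5], enqueueing a
 * thread of priority 7 fails the tail check (7 > 5), walks from the head
 * past the 10 and both existing 7s, and is inserted before the 5, giving
 * [10, 7, 7, 7, 5].  The failed tail check guarantees a lower-priority
 * entry exists, so the walk terminates, and the queue stays in descending
 * priority order, FIFO among threads of equal priority.
 */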