 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2015 The FreeBSD Foundation
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include "un-namespace.h"

#include "thr_private.h"

_Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE,
    "pthread_cond too large");

int __pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec * abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
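
/*
 * Illustrative sketch (not part of this file): __weak_reference(sym, alias)
 * is the <sys/cdefs.h> macro used above.  It exports "alias" as a weak alias
 * for the strong symbol "sym", which is how the internal names
 * (_pthread_cond_init, ...) become callable under the public POSIX names.
 * The names my_strong_impl/my_public_name below are made up for the sketch.
 */
#include <sys/cdefs.h>

int
my_strong_impl(int x)
{
    /* Strong definition; internal callers can use this name directly. */
    return (x + 1);
}

/* Publish the public name as a weak alias of the strong symbol. */
__weak_reference(my_strong_impl, my_public_name);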
#define CV_PSHARED(cvp) (((cvp)->kcond.c_flags & USYNC_PROCESS_SHARED) != 0)

cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
        cvp->kcond.c_clockid = CLOCK_REALTIME;
            cvp->kcond.c_flags |= USYNC_PROCESS_SHARED;
        cvp->kcond.c_clockid = cattr->c_clockid;

cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
    struct pthread_cond *cvp;
    const struct pthread_cond_attr *cattr;

    cattr = cond_attr != NULL ? *cond_attr : NULL;
    if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
        cvp = calloc(1, sizeof(struct pthread_cond));
        cvp = __thr_pshared_offpage(cond, 1);
     * Initialise the condition variable structure:
    cond_init_body(cvp, cattr);
    *cond = pshared ? THR_PSHARED_PTR : cvp;

init_static(struct pthread *thread, pthread_cond_t *cond)
    THR_LOCK_ACQUIRE(thread, &_cond_static_lock);
        ret = cond_init(cond, NULL);
    THR_LOCK_RELEASE(thread, &_cond_static_lock);

#define CHECK_AND_INIT_COND \
    if (*cond == THR_PSHARED_PTR) { \
        cvp = __thr_pshared_offpage(cond, 0); \
    } else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \
        if (cvp == THR_COND_INITIALIZER) { \
            ret = init_static(_get_curthread(), cond); \
        } else if (cvp == THR_COND_DESTROYED) { \

_pthread_cond_init(pthread_cond_t * __restrict cond,
    const pthread_condattr_t * __restrict cond_attr)
    return (cond_init(cond, cond_attr));
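
/*
 * Illustrative sketch (not part of libthr): how an application reaches
 * cond_init() through the public API, requesting a process-shared variable
 * and a non-default clock via pthread_condattr_t.  A variable declared with
 * PTHREAD_COND_INITIALIZER instead takes the lazy init_static() path on
 * first use.  Error handling is minimal; setup_cond() is a made-up name.
 */
#include <pthread.h>
#include <time.h>

static pthread_cond_t cv;

static int
setup_cond(void)
{
    pthread_condattr_t attr;
    int error;

    error = pthread_condattr_init(&attr);
    if (error != 0)
        return (error);
    /* PTHREAD_PROCESS_SHARED selects the off-page, kernel-backed path. */
    (void)pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
    /* Time out against CLOCK_MONOTONIC instead of default CLOCK_REALTIME. */
    (void)pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
    error = pthread_cond_init(&cv, &attr);
    (void)pthread_condattr_destroy(&attr);
    return (error);
}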
_pthread_cond_destroy(pthread_cond_t *cond)
    struct pthread_cond *cvp;

    if (*cond == THR_PSHARED_PTR) {
        cvp = __thr_pshared_offpage(cond, 0);
            __thr_pshared_destroy(cond);
        *cond = THR_COND_DESTROYED;
    } else if ((cvp = *cond) == THR_COND_INITIALIZER) {
    } else if (cvp == THR_COND_DESTROYED) {
        *cond = THR_COND_DESTROYED;
 * Cancellation behavior:
 *   A thread may be canceled at the start of the wait; if it is canceled, it
 *   means it did not get a wakeup from pthread_cond_signal(); otherwise, it is
 *   Thread cancellation never causes a wakeup from pthread_cond_signal()
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
    struct pthread *curthread;
    int error, error2, recurse, robust;

    curthread = _get_curthread();
    robust = _mutex_enter_robust(curthread, mp);
    error = _mutex_cv_detach(mp, &recurse);
            _mutex_leave_robust(curthread, mp);
        _thr_cancel_enter2(curthread, 0);
    error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime,
        CVWAIT_ABSTIME | CVWAIT_CLOCKID);
        _thr_cancel_leave(curthread, 0);
     * Note that PP mutex and ROBUST mutex may return
     * interesting error codes.
        error2 = _mutex_cv_lock(mp, recurse, true);
    } else if (error == EINTR || error == ETIMEDOUT) {
        error2 = _mutex_cv_lock(mp, recurse, true);
         * Do not do cancellation on EOWNERDEAD there.  The
         * cancellation cleanup handler will use the protected
         * state and unlock the mutex without making the state
         * consistent and the state will be unrecoverable.
        if (error2 == 0 && cancel) {
                _mutex_leave_robust(curthread, mp);
            _thr_testcancel(curthread);
        /* We know that it didn't unlock the mutex. */
        _mutex_cv_attach(mp, recurse);
                _mutex_leave_robust(curthread, mp);
            _thr_testcancel(curthread);
        _mutex_leave_robust(curthread, mp);
    return (error2 != 0 ? error2 : error);
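
/*
 * Illustrative sketch (not part of libthr) of the application-side pattern
 * the cancellation comment above assumes: a waiter canceled inside
 * pthread_cond_wait() reacquires the mutex before acting on the
 * cancellation, so a pthread_cleanup_push()'d handler may safely unlock it.
 * The names queue_mutex/queue_cv/queue_len/worker are made up.
 */
#include <pthread.h>

static pthread_mutex_t queue_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cv = PTHREAD_COND_INITIALIZER;
static int queue_len;

static void
unlock_queue(void *arg)
{
    /* Runs if the thread is canceled while blocked in pthread_cond_wait(). */
    pthread_mutex_unlock(arg);
}

static void *
worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&queue_mutex);
    pthread_cleanup_push(unlock_queue, &queue_mutex);
    while (queue_len == 0)
        pthread_cond_wait(&queue_cv, &queue_mutex);    /* cancellation point */
    queue_len--;
    pthread_cleanup_pop(1);    /* pop and run the handler: unlocks queue_mutex */
    return (NULL);
}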
 * The thread waits on a userland queue whenever possible.  When the thread
 * is signaled or broadcast, it is removed from the queue and its wake
 * address is saved in the signaling thread's defer_waiters[] buffer, but it
 * is not woken up until the mutex is unlocked.
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
    struct pthread *curthread;
    struct sleepqueue *sq;
    int deferred, error, error2, recurse;

    curthread = _get_curthread();
    if (curthread->wchan != NULL)
        PANIC("thread %p was already on queue.", curthread);
        _thr_testcancel(curthread);
     * Set __has_user_waiters before unlocking the mutex; this allows
     * us to check it without locking in pthread_cond_signal().
    cvp->__has_user_waiters = 1;
    (void)_mutex_cv_unlock(mp, &recurse, &deferred);
    curthread->mutex_obj = mp;
    _sleepq_add(cvp, curthread);
        _thr_clear_wake(curthread);
            if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
                (void)_umtx_op_err(&mp->m_lock,
                    UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
        if (curthread->nwaiter_defer > 0) {
            _thr_wake_all(curthread->defer_waiters,
                curthread->nwaiter_defer);
            curthread->nwaiter_defer = 0;
            _thr_cancel_enter2(curthread, 0);
        error = _thr_sleep(curthread, cvp->kcond.c_clockid, abstime);
            _thr_cancel_leave(curthread, 0);
        if (curthread->wchan == NULL) {
        } else if (cancel && SHOULD_CANCEL(curthread)) {
            sq = _sleepq_lookup(cvp);
            cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
            curthread->mutex_obj = NULL;
            error2 = _mutex_cv_lock(mp, recurse, false);
            if (!THR_IN_CRITICAL(curthread))
                _pthread_exit(PTHREAD_CANCELED);
            else /* this should not happen */
        } else if (error == ETIMEDOUT) {
            sq = _sleepq_lookup(cvp);
            cvp->__has_user_waiters =
                _sleepq_remove(sq, curthread);
    curthread->mutex_obj = NULL;
    error2 = _mutex_cv_lock(mp, recurse, false);
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime, int cancel)
    struct pthread *curthread = _get_curthread();
    struct pthread_cond *cvp;
    struct pthread_mutex *mp;

    if (*mutex == THR_PSHARED_PTR) {
        mp = __thr_pshared_offpage(mutex, 0);
    if ((error = _mutex_owned(curthread, mp)) != 0)
    if (curthread->attr.sched_policy != SCHED_OTHER ||
        (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
        USYNC_PROCESS_SHARED)) != 0 || CV_PSHARED(cvp))
        return (cond_wait_kernel(cvp, mp, abstime, cancel));
        return (cond_wait_user(cvp, mp, abstime, cancel));

_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
    return (cond_wait_common(cond, mutex, NULL, 0));

__pthread_cond_wait(pthread_cond_t * __restrict cond,
    pthread_mutex_t * __restrict mutex)
    return (cond_wait_common(cond, mutex, NULL, 1));

_pthread_cond_timedwait(pthread_cond_t * __restrict cond,
    pthread_mutex_t * __restrict mutex,
    const struct timespec * __restrict abstime)
    if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
        abstime->tv_nsec >= 1000000000)
    return (cond_wait_common(cond, mutex, abstime, 0));

__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
    if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
        abstime->tv_nsec >= 1000000000)
    return (cond_wait_common(cond, mutex, abstime, 1));
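
/*
 * Illustrative sketch (not part of libthr): the checks above require
 * abstime->tv_nsec to lie in [0, 1000000000).  A caller typically builds the
 * absolute deadline from the condition variable's clock (CLOCK_REALTIME by
 * default) and normalizes the nanosecond field.  cond_wait_ms() is a made-up
 * helper; the mutex must be held and the predicate rechecked by the caller
 * as usual.
 */
#include <pthread.h>
#include <time.h>

static int
cond_wait_ms(pthread_cond_t *cv, pthread_mutex_t *mtx, int rel_ms)
{
    struct timespec abstime;

    clock_gettime(CLOCK_REALTIME, &abstime);
    abstime.tv_sec += rel_ms / 1000;
    abstime.tv_nsec += (long)(rel_ms % 1000) * 1000000;
    if (abstime.tv_nsec >= 1000000000) {    /* keep tv_nsec in range */
        abstime.tv_sec++;
        abstime.tv_nsec -= 1000000000;
    }
    /* Returns 0 on wakeup, ETIMEDOUT when the deadline passes, etc. */
    return (pthread_cond_timedwait(cv, mtx, &abstime));
}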
cond_signal_common(pthread_cond_t *cond)
    struct pthread *curthread = _get_curthread();
    struct pthread_cond *cvp;
    struct pthread_mutex *mp;
    struct sleepqueue *sq;

     * If the condition variable is statically initialized, perform dynamic
    pshared = CV_PSHARED(cvp);
    _thr_ucond_signal(&cvp->kcond);
    if (pshared || cvp->__has_user_waiters == 0)
    curthread = _get_curthread();
    sq = _sleepq_lookup(cvp);
    td = _sleepq_first(sq);
    cvp->__has_user_waiters = _sleepq_remove(sq, td);
    if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
        if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
            _thr_wake_all(curthread->defer_waiters,
                curthread->nwaiter_defer);
            curthread->nwaiter_defer = 0;
        curthread->defer_waiters[curthread->nwaiter_defer++] =
            &td->wake_addr->value;
        mp->m_flags |= PMUTEX_FLAG_DEFERRED;
        waddr = &td->wake_addr->value;
        _thr_set_wake(waddr);

struct broadcast_arg {
    struct pthread *curthread;
    unsigned int *waddrs[MAX_DEFER_WAITERS];

drop_cb(struct pthread *td, void *arg)
    struct broadcast_arg *ba = arg;
    struct pthread_mutex *mp;
    struct pthread *curthread = ba->curthread;

    if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
        if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
            _thr_wake_all(curthread->defer_waiters,
                curthread->nwaiter_defer);
            curthread->nwaiter_defer = 0;
        curthread->defer_waiters[curthread->nwaiter_defer++] =
            &td->wake_addr->value;
        mp->m_flags |= PMUTEX_FLAG_DEFERRED;
        if (ba->count >= MAX_DEFER_WAITERS) {
            _thr_wake_all(ba->waddrs, ba->count);
        ba->waddrs[ba->count++] = &td->wake_addr->value;

cond_broadcast_common(pthread_cond_t *cond)
    struct pthread_cond *cvp;
    struct sleepqueue *sq;
    struct broadcast_arg ba;

     * If the condition variable is statically initialized, perform dynamic
    pshared = CV_PSHARED(cvp);
    _thr_ucond_broadcast(&cvp->kcond);
    if (pshared || cvp->__has_user_waiters == 0)
    ba.curthread = _get_curthread();
    sq = _sleepq_lookup(cvp);
    _sleepq_drop(sq, drop_cb, &ba);
    cvp->__has_user_waiters = 0;
        _thr_wake_all(ba.waddrs, ba.count);

_pthread_cond_signal(pthread_cond_t * cond)
    return (cond_signal_common(cond));

_pthread_cond_broadcast(pthread_cond_t * cond)
    return (cond_broadcast_common(cond));
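
/*
 * Illustrative sketch (not part of libthr) of the pattern the deferred-wake
 * machinery above optimizes: on the userland-queue path (non-pshared condvar,
 * SCHED_OTHER, plain mutex), signaling or broadcasting while the waiters'
 * mutex is held only records the wake addresses (PMUTEX_FLAG_DEFERRED); the
 * real wakeup is issued when the mutex is unlocked, so waiters are not woken
 * just to block on the mutex again.  The names m/c/ready/produce/consume are
 * made up.
 */
#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t c = PTHREAD_COND_INITIALIZER;
static int ready;

static void
produce(void)
{
    pthread_mutex_lock(&m);
    ready = 1;
    /* Broadcast while m is held; libthr defers the wake to the unlock below. */
    pthread_cond_broadcast(&c);
    pthread_mutex_unlock(&m);
}

static void *
consume(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&m);
    while (!ready)
        pthread_cond_wait(&c, &m);
    pthread_mutex_unlock(&m);
    return (NULL);
}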