/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include "un-namespace.h"

#include "thr_private.h"

_Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE,
    "pthread_cond too large");

/*
 * Prototypes
 */
int	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);

/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);

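/*
 * Illustrative sketch (an addition for exposition, compiled out; the
 * example_* names are hypothetical): the weak references above mean an
 * application call to pthread_cond_wait() resolves to
 * __pthread_cond_wait(), a cancellation point, while libc-internal code
 * calls the single underscore version and cannot be canceled while
 * blocked there.
 */
#if 0
static void
example_two_entry_points(pthread_cond_t *cv, pthread_mutex_t *mtx)
{

	/* Internal-style call: not a cancellation point. */
	_pthread_cond_wait(cv, mtx);

	/* Application call: resolves to __pthread_cond_wait(). */
	pthread_cond_wait(cv, mtx);
}
#endif
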
#define CV_PSHARED(cvp)	(((cvp)->kcond.c_flags & USYNC_PROCESS_SHARED) != 0)

static void
cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
{

	if (cattr == NULL) {
		cvp->kcond.c_clockid = CLOCK_REALTIME;
	} else {
		if (cattr->c_pshared)
			cvp->kcond.c_flags |= USYNC_PROCESS_SHARED;
		cvp->kcond.c_clockid = cattr->c_clockid;
	}
}

static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	struct pthread_cond *cvp;
	const struct pthread_cond_attr *cattr;
	int pshared;

	cattr = cond_attr != NULL ? *cond_attr : NULL;
	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
		pshared = 0;
		cvp = calloc(1, sizeof(struct pthread_cond));
		if (cvp == NULL)
			return (ENOMEM);
	} else {
		pshared = 1;
		cvp = __thr_pshared_offpage(cond, 1);
		if (cvp == NULL)
			return (EFAULT);
	}

	/*
	 * Initialise the condition variable structure:
	 */
	cond_init_body(cvp, cattr);
	*cond = pshared ? THR_PSHARED_PTR : cvp;
	return (0);
}

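/*
 * Illustrative sketch (an addition for exposition, compiled out; the
 * example_* names are hypothetical): how the off-page branch above is
 * reached.  A process-shared attribute makes cond_init() store
 * THR_PSHARED_PTR in *cond and keep the real structure on the shared
 * off-page:
 */
#if 0
static int
example_pshared_init(pthread_cond_t *cv)
{
	pthread_condattr_t attr;
	int error;

	pthread_condattr_init(&attr);
	pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	error = pthread_cond_init(cv, &attr);
	pthread_condattr_destroy(&attr);
	return (error);
}
#endif
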
static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}

#define CHECK_AND_INIT_COND						\
	if (*cond == THR_PSHARED_PTR) {					\
		cvp = __thr_pshared_offpage(cond, 0);			\
		if (cvp == NULL)					\
			return (EINVAL);				\
	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \
		if (cvp == THR_COND_INITIALIZER) {			\
			int ret;					\
			ret = init_static(_get_curthread(), cond);	\
			if (ret)					\
				return (ret);				\
		} else if (cvp == THR_COND_DESTROYED) {			\
			return (EINVAL);				\
		}							\
		cvp = *cond;						\
	}

int
_pthread_cond_init(pthread_cond_t * __restrict cond,
    const pthread_condattr_t * __restrict cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}

int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cvp;
	int error;

	error = 0;
	if (*cond == THR_PSHARED_PTR) {
		cvp = __thr_pshared_offpage(cond, 0);
		if (cvp != NULL) {
			if (cvp->kcond.c_has_waiters)
				error = EBUSY;
			else
				__thr_pshared_destroy(cond);
		}
		if (error == 0)
			*cond = THR_COND_DESTROYED;
	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
		/* nothing to do */
	} else if (cvp == THR_COND_DESTROYED) {
		error = EINVAL;
	} else {
		cvp = *cond;
		if (cvp->__has_user_waiters || cvp->kcond.c_has_waiters)
			error = EBUSY;
		else {
			*cond = THR_COND_DESTROYED;
			free(cvp);
		}
	}
	return (error);
}

/*
 * Cancellation behavior:
 *   A thread may be canceled at the start of the wait.  If the thread is
 *   canceled, it means it did not get a wakeup from pthread_cond_signal();
 *   otherwise it is not canceled.
 *   Thread cancellation never causes a wakeup from pthread_cond_signal()
 *   to be lost.
 */
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	int error, error2, recurse, robust;

	curthread = _get_curthread();
	robust = _mutex_enter_robust(curthread, mp);

	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0) {
		if (robust)
			_mutex_leave_robust(curthread, mp);
		return (error);
	}

	if (cancel)
		_thr_cancel_enter2(curthread, 0);
	error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime,
	    CVWAIT_ABSTIME | CVWAIT_CLOCKID);
	if (cancel)
		_thr_cancel_leave(curthread, 0);

	/*
	 * Note that priority-protected (PP) and robust mutexes may
	 * return interesting error codes.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse, true);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse, true);
		/*
		 * Do not do cancellation on EOWNERDEAD here.  Otherwise
		 * the cancellation cleanup handler would use the protected
		 * state and unlock the mutex without making the state
		 * consistent, leaving the state unrecoverable.
		 */
		if (error2 == 0 && cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}
		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		_mutex_cv_attach(mp, recurse);
		if (cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}
		error2 = 0;
	}
	if (robust)
		_mutex_leave_robust(curthread, mp);
	return (error2 != 0 ? error2 : error);
}

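/*
 * Illustrative sketch (an addition for exposition, compiled out; the
 * example_* names are hypothetical): the mutex is re-acquired before
 * _thr_testcancel() above because POSIX guarantees that a thread canceled
 * in pthread_cond_wait() holds the mutex when its cancellation cleanup
 * handlers run.  A canonical handler therefore unlocks it:
 */
#if 0
static void
example_cleanup(void *arg)
{

	/* The mutex is held here; restore invariants, then unlock. */
	pthread_mutex_unlock(arg);
}

static void
example_cancelable_wait(pthread_cond_t *cv, pthread_mutex_t *mtx)
{

	pthread_mutex_lock(mtx);
	pthread_cleanup_push(example_cleanup, mtx);
	pthread_cond_wait(cv, mtx);	/* cancellation point */
	pthread_cleanup_pop(0);
	pthread_mutex_unlock(mtx);
}
#endif
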
/*
 * The thread waits on a userland queue whenever possible.  When the
 * thread is signaled or broadcast, it is removed from the queue and
 * saved in curthread's defer_waiters[] buffer, but it is not woken up
 * until the mutex is unlocked.
 */
static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	struct sleepqueue *sq;
	int deferred, error, error2, recurse;

	curthread = _get_curthread();
	if (curthread->wchan != NULL)
		PANIC("thread %p was already on queue.", curthread);

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * Set __has_user_waiters before unlocking the mutex; this allows
	 * us to check it without locking in pthread_cond_signal().
	 */
	cvp->__has_user_waiters = 1;
	deferred = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &deferred);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for (;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		if (deferred) {
			deferred = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock,
				    UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
				    0, 0);
		}
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel)
			_thr_cancel_enter2(curthread, 0);
		error = _thr_sleep(curthread, cvp->kcond.c_clockid, abstime);
		if (cancel)
			_thr_cancel_leave(curthread, 0);

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			error2 = _mutex_cv_lock(mp, recurse, false);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (error2);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	error2 = _mutex_cv_lock(mp, recurse, false);
	if (error == 0)
		error = error2;
	return (error);
}

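/*
 * Illustrative sketch (an addition for exposition, compiled out; the
 * example_* names are hypothetical): the deferral machinery above exists
 * for this common pattern.  When the signaler still owns the mutex,
 * waking the waiter immediately would only make it block on the mutex,
 * so the wakeup is queued in defer_waiters[] and delivered at unlock
 * time:
 */
#if 0
static void
example_producer(pthread_mutex_t *mtx, pthread_cond_t *cv, int *ready)
{

	pthread_mutex_lock(mtx);
	*ready = 1;
	pthread_cond_signal(cv);	/* deferred: signaler owns the mutex */
	pthread_mutex_unlock(mtx);	/* deferred wakeup is sent here */
}
#endif
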
static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int error;

	CHECK_AND_INIT_COND

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
	} else {
		mp = *mutex;
	}

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
	    USYNC_PROCESS_SHARED)) != 0 || CV_PSHARED(cvp))
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}

int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}

int
__pthread_cond_wait(pthread_cond_t * __restrict cond,
    pthread_mutex_t * __restrict mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}

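/*
 * Illustrative sketch (an addition for exposition, compiled out; the
 * example_* names are hypothetical): waits may return without the
 * condition being true (spurious wakeups, broadcasts waking several
 * threads), so callers are expected to re-check a predicate in a loop:
 */
#if 0
static void
example_consumer(pthread_mutex_t *mtx, pthread_cond_t *cv, int *ready)
{

	pthread_mutex_lock(mtx);
	while (!*ready)
		pthread_cond_wait(cv, mtx);
	pthread_mutex_unlock(mtx);
}
#endif
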
int
_pthread_cond_timedwait(pthread_cond_t * __restrict cond,
    pthread_mutex_t * __restrict mutex,
    const struct timespec * __restrict abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}

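/*
 * Illustrative sketch (an addition for exposition, compiled out; the
 * example_* names are hypothetical): abstime is an absolute deadline on
 * the condvar's clock (CLOCK_REALTIME unless the attribute set another),
 * not a relative timeout, and it must be normalized as checked above:
 */
#if 0
static int
example_wait_deadline(pthread_cond_t *cv, pthread_mutex_t *mtx, int *ready,
    time_t seconds)
{
	struct timespec abstime;
	int error;

	/* Caller holds mtx. */
	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += seconds;	/* tv_nsec stays in [0, 1000000000) */
	error = 0;
	while (!*ready && error == 0)
		error = pthread_cond_timedwait(cv, mtx, &abstime);
	return (error);			/* 0, or ETIMEDOUT on deadline */
}
#endif
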
static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread *curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int *waddr;
	int pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}

struct broadcast_arg {
	struct pthread *curthread;
	unsigned int *waddrs[MAX_DEFER_WAITERS];
	int count;
};

static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}

static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}

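/*
 * Illustrative sketch (an addition for exposition, compiled out; the
 * example_* names are hypothetical): broadcast drains the whole
 * sleepqueue through drop_cb(), so it fits state changes that can
 * unblock every waiter at once:
 */
#if 0
static void
example_shutdown(pthread_mutex_t *mtx, pthread_cond_t *cv, int *shutting_down)
{

	pthread_mutex_lock(mtx);
	*shutting_down = 1;
	pthread_cond_broadcast(cv);	/* every waiter re-checks the flag */
	pthread_mutex_unlock(mtx);
}
#endif
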
int
_pthread_cond_signal(pthread_cond_t *cond)
{

	return (cond_signal_common(cond));
}

int
_pthread_cond_broadcast(pthread_cond_t *cond)
{

	return (cond_broadcast_common(cond));
}