2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2015, 2016 The FreeBSD Foundation
5 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
6 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
9 * Portions of this software were developed by Konstantin Belousov
10 * under sponsorship from the FreeBSD Foundation.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice unmodified, this list of conditions, and the following
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 #include "opt_umtx_profiling.h"
39 #include <sys/param.h>
40 #include <sys/kernel.h>
41 #include <sys/fcntl.h>
43 #include <sys/filedesc.h>
44 #include <sys/limits.h>
46 #include <sys/malloc.h>
48 #include <sys/mutex.h>
51 #include <sys/resource.h>
52 #include <sys/resourcevar.h>
53 #include <sys/rwlock.h>
55 #include <sys/sched.h>
57 #include <sys/sysctl.h>
58 #include <sys/sysent.h>
59 #include <sys/systm.h>
60 #include <sys/sysproto.h>
61 #include <sys/syscallsubr.h>
62 #include <sys/taskqueue.h>
64 #include <sys/eventhandler.h>
67 #include <security/mac/mac_framework.h>
70 #include <vm/vm_param.h>
72 #include <vm/vm_map.h>
73 #include <vm/vm_object.h>
75 #include <machine/atomic.h>
76 #include <machine/cpu.h>
78 #include <compat/freebsd32/freebsd32.h>
79 #ifdef COMPAT_FREEBSD32
80 #include <compat/freebsd32/freebsd32_proto.h>
84 #define _UMUTEX_WAIT 2
87 #define UPROF_PERC_BIGGER(w, f, sw, sf) \
88 (((w) > (sw)) || ((w) == (sw) && (f) > (sf)))
91 /* Priority inheritance mutex info. */
94 struct thread *pi_owner;
99 /* List entry to link umtx holding by thread */
100 TAILQ_ENTRY(umtx_pi) pi_link;
102 /* List entry in hash */
103 TAILQ_ENTRY(umtx_pi) pi_hashlink;
105 /* List for waiters */
106 TAILQ_HEAD(,umtx_q) pi_blocked;
108 /* Identify a userland lock object */
109 struct umtx_key pi_key;
112 /* A userland synchronous object user. */
114 /* Linked list for the hash. */
115 TAILQ_ENTRY(umtx_q) uq_link;
118 struct umtx_key uq_key;
122 #define UQF_UMTXQ 0x0001
124 /* The thread waits on. */
125 struct thread *uq_thread;
128 * Blocked on PI mutex. read can use chain lock
129 * or umtx_lock, write must have both chain lock and
130 * umtx_lock being hold.
132 struct umtx_pi *uq_pi_blocked;
134 /* On blocked list */
135 TAILQ_ENTRY(umtx_q) uq_lockq;
137 /* Thread contending with us */
138 TAILQ_HEAD(,umtx_pi) uq_pi_contested;
140 /* Inherited priority from PP mutex */
141 u_char uq_inherited_pri;
143 /* Spare queue ready to be reused */
144 struct umtxq_queue *uq_spare_queue;
146 /* The queue we on */
147 struct umtxq_queue *uq_cur_queue;
150 TAILQ_HEAD(umtxq_head, umtx_q);
152 /* Per-key wait-queue */
154 struct umtxq_head head;
156 LIST_ENTRY(umtxq_queue) link;
160 LIST_HEAD(umtxq_list, umtxq_queue);
162 /* Userland lock object's wait-queue chain */
164 /* Lock for this chain. */
167 /* List of sleep queues. */
168 struct umtxq_list uc_queue[2];
169 #define UMTX_SHARED_QUEUE 0
170 #define UMTX_EXCLUSIVE_QUEUE 1
172 LIST_HEAD(, umtxq_queue) uc_spare_queue;
177 /* Chain lock waiters */
180 /* All PI in the list */
181 TAILQ_HEAD(,umtx_pi) uc_pi_list;
183 #ifdef UMTX_PROFILING
189 #define UMTXQ_LOCKED_ASSERT(uc) mtx_assert(&(uc)->uc_lock, MA_OWNED)
192 * Don't propagate time-sharing priority, there is a security reason,
193 * a user can simply introduce PI-mutex, let thread A lock the mutex,
194 * and let another thread B block on the mutex, because B is
195 * sleeping, its priority will be boosted, this causes A's priority to
196 * be boosted via priority propagating too and will never be lowered even
197 * if it is using 100%CPU, this is unfair to other processes.
200 #define UPRI(td) (((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\
201 (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\
202 PRI_MAX_TIMESHARE : (td)->td_user_pri)
204 #define GOLDEN_RATIO_PRIME 2654404609U
206 #define UMTX_CHAINS 512
208 #define UMTX_SHIFTS (__WORD_BIT - 9)
210 #define GET_SHARE(flags) \
211 (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)
213 #define BUSY_SPINS 200
217 bool is_abs_real; /* TIMER_ABSTIME && CLOCK_REALTIME* */
222 struct umtx_copyops {
223 int (*copyin_timeout)(const void *uaddr, struct timespec *tsp);
224 int (*copyin_umtx_time)(const void *uaddr, size_t size,
225 struct _umtx_time *tp);
226 int (*copyin_robust_lists)(const void *uaddr, size_t size,
227 struct umtx_robust_lists_params *rbp);
228 int (*copyout_timeout)(void *uaddr, size_t size,
229 struct timespec *tsp);
230 const size_t timespec_sz;
231 const size_t umtx_time_sz;
235 _Static_assert(sizeof(struct umutex) == sizeof(struct umutex32), "umutex32");
236 _Static_assert(__offsetof(struct umutex, m_spare[0]) ==
237 __offsetof(struct umutex32, m_spare[0]), "m_spare32");
239 int umtx_shm_vnobj_persistent = 0;
240 SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_vnode_persistent, CTLFLAG_RWTUN,
241 &umtx_shm_vnobj_persistent, 0,
242 "False forces destruction of umtx attached to file, on last close");
243 static int umtx_max_rb = 1000;
244 SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_max_robust, CTLFLAG_RWTUN,
248 static uma_zone_t umtx_pi_zone;
249 static struct umtxq_chain umtxq_chains[2][UMTX_CHAINS];
250 static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
251 static int umtx_pi_allocated;
253 static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx debug");
254 SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
255 &umtx_pi_allocated, 0, "Allocated umtx_pi");
256 static int umtx_verbose_rb = 1;
257 SYSCTL_INT(_debug_umtx, OID_AUTO, robust_faults_verbose, CTLFLAG_RWTUN,
261 #ifdef UMTX_PROFILING
262 static long max_length;
263 SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length");
264 static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD, 0, "umtx chain stats");
267 static void abs_timeout_update(struct abs_timeout *timo);
269 static void umtx_shm_init(void);
270 static void umtxq_sysinit(void *);
271 static void umtxq_hash(struct umtx_key *key);
272 static struct umtxq_chain *umtxq_getchain(struct umtx_key *key);
273 static void umtxq_lock(struct umtx_key *key);
274 static void umtxq_unlock(struct umtx_key *key);
275 static void umtxq_busy(struct umtx_key *key);
276 static void umtxq_unbusy(struct umtx_key *key);
277 static void umtxq_insert_queue(struct umtx_q *uq, int q);
278 static void umtxq_remove_queue(struct umtx_q *uq, int q);
279 static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *);
280 static int umtxq_count(struct umtx_key *key);
281 static struct umtx_pi *umtx_pi_alloc(int);
282 static void umtx_pi_free(struct umtx_pi *pi);
283 static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags,
285 static void umtx_thread_cleanup(struct thread *td);
286 SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);
288 #define umtxq_signal(key, nwake) umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
289 #define umtxq_insert(uq) umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
290 #define umtxq_remove(uq) umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)
292 static struct mtx umtx_lock;
294 #ifdef UMTX_PROFILING
296 umtx_init_profiling(void)
298 struct sysctl_oid *chain_oid;
302 for (i = 0; i < UMTX_CHAINS; ++i) {
303 snprintf(chain_name, sizeof(chain_name), "%d", i);
304 chain_oid = SYSCTL_ADD_NODE(NULL,
305 SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
306 chain_name, CTLFLAG_RD, NULL, "umtx hash stats");
307 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
308 "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL);
309 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
310 "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL);
315 sysctl_debug_umtx_chains_peaks(SYSCTL_HANDLER_ARGS)
319 struct umtxq_chain *uc;
320 u_int fract, i, j, tot, whole;
321 u_int sf0, sf1, sf2, sf3, sf4;
322 u_int si0, si1, si2, si3, si4;
323 u_int sw0, sw1, sw2, sw3, sw4;
325 sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
326 for (i = 0; i < 2; i++) {
328 for (j = 0; j < UMTX_CHAINS; ++j) {
329 uc = &umtxq_chains[i][j];
330 mtx_lock(&uc->uc_lock);
331 tot += uc->max_length;
332 mtx_unlock(&uc->uc_lock);
335 sbuf_printf(&sb, "%u) Empty ", i);
337 sf0 = sf1 = sf2 = sf3 = sf4 = 0;
338 si0 = si1 = si2 = si3 = si4 = 0;
339 sw0 = sw1 = sw2 = sw3 = sw4 = 0;
340 for (j = 0; j < UMTX_CHAINS; j++) {
341 uc = &umtxq_chains[i][j];
342 mtx_lock(&uc->uc_lock);
343 whole = uc->max_length * 100;
344 mtx_unlock(&uc->uc_lock);
345 fract = (whole % tot) * 100;
346 if (UPROF_PERC_BIGGER(whole, fract, sw0, sf0)) {
350 } else if (UPROF_PERC_BIGGER(whole, fract, sw1,
355 } else if (UPROF_PERC_BIGGER(whole, fract, sw2,
360 } else if (UPROF_PERC_BIGGER(whole, fract, sw3,
365 } else if (UPROF_PERC_BIGGER(whole, fract, sw4,
372 sbuf_printf(&sb, "queue %u:\n", i);
373 sbuf_printf(&sb, "1st: %u.%u%% idx: %u\n", sw0 / tot,
375 sbuf_printf(&sb, "2nd: %u.%u%% idx: %u\n", sw1 / tot,
377 sbuf_printf(&sb, "3rd: %u.%u%% idx: %u\n", sw2 / tot,
379 sbuf_printf(&sb, "4th: %u.%u%% idx: %u\n", sw3 / tot,
381 sbuf_printf(&sb, "5th: %u.%u%% idx: %u\n", sw4 / tot,
387 sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
393 sysctl_debug_umtx_chains_clear(SYSCTL_HANDLER_ARGS)
395 struct umtxq_chain *uc;
400 error = sysctl_handle_int(oidp, &clear, 0, req);
401 if (error != 0 || req->newptr == NULL)
405 for (i = 0; i < 2; ++i) {
406 for (j = 0; j < UMTX_CHAINS; ++j) {
407 uc = &umtxq_chains[i][j];
408 mtx_lock(&uc->uc_lock);
411 mtx_unlock(&uc->uc_lock);
418 SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, clear,
419 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
420 sysctl_debug_umtx_chains_clear, "I", "Clear umtx chains statistics");
421 SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, peaks,
422 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
423 sysctl_debug_umtx_chains_peaks, "A", "Highest peaks in chains max length");
427 umtxq_sysinit(void *arg __unused)
431 umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
432 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
433 for (i = 0; i < 2; ++i) {
434 for (j = 0; j < UMTX_CHAINS; ++j) {
435 mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
436 MTX_DEF | MTX_DUPOK);
437 LIST_INIT(&umtxq_chains[i][j].uc_queue[0]);
438 LIST_INIT(&umtxq_chains[i][j].uc_queue[1]);
439 LIST_INIT(&umtxq_chains[i][j].uc_spare_queue);
440 TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
441 umtxq_chains[i][j].uc_busy = 0;
442 umtxq_chains[i][j].uc_waiters = 0;
443 #ifdef UMTX_PROFILING
444 umtxq_chains[i][j].length = 0;
445 umtxq_chains[i][j].max_length = 0;
449 #ifdef UMTX_PROFILING
450 umtx_init_profiling();
452 mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);
461 uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
462 uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX,
464 TAILQ_INIT(&uq->uq_spare_queue->head);
465 TAILQ_INIT(&uq->uq_pi_contested);
466 uq->uq_inherited_pri = PRI_MAX;
471 umtxq_free(struct umtx_q *uq)
474 MPASS(uq->uq_spare_queue != NULL);
475 free(uq->uq_spare_queue, M_UMTX);
480 umtxq_hash(struct umtx_key *key)
484 n = (uintptr_t)key->info.both.a + key->info.both.b;
485 key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
488 static inline struct umtxq_chain *
489 umtxq_getchain(struct umtx_key *key)
492 if (key->type <= TYPE_SEM)
493 return (&umtxq_chains[1][key->hash]);
494 return (&umtxq_chains[0][key->hash]);
501 umtxq_lock(struct umtx_key *key)
503 struct umtxq_chain *uc;
505 uc = umtxq_getchain(key);
506 mtx_lock(&uc->uc_lock);
513 umtxq_unlock(struct umtx_key *key)
515 struct umtxq_chain *uc;
517 uc = umtxq_getchain(key);
518 mtx_unlock(&uc->uc_lock);
522 * Set chain to busy state when following operation
523 * may be blocked (kernel mutex can not be used).
526 umtxq_busy(struct umtx_key *key)
528 struct umtxq_chain *uc;
530 uc = umtxq_getchain(key);
531 mtx_assert(&uc->uc_lock, MA_OWNED);
535 int count = BUSY_SPINS;
538 while (uc->uc_busy && --count > 0)
544 while (uc->uc_busy) {
546 msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
557 umtxq_unbusy(struct umtx_key *key)
559 struct umtxq_chain *uc;
561 uc = umtxq_getchain(key);
562 mtx_assert(&uc->uc_lock, MA_OWNED);
563 KASSERT(uc->uc_busy != 0, ("not busy"));
570 umtxq_unbusy_unlocked(struct umtx_key *key)
578 static struct umtxq_queue *
579 umtxq_queue_lookup(struct umtx_key *key, int q)
581 struct umtxq_queue *uh;
582 struct umtxq_chain *uc;
584 uc = umtxq_getchain(key);
585 UMTXQ_LOCKED_ASSERT(uc);
586 LIST_FOREACH(uh, &uc->uc_queue[q], link) {
587 if (umtx_key_match(&uh->key, key))
595 umtxq_insert_queue(struct umtx_q *uq, int q)
597 struct umtxq_queue *uh;
598 struct umtxq_chain *uc;
600 uc = umtxq_getchain(&uq->uq_key);
601 UMTXQ_LOCKED_ASSERT(uc);
602 KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue"));
603 uh = umtxq_queue_lookup(&uq->uq_key, q);
605 LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
607 uh = uq->uq_spare_queue;
608 uh->key = uq->uq_key;
609 LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
610 #ifdef UMTX_PROFILING
612 if (uc->length > uc->max_length) {
613 uc->max_length = uc->length;
614 if (uc->max_length > max_length)
615 max_length = uc->max_length;
619 uq->uq_spare_queue = NULL;
621 TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
623 uq->uq_flags |= UQF_UMTXQ;
624 uq->uq_cur_queue = uh;
629 umtxq_remove_queue(struct umtx_q *uq, int q)
631 struct umtxq_chain *uc;
632 struct umtxq_queue *uh;
634 uc = umtxq_getchain(&uq->uq_key);
635 UMTXQ_LOCKED_ASSERT(uc);
636 if (uq->uq_flags & UQF_UMTXQ) {
637 uh = uq->uq_cur_queue;
638 TAILQ_REMOVE(&uh->head, uq, uq_link);
640 uq->uq_flags &= ~UQF_UMTXQ;
641 if (TAILQ_EMPTY(&uh->head)) {
642 KASSERT(uh->length == 0,
643 ("inconsistent umtxq_queue length"));
644 #ifdef UMTX_PROFILING
647 LIST_REMOVE(uh, link);
649 uh = LIST_FIRST(&uc->uc_spare_queue);
650 KASSERT(uh != NULL, ("uc_spare_queue is empty"));
651 LIST_REMOVE(uh, link);
653 uq->uq_spare_queue = uh;
654 uq->uq_cur_queue = NULL;
659 * Check if there are multiple waiters
662 umtxq_count(struct umtx_key *key)
664 struct umtxq_queue *uh;
666 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
667 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
674 * Check if there are multiple PI waiters and returns first
678 umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
680 struct umtxq_queue *uh;
683 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
684 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
686 *first = TAILQ_FIRST(&uh->head);
693 * Wake up threads waiting on an userland object.
697 umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
699 struct umtxq_queue *uh;
704 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
705 uh = umtxq_queue_lookup(key, q);
707 while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
708 umtxq_remove_queue(uq, q);
719 * Wake up specified thread.
722 umtxq_signal_thread(struct umtx_q *uq)
725 UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));
731 tstohz(const struct timespec *tsp)
735 TIMESPEC_TO_TIMEVAL(&tv, tsp);
740 abs_timeout_init(struct abs_timeout *timo, int clockid, int absolute,
741 const struct timespec *timeout)
744 timo->clockid = clockid;
746 timo->is_abs_real = false;
747 abs_timeout_update(timo);
748 timespecadd(&timo->cur, timeout, &timo->end);
750 timo->end = *timeout;
751 timo->is_abs_real = clockid == CLOCK_REALTIME ||
752 clockid == CLOCK_REALTIME_FAST ||
753 clockid == CLOCK_REALTIME_PRECISE;
755 * If is_abs_real, umtxq_sleep will read the clock
756 * after setting td_rtcgen; otherwise, read it here.
758 if (!timo->is_abs_real) {
759 abs_timeout_update(timo);
765 abs_timeout_init2(struct abs_timeout *timo, const struct _umtx_time *umtxtime)
768 abs_timeout_init(timo, umtxtime->_clockid,
769 (umtxtime->_flags & UMTX_ABSTIME) != 0, &umtxtime->_timeout);
773 abs_timeout_update(struct abs_timeout *timo)
776 kern_clock_gettime(curthread, timo->clockid, &timo->cur);
780 abs_timeout_gethz(struct abs_timeout *timo)
784 if (timespeccmp(&timo->end, &timo->cur, <=))
786 timespecsub(&timo->end, &timo->cur, &tts);
787 return (tstohz(&tts));
791 umtx_unlock_val(uint32_t flags, bool rb)
795 return (UMUTEX_RB_OWNERDEAD);
796 else if ((flags & UMUTEX_NONCONSISTENT) != 0)
797 return (UMUTEX_RB_NOTRECOV);
799 return (UMUTEX_UNOWNED);
804 * Put thread into sleep state, before sleeping, check if
805 * thread was removed from umtx queue.
808 umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *abstime)
810 struct umtxq_chain *uc;
813 if (abstime != NULL && abstime->is_abs_real) {
814 curthread->td_rtcgen = atomic_load_acq_int(&rtc_generation);
815 abs_timeout_update(abstime);
818 uc = umtxq_getchain(&uq->uq_key);
819 UMTXQ_LOCKED_ASSERT(uc);
821 if (!(uq->uq_flags & UQF_UMTXQ)) {
825 if (abstime != NULL) {
826 timo = abs_timeout_gethz(abstime);
833 error = msleep(uq, &uc->uc_lock, PCATCH | PDROP, wmesg, timo);
834 if (error == EINTR || error == ERESTART) {
835 umtxq_lock(&uq->uq_key);
838 if (abstime != NULL) {
839 if (abstime->is_abs_real)
840 curthread->td_rtcgen =
841 atomic_load_acq_int(&rtc_generation);
842 abs_timeout_update(abstime);
844 umtxq_lock(&uq->uq_key);
847 curthread->td_rtcgen = 0;
852 * Convert userspace address into unique logical address.
855 umtx_key_get(const void *addr, int type, int share, struct umtx_key *key)
857 struct thread *td = curthread;
859 vm_map_entry_t entry;
865 if (share == THREAD_SHARE) {
867 key->info.private.vs = td->td_proc->p_vmspace;
868 key->info.private.addr = (uintptr_t)addr;
870 MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
871 map = &td->td_proc->p_vmspace->vm_map;
872 if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
873 &entry, &key->info.shared.object, &pindex, &prot,
874 &wired) != KERN_SUCCESS) {
878 if ((share == PROCESS_SHARE) ||
879 (share == AUTO_SHARE &&
880 VM_INHERIT_SHARE == entry->inheritance)) {
882 key->info.shared.offset = (vm_offset_t)addr -
883 entry->start + entry->offset;
884 vm_object_reference(key->info.shared.object);
887 key->info.private.vs = td->td_proc->p_vmspace;
888 key->info.private.addr = (uintptr_t)addr;
890 vm_map_lookup_done(map, entry);
901 umtx_key_release(struct umtx_key *key)
904 vm_object_deallocate(key->info.shared.object);
908 * Fetch and compare value, sleep on the address if value is not changed.
911 do_wait(struct thread *td, void *addr, u_long id,
912 struct _umtx_time *timeout, int compat32, int is_private)
914 struct abs_timeout timo;
921 if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
922 is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
926 abs_timeout_init2(&timo, timeout);
928 umtxq_lock(&uq->uq_key);
930 umtxq_unlock(&uq->uq_key);
932 error = fueword(addr, &tmp);
936 error = fueword32(addr, &tmp32);
942 umtxq_lock(&uq->uq_key);
945 error = umtxq_sleep(uq, "uwait", timeout == NULL ?
947 if ((uq->uq_flags & UQF_UMTXQ) == 0)
951 } else if ((uq->uq_flags & UQF_UMTXQ) != 0) {
954 umtxq_unlock(&uq->uq_key);
955 umtx_key_release(&uq->uq_key);
956 if (error == ERESTART)
962 * Wake up threads sleeping on the specified address.
965 kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
970 if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
971 is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
974 umtxq_signal(&key, n_wake);
976 umtx_key_release(&key);
981 * Lock PTHREAD_PRIO_NONE protocol POSIX mutex.
984 do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags,
985 struct _umtx_time *timeout, int mode)
987 struct abs_timeout timo;
989 uint32_t owner, old, id;
996 abs_timeout_init2(&timo, timeout);
999 * Care must be exercised when dealing with umtx structure. It
1000 * can fault on any access.
1003 rv = fueword32(&m->m_owner, &owner);
1006 if (mode == _UMUTEX_WAIT) {
1007 if (owner == UMUTEX_UNOWNED ||
1008 owner == UMUTEX_CONTESTED ||
1009 owner == UMUTEX_RB_OWNERDEAD ||
1010 owner == UMUTEX_RB_NOTRECOV)
1014 * Robust mutex terminated. Kernel duty is to
1015 * return EOWNERDEAD to the userspace. The
1016 * umutex.m_flags UMUTEX_NONCONSISTENT is set
1017 * by the common userspace code.
1019 if (owner == UMUTEX_RB_OWNERDEAD) {
1020 rv = casueword32(&m->m_owner,
1021 UMUTEX_RB_OWNERDEAD, &owner,
1022 id | UMUTEX_CONTESTED);
1026 MPASS(owner == UMUTEX_RB_OWNERDEAD);
1027 return (EOWNERDEAD); /* success */
1030 rv = thread_check_susp(td, false);
1035 if (owner == UMUTEX_RB_NOTRECOV)
1036 return (ENOTRECOVERABLE);
1039 * Try the uncontested case. This should be
1042 rv = casueword32(&m->m_owner, UMUTEX_UNOWNED,
1044 /* The address was invalid. */
1048 /* The acquire succeeded. */
1050 MPASS(owner == UMUTEX_UNOWNED);
1055 * If no one owns it but it is contested try
1059 if (owner == UMUTEX_CONTESTED) {
1060 rv = casueword32(&m->m_owner,
1061 UMUTEX_CONTESTED, &owner,
1062 id | UMUTEX_CONTESTED);
1063 /* The address was invalid. */
1067 MPASS(owner == UMUTEX_CONTESTED);
1071 rv = thread_check_susp(td, false);
1077 * If this failed the lock has
1083 /* rv == 1 but not contested, likely store failure */
1084 rv = thread_check_susp(td, false);
1089 if (mode == _UMUTEX_TRY)
1093 * If we caught a signal, we have retried and now
1099 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
1100 GET_SHARE(flags), &uq->uq_key)) != 0)
1103 umtxq_lock(&uq->uq_key);
1104 umtxq_busy(&uq->uq_key);
1106 umtxq_unlock(&uq->uq_key);
1109 * Set the contested bit so that a release in user space
1110 * knows to use the system call for unlock. If this fails
1111 * either some one else has acquired the lock or it has been
1114 rv = casueword32(&m->m_owner, owner, &old,
1115 owner | UMUTEX_CONTESTED);
1117 /* The address was invalid or casueword failed to store. */
1118 if (rv == -1 || rv == 1) {
1119 umtxq_lock(&uq->uq_key);
1121 umtxq_unbusy(&uq->uq_key);
1122 umtxq_unlock(&uq->uq_key);
1123 umtx_key_release(&uq->uq_key);
1127 rv = thread_check_susp(td, false);
1135 * We set the contested bit, sleep. Otherwise the lock changed
1136 * and we need to retry or we lost a race to the thread
1137 * unlocking the umtx.
1139 umtxq_lock(&uq->uq_key);
1140 umtxq_unbusy(&uq->uq_key);
1141 MPASS(old == owner);
1142 error = umtxq_sleep(uq, "umtxn", timeout == NULL ?
1145 umtxq_unlock(&uq->uq_key);
1146 umtx_key_release(&uq->uq_key);
1149 error = thread_check_susp(td, false);
1156 * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex.
1159 do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
1161 struct umtx_key key;
1162 uint32_t owner, old, id, newlock;
1169 * Make sure we own this mtx.
1171 error = fueword32(&m->m_owner, &owner);
1175 if ((owner & ~UMUTEX_CONTESTED) != id)
1178 newlock = umtx_unlock_val(flags, rb);
1179 if ((owner & UMUTEX_CONTESTED) == 0) {
1180 error = casueword32(&m->m_owner, owner, &old, newlock);
1184 error = thread_check_susp(td, false);
1189 MPASS(old == owner);
1193 /* We should only ever be in here for contested locks */
1194 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
1200 count = umtxq_count(&key);
1204 * When unlocking the umtx, it must be marked as unowned if
1205 * there is zero or one thread only waiting for it.
1206 * Otherwise, it must be marked as contested.
1209 newlock |= UMUTEX_CONTESTED;
1210 error = casueword32(&m->m_owner, owner, &old, newlock);
1212 umtxq_signal(&key, 1);
1215 umtx_key_release(&key);
1221 error = thread_check_susp(td, false);
1230 * Check if the mutex is available and wake up a waiter,
1231 * only for simple mutex.
1234 do_wake_umutex(struct thread *td, struct umutex *m)
1236 struct umtx_key key;
1243 error = fueword32(&m->m_owner, &owner);
1247 if ((owner & ~UMUTEX_CONTESTED) != 0 && owner != UMUTEX_RB_OWNERDEAD &&
1248 owner != UMUTEX_RB_NOTRECOV)
1251 error = fueword32(&m->m_flags, &flags);
1255 /* We should only ever be in here for contested locks */
1256 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
1262 count = umtxq_count(&key);
1265 if (count <= 1 && owner != UMUTEX_RB_OWNERDEAD &&
1266 owner != UMUTEX_RB_NOTRECOV) {
1267 error = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
1271 } else if (error == 1) {
1275 umtx_key_release(&key);
1276 error = thread_check_susp(td, false);
1284 if (error == 0 && count != 0) {
1285 MPASS((owner & ~UMUTEX_CONTESTED) == 0 ||
1286 owner == UMUTEX_RB_OWNERDEAD ||
1287 owner == UMUTEX_RB_NOTRECOV);
1288 umtxq_signal(&key, 1);
1292 umtx_key_release(&key);
1297 * Check if the mutex has waiters and tries to fix contention bit.
1300 do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
1302 struct umtx_key key;
1303 uint32_t owner, old;
1308 switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT |
1312 type = TYPE_NORMAL_UMUTEX;
1314 case UMUTEX_PRIO_INHERIT:
1315 type = TYPE_PI_UMUTEX;
1317 case (UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST):
1318 type = TYPE_PI_ROBUST_UMUTEX;
1320 case UMUTEX_PRIO_PROTECT:
1321 type = TYPE_PP_UMUTEX;
1323 case (UMUTEX_PRIO_PROTECT | UMUTEX_ROBUST):
1324 type = TYPE_PP_ROBUST_UMUTEX;
1329 if ((error = umtx_key_get(m, type, GET_SHARE(flags), &key)) != 0)
1335 count = umtxq_count(&key);
1338 error = fueword32(&m->m_owner, &owner);
1343 * Only repair contention bit if there is a waiter, this means
1344 * the mutex is still being referenced by userland code,
1345 * otherwise don't update any memory.
1347 while (error == 0 && (owner & UMUTEX_CONTESTED) == 0 &&
1348 (count > 1 || (count == 1 && (owner & ~UMUTEX_CONTESTED) != 0))) {
1349 error = casueword32(&m->m_owner, owner, &old,
1350 owner | UMUTEX_CONTESTED);
1356 MPASS(old == owner);
1360 error = thread_check_susp(td, false);
1364 if (error == EFAULT) {
1365 umtxq_signal(&key, INT_MAX);
1366 } else if (count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 ||
1367 owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV))
1368 umtxq_signal(&key, 1);
1371 umtx_key_release(&key);
1375 static inline struct umtx_pi *
1376 umtx_pi_alloc(int flags)
1380 pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
1381 TAILQ_INIT(&pi->pi_blocked);
1382 atomic_add_int(&umtx_pi_allocated, 1);
1387 umtx_pi_free(struct umtx_pi *pi)
1389 uma_zfree(umtx_pi_zone, pi);
1390 atomic_add_int(&umtx_pi_allocated, -1);
1394 * Adjust the thread's position on a pi_state after its priority has been
1398 umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
1400 struct umtx_q *uq, *uq1, *uq2;
1403 mtx_assert(&umtx_lock, MA_OWNED);
1410 * Check if the thread needs to be moved on the blocked chain.
1411 * It needs to be moved if either its priority is lower than
1412 * the previous thread or higher than the next thread.
1414 uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
1415 uq2 = TAILQ_NEXT(uq, uq_lockq);
1416 if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
1417 (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
1419 * Remove thread from blocked chain and determine where
1420 * it should be moved to.
1422 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1423 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1424 td1 = uq1->uq_thread;
1425 MPASS(td1->td_proc->p_magic == P_MAGIC);
1426 if (UPRI(td1) > UPRI(td))
1431 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1433 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1438 static struct umtx_pi *
1439 umtx_pi_next(struct umtx_pi *pi)
1441 struct umtx_q *uq_owner;
1443 if (pi->pi_owner == NULL)
1445 uq_owner = pi->pi_owner->td_umtxq;
1446 if (uq_owner == NULL)
1448 return (uq_owner->uq_pi_blocked);
1452 * Floyd's Cycle-Finding Algorithm.
1455 umtx_pi_check_loop(struct umtx_pi *pi)
1457 struct umtx_pi *pi1; /* fast iterator */
1459 mtx_assert(&umtx_lock, MA_OWNED);
1464 pi = umtx_pi_next(pi);
1467 pi1 = umtx_pi_next(pi1);
1470 pi1 = umtx_pi_next(pi1);
1480 * Propagate priority when a thread is blocked on POSIX
1484 umtx_propagate_priority(struct thread *td)
1490 mtx_assert(&umtx_lock, MA_OWNED);
1493 pi = uq->uq_pi_blocked;
1496 if (umtx_pi_check_loop(pi))
1501 if (td == NULL || td == curthread)
1504 MPASS(td->td_proc != NULL);
1505 MPASS(td->td_proc->p_magic == P_MAGIC);
1508 if (td->td_lend_user_pri > pri)
1509 sched_lend_user_prio(td, pri);
1517 * Pick up the lock that td is blocked on.
1520 pi = uq->uq_pi_blocked;
1523 /* Resort td on the list if needed. */
1524 umtx_pi_adjust_thread(pi, td);
1529 * Unpropagate priority for a PI mutex when a thread blocked on
1530 * it is interrupted by signal or resumed by others.
1533 umtx_repropagate_priority(struct umtx_pi *pi)
1535 struct umtx_q *uq, *uq_owner;
1536 struct umtx_pi *pi2;
1539 mtx_assert(&umtx_lock, MA_OWNED);
1541 if (umtx_pi_check_loop(pi))
1543 while (pi != NULL && pi->pi_owner != NULL) {
1545 uq_owner = pi->pi_owner->td_umtxq;
1547 TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
1548 uq = TAILQ_FIRST(&pi2->pi_blocked);
1550 if (pri > UPRI(uq->uq_thread))
1551 pri = UPRI(uq->uq_thread);
1555 if (pri > uq_owner->uq_inherited_pri)
1556 pri = uq_owner->uq_inherited_pri;
1557 thread_lock(pi->pi_owner);
1558 sched_lend_user_prio(pi->pi_owner, pri);
1559 thread_unlock(pi->pi_owner);
1560 if ((pi = uq_owner->uq_pi_blocked) != NULL)
1561 umtx_pi_adjust_thread(pi, uq_owner->uq_thread);
1566 * Insert a PI mutex into owned list.
1569 umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
1571 struct umtx_q *uq_owner;
1573 uq_owner = owner->td_umtxq;
1574 mtx_assert(&umtx_lock, MA_OWNED);
1575 MPASS(pi->pi_owner == NULL);
1576 pi->pi_owner = owner;
1577 TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);
1582 * Disown a PI mutex, and remove it from the owned list.
1585 umtx_pi_disown(struct umtx_pi *pi)
1588 mtx_assert(&umtx_lock, MA_OWNED);
1589 TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested, pi, pi_link);
1590 pi->pi_owner = NULL;
1594 * Claim ownership of a PI mutex.
1597 umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
1602 mtx_lock(&umtx_lock);
1603 if (pi->pi_owner == owner) {
1604 mtx_unlock(&umtx_lock);
1608 if (pi->pi_owner != NULL) {
1610 * userland may have already messed the mutex, sigh.
1612 mtx_unlock(&umtx_lock);
1615 umtx_pi_setowner(pi, owner);
1616 uq = TAILQ_FIRST(&pi->pi_blocked);
1618 pri = UPRI(uq->uq_thread);
1620 if (pri < UPRI(owner))
1621 sched_lend_user_prio(owner, pri);
1622 thread_unlock(owner);
1624 mtx_unlock(&umtx_lock);
1629 * Adjust a thread's order position in its blocked PI mutex,
1630 * this may result new priority propagating process.
1633 umtx_pi_adjust(struct thread *td, u_char oldpri)
1639 mtx_lock(&umtx_lock);
1641 * Pick up the lock that td is blocked on.
1643 pi = uq->uq_pi_blocked;
1645 umtx_pi_adjust_thread(pi, td);
1646 umtx_repropagate_priority(pi);
1648 mtx_unlock(&umtx_lock);
1652 * Sleep on a PI mutex.
1655 umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner,
1656 const char *wmesg, struct abs_timeout *timo, bool shared)
1658 struct thread *td, *td1;
1662 struct umtxq_chain *uc;
1664 uc = umtxq_getchain(&pi->pi_key);
1668 KASSERT(td == curthread, ("inconsistent uq_thread"));
1669 UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));
1670 KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));
1672 mtx_lock(&umtx_lock);
1673 if (pi->pi_owner == NULL) {
1674 mtx_unlock(&umtx_lock);
1675 td1 = tdfind(owner, shared ? -1 : td->td_proc->p_pid);
1676 mtx_lock(&umtx_lock);
1678 if (pi->pi_owner == NULL)
1679 umtx_pi_setowner(pi, td1);
1680 PROC_UNLOCK(td1->td_proc);
1684 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1685 pri = UPRI(uq1->uq_thread);
1691 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1693 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1695 uq->uq_pi_blocked = pi;
1697 td->td_flags |= TDF_UPIBLOCKED;
1699 umtx_propagate_priority(td);
1700 mtx_unlock(&umtx_lock);
1701 umtxq_unbusy(&uq->uq_key);
1703 error = umtxq_sleep(uq, wmesg, timo);
1706 mtx_lock(&umtx_lock);
1707 uq->uq_pi_blocked = NULL;
1709 td->td_flags &= ~TDF_UPIBLOCKED;
1711 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1712 umtx_repropagate_priority(pi);
1713 mtx_unlock(&umtx_lock);
1714 umtxq_unlock(&uq->uq_key);
1720 * Add reference count for a PI mutex.
1723 umtx_pi_ref(struct umtx_pi *pi)
1726 UMTXQ_LOCKED_ASSERT(umtxq_getchain(&pi->pi_key));
1731 * Decrease reference count for a PI mutex, if the counter
1732 * is decreased to zero, its memory space is freed.
1735 umtx_pi_unref(struct umtx_pi *pi)
1737 struct umtxq_chain *uc;
1739 uc = umtxq_getchain(&pi->pi_key);
1740 UMTXQ_LOCKED_ASSERT(uc);
1741 KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
1742 if (--pi->pi_refcount == 0) {
1743 mtx_lock(&umtx_lock);
1744 if (pi->pi_owner != NULL)
1746 KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
1747 ("blocked queue not empty"));
1748 mtx_unlock(&umtx_lock);
1749 TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
1755 * Find a PI mutex in hash table.
1757 static struct umtx_pi *
1758 umtx_pi_lookup(struct umtx_key *key)
1760 struct umtxq_chain *uc;
1763 uc = umtxq_getchain(key);
1764 UMTXQ_LOCKED_ASSERT(uc);
1766 TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
1767 if (umtx_key_match(&pi->pi_key, key)) {
1775 * Insert a PI mutex into hash table.
1778 umtx_pi_insert(struct umtx_pi *pi)
1780 struct umtxq_chain *uc;
1782 uc = umtxq_getchain(&pi->pi_key);
1783 UMTXQ_LOCKED_ASSERT(uc);
1784 TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
1791 do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags,
1792 struct _umtx_time *timeout, int try)
1794 struct abs_timeout timo;
1796 struct umtx_pi *pi, *new_pi;
1797 uint32_t id, old_owner, owner, old;
1803 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
1804 TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
1808 if (timeout != NULL)
1809 abs_timeout_init2(&timo, timeout);
1811 umtxq_lock(&uq->uq_key);
1812 pi = umtx_pi_lookup(&uq->uq_key);
1814 new_pi = umtx_pi_alloc(M_NOWAIT);
1815 if (new_pi == NULL) {
1816 umtxq_unlock(&uq->uq_key);
1817 new_pi = umtx_pi_alloc(M_WAITOK);
1818 umtxq_lock(&uq->uq_key);
1819 pi = umtx_pi_lookup(&uq->uq_key);
1821 umtx_pi_free(new_pi);
1825 if (new_pi != NULL) {
1826 new_pi->pi_key = uq->uq_key;
1827 umtx_pi_insert(new_pi);
1832 umtxq_unlock(&uq->uq_key);
1835 * Care must be exercised when dealing with umtx structure. It
1836 * can fault on any access.
1840 * Try the uncontested case. This should be done in userland.
1842 rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, &owner, id);
1843 /* The address was invalid. */
1848 /* The acquire succeeded. */
1850 MPASS(owner == UMUTEX_UNOWNED);
1855 if (owner == UMUTEX_RB_NOTRECOV) {
1856 error = ENOTRECOVERABLE;
1861 * Avoid overwriting a possible error from sleep due
1862 * to the pending signal with suspension check result.
1865 error = thread_check_susp(td, true);
1870 /* If no one owns it but it is contested try to acquire it. */
1871 if (owner == UMUTEX_CONTESTED || owner == UMUTEX_RB_OWNERDEAD) {
1873 rv = casueword32(&m->m_owner, owner, &owner,
1874 id | UMUTEX_CONTESTED);
1875 /* The address was invalid. */
1882 error = thread_check_susp(td, true);
1888 * If this failed the lock could
1895 MPASS(owner == old_owner);
1896 umtxq_lock(&uq->uq_key);
1897 umtxq_busy(&uq->uq_key);
1898 error = umtx_pi_claim(pi, td);
1899 umtxq_unbusy(&uq->uq_key);
1900 umtxq_unlock(&uq->uq_key);
1903 * Since we're going to return an
1904 * error, restore the m_owner to its
1905 * previous, unowned state to avoid
1906 * compounding the problem.
1908 (void)casuword32(&m->m_owner,
1909 id | UMUTEX_CONTESTED, old_owner);
1911 if (error == 0 && old_owner == UMUTEX_RB_OWNERDEAD)
1916 if ((owner & ~UMUTEX_CONTESTED) == id) {
1927 * If we caught a signal, we have retried and now
1933 umtxq_lock(&uq->uq_key);
1934 umtxq_busy(&uq->uq_key);
1935 umtxq_unlock(&uq->uq_key);
1938 * Set the contested bit so that a release in user space
1939 * knows to use the system call for unlock. If this fails
1940 * either some one else has acquired the lock or it has been
1943 rv = casueword32(&m->m_owner, owner, &old, owner |
1946 /* The address was invalid. */
1948 umtxq_unbusy_unlocked(&uq->uq_key);
1953 umtxq_unbusy_unlocked(&uq->uq_key);
1954 error = thread_check_susp(td, true);
1959 * The lock changed and we need to retry or we
1960 * lost a race to the thread unlocking the
1961 * umtx. Note that the UMUTEX_RB_OWNERDEAD
1962 * value for owner is impossible there.
1967 umtxq_lock(&uq->uq_key);
1969 /* We set the contested bit, sleep. */
1970 MPASS(old == owner);
1971 error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED,
1972 "umtxpi", timeout == NULL ? NULL : &timo,
1973 (flags & USYNC_PROCESS_SHARED) != 0);
1977 error = thread_check_susp(td, false);
1982 umtxq_lock(&uq->uq_key);
1984 umtxq_unlock(&uq->uq_key);
1986 umtx_key_release(&uq->uq_key);
1991 * Unlock a PI mutex.
1994 do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
1996 struct umtx_key key;
1997 struct umtx_q *uq_first, *uq_first2, *uq_me;
1998 struct umtx_pi *pi, *pi2;
1999 uint32_t id, new_owner, old, owner;
2000 int count, error, pri;
2006 * Make sure we own this mtx.
2008 error = fueword32(&m->m_owner, &owner);
2012 if ((owner & ~UMUTEX_CONTESTED) != id)
2015 new_owner = umtx_unlock_val(flags, rb);
2017 /* This should be done in userland */
2018 if ((owner & UMUTEX_CONTESTED) == 0) {
2019 error = casueword32(&m->m_owner, owner, &old, new_owner);
2023 error = thread_check_susp(td, true);
2033 /* We should only ever be in here for contested locks */
2034 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
2035 TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
2041 count = umtxq_count_pi(&key, &uq_first);
2042 if (uq_first != NULL) {
2043 mtx_lock(&umtx_lock);
2044 pi = uq_first->uq_pi_blocked;
2045 KASSERT(pi != NULL, ("pi == NULL?"));
2046 if (pi->pi_owner != td && !(rb && pi->pi_owner == NULL)) {
2047 mtx_unlock(&umtx_lock);
2050 umtx_key_release(&key);
2051 /* userland messed the mutex */
2054 uq_me = td->td_umtxq;
2055 if (pi->pi_owner == td)
2057 /* get highest priority thread which is still sleeping. */
2058 uq_first = TAILQ_FIRST(&pi->pi_blocked);
2059 while (uq_first != NULL &&
2060 (uq_first->uq_flags & UQF_UMTXQ) == 0) {
2061 uq_first = TAILQ_NEXT(uq_first, uq_lockq);
2064 TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
2065 uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
2066 if (uq_first2 != NULL) {
2067 if (pri > UPRI(uq_first2->uq_thread))
2068 pri = UPRI(uq_first2->uq_thread);
2072 sched_lend_user_prio(td, pri);
2074 mtx_unlock(&umtx_lock);
2076 umtxq_signal_thread(uq_first);
2078 pi = umtx_pi_lookup(&key);
2080 * A umtx_pi can exist if a signal or timeout removed the
2081 * last waiter from the umtxq, but there is still
2082 * a thread in do_lock_pi() holding the umtx_pi.
2086 * The umtx_pi can be unowned, such as when a thread
2087 * has just entered do_lock_pi(), allocated the
2088 * umtx_pi, and unlocked the umtxq.
2089 * If the current thread owns it, it must disown it.
2091 mtx_lock(&umtx_lock);
2092 if (pi->pi_owner == td)
2094 mtx_unlock(&umtx_lock);
2100 * When unlocking the umtx, it must be marked as unowned if
2101 * there is zero or one thread only waiting for it.
2102 * Otherwise, it must be marked as contested.
2106 new_owner |= UMUTEX_CONTESTED;
2108 error = casueword32(&m->m_owner, owner, &old, new_owner);
2110 error = thread_check_susp(td, false);
2114 umtxq_unbusy_unlocked(&key);
2115 umtx_key_release(&key);
2118 if (error == 0 && old != owner)
2127 do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
2128 struct _umtx_time *timeout, int try)
2130 struct abs_timeout timo;
2131 struct umtx_q *uq, *uq2;
2135 int error, pri, old_inherited_pri, su, rv;
2139 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
2140 TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
2144 if (timeout != NULL)
2145 abs_timeout_init2(&timo, timeout);
2147 su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2149 old_inherited_pri = uq->uq_inherited_pri;
2150 umtxq_lock(&uq->uq_key);
2151 umtxq_busy(&uq->uq_key);
2152 umtxq_unlock(&uq->uq_key);
2154 rv = fueword32(&m->m_ceilings[0], &ceiling);
2159 ceiling = RTP_PRIO_MAX - ceiling;
2160 if (ceiling > RTP_PRIO_MAX) {
2165 mtx_lock(&umtx_lock);
2166 if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
2167 mtx_unlock(&umtx_lock);
2171 if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
2172 uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
2174 if (uq->uq_inherited_pri < UPRI(td))
2175 sched_lend_user_prio(td, uq->uq_inherited_pri);
2178 mtx_unlock(&umtx_lock);
2180 rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
2181 id | UMUTEX_CONTESTED);
2182 /* The address was invalid. */
2188 MPASS(owner == UMUTEX_CONTESTED);
2193 if (owner == UMUTEX_RB_OWNERDEAD) {
2194 rv = casueword32(&m->m_owner, UMUTEX_RB_OWNERDEAD,
2195 &owner, id | UMUTEX_CONTESTED);
2201 MPASS(owner == UMUTEX_RB_OWNERDEAD);
2202 error = EOWNERDEAD; /* success */
2207 * rv == 1, only check for suspension if we
2208 * did not already catched a signal. If we
2209 * get an error from the check, the same
2210 * condition is checked by the umtxq_sleep()
2211 * call below, so we should obliterate the
2212 * error to not skip the last loop iteration.
2215 error = thread_check_susp(td, false);
2224 } else if (owner == UMUTEX_RB_NOTRECOV) {
2225 error = ENOTRECOVERABLE;
2232 * If we caught a signal, we have retried and now
2238 umtxq_lock(&uq->uq_key);
2240 umtxq_unbusy(&uq->uq_key);
2241 error = umtxq_sleep(uq, "umtxpp", timeout == NULL ?
2244 umtxq_unlock(&uq->uq_key);
2246 mtx_lock(&umtx_lock);
2247 uq->uq_inherited_pri = old_inherited_pri;
2249 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2250 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2252 if (pri > UPRI(uq2->uq_thread))
2253 pri = UPRI(uq2->uq_thread);
2256 if (pri > uq->uq_inherited_pri)
2257 pri = uq->uq_inherited_pri;
2259 sched_lend_user_prio(td, pri);
2261 mtx_unlock(&umtx_lock);
2264 if (error != 0 && error != EOWNERDEAD) {
2265 mtx_lock(&umtx_lock);
2266 uq->uq_inherited_pri = old_inherited_pri;
2268 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2269 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2271 if (pri > UPRI(uq2->uq_thread))
2272 pri = UPRI(uq2->uq_thread);
2275 if (pri > uq->uq_inherited_pri)
2276 pri = uq->uq_inherited_pri;
2278 sched_lend_user_prio(td, pri);
2280 mtx_unlock(&umtx_lock);
2284 umtxq_unbusy_unlocked(&uq->uq_key);
2285 umtx_key_release(&uq->uq_key);
2290 * Unlock a PP mutex.
2293 do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
2295 struct umtx_key key;
2296 struct umtx_q *uq, *uq2;
2298 uint32_t id, owner, rceiling;
2299 int error, pri, new_inherited_pri, su;
2303 su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2306 * Make sure we own this mtx.
2308 error = fueword32(&m->m_owner, &owner);
2312 if ((owner & ~UMUTEX_CONTESTED) != id)
2315 error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
2320 new_inherited_pri = PRI_MAX;
2322 rceiling = RTP_PRIO_MAX - rceiling;
2323 if (rceiling > RTP_PRIO_MAX)
2325 new_inherited_pri = PRI_MIN_REALTIME + rceiling;
2328 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
2329 TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
2336 * For priority protected mutex, always set unlocked state
2337 * to UMUTEX_CONTESTED, so that userland always enters kernel
2338 * to lock the mutex, it is necessary because thread priority
2339 * has to be adjusted for such mutex.
2341 error = suword32(&m->m_owner, umtx_unlock_val(flags, rb) |
2346 umtxq_signal(&key, 1);
2353 mtx_lock(&umtx_lock);
2355 uq->uq_inherited_pri = new_inherited_pri;
2357 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2358 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2360 if (pri > UPRI(uq2->uq_thread))
2361 pri = UPRI(uq2->uq_thread);
2364 if (pri > uq->uq_inherited_pri)
2365 pri = uq->uq_inherited_pri;
2367 sched_lend_user_prio(td, pri);
2369 mtx_unlock(&umtx_lock);
2371 umtx_key_release(&key);
2376 do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
2377 uint32_t *old_ceiling)
2380 uint32_t flags, id, owner, save_ceiling;
2383 error = fueword32(&m->m_flags, &flags);
2386 if ((flags & UMUTEX_PRIO_PROTECT) == 0)
2388 if (ceiling > RTP_PRIO_MAX)
2392 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
2393 TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
2397 umtxq_lock(&uq->uq_key);
2398 umtxq_busy(&uq->uq_key);
2399 umtxq_unlock(&uq->uq_key);
2401 rv = fueword32(&m->m_ceilings[0], &save_ceiling);
2407 rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
2408 id | UMUTEX_CONTESTED);
2415 MPASS(owner == UMUTEX_CONTESTED);
2416 rv = suword32(&m->m_ceilings[0], ceiling);
2417 rv1 = suword32(&m->m_owner, UMUTEX_CONTESTED);
2418 error = (rv == 0 && rv1 == 0) ? 0: EFAULT;
2422 if ((owner & ~UMUTEX_CONTESTED) == id) {
2423 rv = suword32(&m->m_ceilings[0], ceiling);
2424 error = rv == 0 ? 0 : EFAULT;
2428 if (owner == UMUTEX_RB_OWNERDEAD) {
2431 } else if (owner == UMUTEX_RB_NOTRECOV) {
2432 error = ENOTRECOVERABLE;
2437 * If we caught a signal, we have retried and now
2444 * We set the contested bit, sleep. Otherwise the lock changed
2445 * and we need to retry or we lost a race to the thread
2446 * unlocking the umtx.
2448 umtxq_lock(&uq->uq_key);
2450 umtxq_unbusy(&uq->uq_key);
2451 error = umtxq_sleep(uq, "umtxpp", NULL);
2453 umtxq_unlock(&uq->uq_key);
2455 umtxq_lock(&uq->uq_key);
2457 umtxq_signal(&uq->uq_key, INT_MAX);
2458 umtxq_unbusy(&uq->uq_key);
2459 umtxq_unlock(&uq->uq_key);
2460 umtx_key_release(&uq->uq_key);
2461 if (error == 0 && old_ceiling != NULL) {
2462 rv = suword32(old_ceiling, save_ceiling);
2463 error = rv == 0 ? 0 : EFAULT;
2469 * Lock a userland POSIX mutex.
2472 do_lock_umutex(struct thread *td, struct umutex *m,
2473 struct _umtx_time *timeout, int mode)
2478 error = fueword32(&m->m_flags, &flags);
2482 switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2484 error = do_lock_normal(td, m, flags, timeout, mode);
2486 case UMUTEX_PRIO_INHERIT:
2487 error = do_lock_pi(td, m, flags, timeout, mode);
2489 case UMUTEX_PRIO_PROTECT:
2490 error = do_lock_pp(td, m, flags, timeout, mode);
2495 if (timeout == NULL) {
2496 if (error == EINTR && mode != _UMUTEX_WAIT)
2499 /* Timed-locking is not restarted. */
2500 if (error == ERESTART)
2507 * Unlock a userland POSIX mutex.
2510 do_unlock_umutex(struct thread *td, struct umutex *m, bool rb)
2515 error = fueword32(&m->m_flags, &flags);
2519 switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2521 return (do_unlock_normal(td, m, flags, rb));
2522 case UMUTEX_PRIO_INHERIT:
2523 return (do_unlock_pi(td, m, flags, rb));
2524 case UMUTEX_PRIO_PROTECT:
2525 return (do_unlock_pp(td, m, flags, rb));
2532 do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
2533 struct timespec *timeout, u_long wflags)
2535 struct abs_timeout timo;
2537 uint32_t flags, clockid, hasw;
2541 error = fueword32(&cv->c_flags, &flags);
2544 error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);
2548 if ((wflags & CVWAIT_CLOCKID) != 0) {
2549 error = fueword32(&cv->c_clockid, &clockid);
2551 umtx_key_release(&uq->uq_key);
2554 if (clockid < CLOCK_REALTIME ||
2555 clockid >= CLOCK_THREAD_CPUTIME_ID) {
2556 /* hmm, only HW clock id will work. */
2557 umtx_key_release(&uq->uq_key);
2561 clockid = CLOCK_REALTIME;
2564 umtxq_lock(&uq->uq_key);
2565 umtxq_busy(&uq->uq_key);
2567 umtxq_unlock(&uq->uq_key);
2570 * Set c_has_waiters to 1 before releasing user mutex, also
2571 * don't modify cache line when unnecessary.
2573 error = fueword32(&cv->c_has_waiters, &hasw);
2574 if (error == 0 && hasw == 0)
2575 suword32(&cv->c_has_waiters, 1);
2577 umtxq_unbusy_unlocked(&uq->uq_key);
2579 error = do_unlock_umutex(td, m, false);
2581 if (timeout != NULL)
2582 abs_timeout_init(&timo, clockid, (wflags & CVWAIT_ABSTIME) != 0,
2585 umtxq_lock(&uq->uq_key);
2587 error = umtxq_sleep(uq, "ucond", timeout == NULL ?
2591 if ((uq->uq_flags & UQF_UMTXQ) == 0)
2595 * This must be timeout,interrupted by signal or
2596 * surprious wakeup, clear c_has_waiter flag when
2599 umtxq_busy(&uq->uq_key);
2600 if ((uq->uq_flags & UQF_UMTXQ) != 0) {
2601 int oldlen = uq->uq_cur_queue->length;
2604 umtxq_unlock(&uq->uq_key);
2605 suword32(&cv->c_has_waiters, 0);
2606 umtxq_lock(&uq->uq_key);
2609 umtxq_unbusy(&uq->uq_key);
2610 if (error == ERESTART)
2614 umtxq_unlock(&uq->uq_key);
2615 umtx_key_release(&uq->uq_key);
2620 * Signal a userland condition variable.
2623 do_cv_signal(struct thread *td, struct ucond *cv)
2625 struct umtx_key key;
2626 int error, cnt, nwake;
2629 error = fueword32(&cv->c_flags, &flags);
2632 if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
2636 cnt = umtxq_count(&key);
2637 nwake = umtxq_signal(&key, 1);
2640 error = suword32(&cv->c_has_waiters, 0);
2647 umtx_key_release(&key);
2652 do_cv_broadcast(struct thread *td, struct ucond *cv)
2654 struct umtx_key key;
2658 error = fueword32(&cv->c_flags, &flags);
2661 if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
2666 umtxq_signal(&key, INT_MAX);
2669 error = suword32(&cv->c_has_waiters, 0);
2673 umtxq_unbusy_unlocked(&key);
2675 umtx_key_release(&key);
2680 do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag,
2681 struct _umtx_time *timeout)
2683 struct abs_timeout timo;
2685 uint32_t flags, wrflags;
2686 int32_t state, oldstate;
2687 int32_t blocked_readers;
2688 int error, error1, rv;
2691 error = fueword32(&rwlock->rw_flags, &flags);
2694 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2698 if (timeout != NULL)
2699 abs_timeout_init2(&timo, timeout);
2701 wrflags = URWLOCK_WRITE_OWNER;
2702 if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
2703 wrflags |= URWLOCK_WRITE_WAITERS;
2706 rv = fueword32(&rwlock->rw_state, &state);
2708 umtx_key_release(&uq->uq_key);
2712 /* try to lock it */
2713 while (!(state & wrflags)) {
2714 if (__predict_false(URWLOCK_READER_COUNT(state) ==
2715 URWLOCK_MAX_READERS)) {
2716 umtx_key_release(&uq->uq_key);
2719 rv = casueword32(&rwlock->rw_state, state,
2720 &oldstate, state + 1);
2722 umtx_key_release(&uq->uq_key);
2726 MPASS(oldstate == state);
2727 umtx_key_release(&uq->uq_key);
2730 error = thread_check_susp(td, true);
2739 /* grab monitor lock */
2740 umtxq_lock(&uq->uq_key);
2741 umtxq_busy(&uq->uq_key);
2742 umtxq_unlock(&uq->uq_key);
2745 * re-read the state, in case it changed between the try-lock above
2746 * and the check below
2748 rv = fueword32(&rwlock->rw_state, &state);
2752 /* set read contention bit */
2753 while (error == 0 && (state & wrflags) &&
2754 !(state & URWLOCK_READ_WAITERS)) {
2755 rv = casueword32(&rwlock->rw_state, state,
2756 &oldstate, state | URWLOCK_READ_WAITERS);
2762 MPASS(oldstate == state);
2766 error = thread_check_susp(td, false);
2771 umtxq_unbusy_unlocked(&uq->uq_key);
2775 /* state is changed while setting flags, restart */
2776 if (!(state & wrflags)) {
2777 umtxq_unbusy_unlocked(&uq->uq_key);
2778 error = thread_check_susp(td, true);
2786 * Contention bit is set, before sleeping, increase
2787 * read waiter count.
2789 rv = fueword32(&rwlock->rw_blocked_readers,
2792 umtxq_unbusy_unlocked(&uq->uq_key);
2796 suword32(&rwlock->rw_blocked_readers, blocked_readers+1);
2798 while (state & wrflags) {
2799 umtxq_lock(&uq->uq_key);
2801 umtxq_unbusy(&uq->uq_key);
2803 error = umtxq_sleep(uq, "urdlck", timeout == NULL ?
2806 umtxq_busy(&uq->uq_key);
2808 umtxq_unlock(&uq->uq_key);
2811 rv = fueword32(&rwlock->rw_state, &state);
2818 /* decrease read waiter count, and may clear read contention bit */
2819 rv = fueword32(&rwlock->rw_blocked_readers,
2822 umtxq_unbusy_unlocked(&uq->uq_key);
2826 suword32(&rwlock->rw_blocked_readers, blocked_readers-1);
2827 if (blocked_readers == 1) {
2828 rv = fueword32(&rwlock->rw_state, &state);
2830 umtxq_unbusy_unlocked(&uq->uq_key);
2835 rv = casueword32(&rwlock->rw_state, state,
2836 &oldstate, state & ~URWLOCK_READ_WAITERS);
2842 MPASS(oldstate == state);
2846 error1 = thread_check_susp(td, false);
2855 umtxq_unbusy_unlocked(&uq->uq_key);
2859 umtx_key_release(&uq->uq_key);
2860 if (error == ERESTART)
2866 do_rw_wrlock(struct thread *td, struct urwlock *rwlock, struct _umtx_time *timeout)
2868 struct abs_timeout timo;
2871 int32_t state, oldstate;
2872 int32_t blocked_writers;
2873 int32_t blocked_readers;
2874 int error, error1, rv;
2877 error = fueword32(&rwlock->rw_flags, &flags);
2880 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2884 if (timeout != NULL)
2885 abs_timeout_init2(&timo, timeout);
2887 blocked_readers = 0;
2889 rv = fueword32(&rwlock->rw_state, &state);
2891 umtx_key_release(&uq->uq_key);
2894 while ((state & URWLOCK_WRITE_OWNER) == 0 &&
2895 URWLOCK_READER_COUNT(state) == 0) {
2896 rv = casueword32(&rwlock->rw_state, state,
2897 &oldstate, state | URWLOCK_WRITE_OWNER);
2899 umtx_key_release(&uq->uq_key);
2903 MPASS(oldstate == state);
2904 umtx_key_release(&uq->uq_key);
2908 error = thread_check_susp(td, true);
2914 if ((state & (URWLOCK_WRITE_OWNER |
2915 URWLOCK_WRITE_WAITERS)) == 0 &&
2916 blocked_readers != 0) {
2917 umtxq_lock(&uq->uq_key);
2918 umtxq_busy(&uq->uq_key);
2919 umtxq_signal_queue(&uq->uq_key, INT_MAX,
2921 umtxq_unbusy(&uq->uq_key);
2922 umtxq_unlock(&uq->uq_key);
2928 /* grab monitor lock */
2929 umtxq_lock(&uq->uq_key);
2930 umtxq_busy(&uq->uq_key);
2931 umtxq_unlock(&uq->uq_key);
2934 * Re-read the state, in case it changed between the
2935 * try-lock above and the check below.
2937 rv = fueword32(&rwlock->rw_state, &state);
2941 while (error == 0 && ((state & URWLOCK_WRITE_OWNER) ||
2942 URWLOCK_READER_COUNT(state) != 0) &&
2943 (state & URWLOCK_WRITE_WAITERS) == 0) {
2944 rv = casueword32(&rwlock->rw_state, state,
2945 &oldstate, state | URWLOCK_WRITE_WAITERS);
2951 MPASS(oldstate == state);
2955 error = thread_check_susp(td, false);
2960 umtxq_unbusy_unlocked(&uq->uq_key);
2964 if ((state & URWLOCK_WRITE_OWNER) == 0 &&
2965 URWLOCK_READER_COUNT(state) == 0) {
2966 umtxq_unbusy_unlocked(&uq->uq_key);
2967 error = thread_check_susp(td, false);
2973 rv = fueword32(&rwlock->rw_blocked_writers,
2976 umtxq_unbusy_unlocked(&uq->uq_key);
2980 suword32(&rwlock->rw_blocked_writers, blocked_writers + 1);
2982 while ((state & URWLOCK_WRITE_OWNER) ||
2983 URWLOCK_READER_COUNT(state) != 0) {
2984 umtxq_lock(&uq->uq_key);
2985 umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
2986 umtxq_unbusy(&uq->uq_key);
2988 error = umtxq_sleep(uq, "uwrlck", timeout == NULL ?
2991 umtxq_busy(&uq->uq_key);
2992 umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
2993 umtxq_unlock(&uq->uq_key);
2996 rv = fueword32(&rwlock->rw_state, &state);
3003 rv = fueword32(&rwlock->rw_blocked_writers,
3006 umtxq_unbusy_unlocked(&uq->uq_key);
3010 suword32(&rwlock->rw_blocked_writers, blocked_writers-1);
3011 if (blocked_writers == 1) {
3012 rv = fueword32(&rwlock->rw_state, &state);
3014 umtxq_unbusy_unlocked(&uq->uq_key);
3019 rv = casueword32(&rwlock->rw_state, state,
3020 &oldstate, state & ~URWLOCK_WRITE_WAITERS);
3026 MPASS(oldstate == state);
3030 error1 = thread_check_susp(td, false);
3032 * We are leaving the URWLOCK_WRITE_WAITERS
3033 * behind, but this should not harm the
3042 rv = fueword32(&rwlock->rw_blocked_readers,
3045 umtxq_unbusy_unlocked(&uq->uq_key);
3050 blocked_readers = 0;
3052 umtxq_unbusy_unlocked(&uq->uq_key);
3055 umtx_key_release(&uq->uq_key);
3056 if (error == ERESTART)
3062 do_rw_unlock(struct thread *td, struct urwlock *rwlock)
3066 int32_t state, oldstate;
3067 int error, rv, q, count;
3070 error = fueword32(&rwlock->rw_flags, &flags);
3073 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
3077 error = fueword32(&rwlock->rw_state, &state);
3082 if (state & URWLOCK_WRITE_OWNER) {
3084 rv = casueword32(&rwlock->rw_state, state,
3085 &oldstate, state & ~URWLOCK_WRITE_OWNER);
3092 if (!(oldstate & URWLOCK_WRITE_OWNER)) {
3096 error = thread_check_susp(td, true);
3102 } else if (URWLOCK_READER_COUNT(state) != 0) {
3104 rv = casueword32(&rwlock->rw_state, state,
3105 &oldstate, state - 1);
3112 if (URWLOCK_READER_COUNT(oldstate) == 0) {
3116 error = thread_check_susp(td, true);
3129 if (!(flags & URWLOCK_PREFER_READER)) {
3130 if (state & URWLOCK_WRITE_WAITERS) {
3131 count = 1;
3132 q = UMTX_EXCLUSIVE_QUEUE;
3133 } else if (state & URWLOCK_READ_WAITERS) {
3134 count = INT_MAX;
3135 q = UMTX_SHARED_QUEUE;
3136 }
3137 } else {
3138 if (state & URWLOCK_READ_WAITERS) {
3139 count = INT_MAX;
3140 q = UMTX_SHARED_QUEUE;
3141 } else if (state & URWLOCK_WRITE_WAITERS) {
3142 count = 1;
3143 q = UMTX_EXCLUSIVE_QUEUE;
3148 umtxq_lock(&uq->uq_key);
3149 umtxq_busy(&uq->uq_key);
3150 umtxq_signal_queue(&uq->uq_key, count, q);
3151 umtxq_unbusy(&uq->uq_key);
3152 umtxq_unlock(&uq->uq_key);
3155 umtx_key_release(&uq->uq_key);
3159 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
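/*
 * Wait on an old-style _usem semaphore, kept only for FreeBSD 9/10
 * compatibility: publish _has_waiters, re-check _count, and sleep
 * on the umtx queue until woken or timed out.
 */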
3161 do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
3163 struct abs_timeout timo;
3165 uint32_t flags, count, count1;
3169 error = fueword32(&sem->_flags, &flags);
3172 error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
3176 if (timeout != NULL)
3177 abs_timeout_init2(&timo, timeout);
3180 umtxq_lock(&uq->uq_key);
3181 umtxq_busy(&uq->uq_key);
3183 umtxq_unlock(&uq->uq_key);
3184 rv = casueword32(&sem->_has_waiters, 0, &count1, 1);
3186 rv1 = fueword32(&sem->_count, &count);
3187 if (rv == -1 || (rv == 0 && (rv1 == -1 || count != 0)) ||
3188 (rv == 1 && count1 == 0)) {
3189 umtxq_lock(&uq->uq_key);
3190 umtxq_unbusy(&uq->uq_key);
3192 umtxq_unlock(&uq->uq_key);
3194 rv = thread_check_susp(td, true);
3202 error = rv == -1 ? EFAULT : 0;
3205 umtxq_lock(&uq->uq_key);
3206 umtxq_unbusy(&uq->uq_key);
3208 error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);
3210 if ((uq->uq_flags & UQF_UMTXQ) == 0)
3214 /* A relative timeout cannot be restarted. */
3215 if (error == ERESTART && timeout != NULL &&
3216 (timeout->_flags & UMTX_ABSTIME) == 0)
3219 umtxq_unlock(&uq->uq_key);
3221 umtx_key_release(&uq->uq_key);
3226 * Signal a userland semaphore.
3229 do_sem_wake(struct thread *td, struct _usem *sem)
3231 struct umtx_key key;
3235 error = fueword32(&sem->_flags, &flags);
3238 if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
3242 cnt = umtxq_count(&key);
3245 * The count is greater than zero, which means the memory
3246 * is still referenced by user code, so it is safe to
3247 * update the _has_waiters flag.
3251 error = suword32(&sem->_has_waiters, 0);
3256 umtxq_signal(&key, 1);
3260 umtx_key_release(&key);
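/*
 * Wait on a _usem2 semaphore. The _count word encodes the counter
 * (USEM_COUNT()) together with the USEM_HAS_WAITERS bit; a waiter
 * sets the bit with casueword32() before sleeping so that wakers
 * know the umtx queue must be signalled.
 */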
3266 do_sem2_wait(struct thread *td, struct _usem2 *sem, struct _umtx_time *timeout)
3268 struct abs_timeout timo;
3270 uint32_t count, flags;
3274 rv = fueword32(&sem->_flags, &flags);
if (rv == -1)
return (EFAULT);
3275 if (timeout != NULL)
3276 abs_timeout_init2(&timo, timeout);
3279 error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
3282 umtxq_lock(&uq->uq_key);
3283 umtxq_busy(&uq->uq_key);
3285 umtxq_unlock(&uq->uq_key);
3286 rv = fueword32(&sem->_count, &count);
3288 umtxq_lock(&uq->uq_key);
3289 umtxq_unbusy(&uq->uq_key);
3291 umtxq_unlock(&uq->uq_key);
3292 umtx_key_release(&uq->uq_key);
3296 if (USEM_COUNT(count) != 0) {
3297 umtxq_lock(&uq->uq_key);
3298 umtxq_unbusy(&uq->uq_key);
3300 umtxq_unlock(&uq->uq_key);
3301 umtx_key_release(&uq->uq_key);
3304 if (count == USEM_HAS_WAITERS)
3306 rv = casueword32(&sem->_count, 0, &count, USEM_HAS_WAITERS);
3309 umtxq_lock(&uq->uq_key);
3310 umtxq_unbusy(&uq->uq_key);
3312 umtxq_unlock(&uq->uq_key);
3313 umtx_key_release(&uq->uq_key);
3316 rv = thread_check_susp(td, true);
3321 umtxq_lock(&uq->uq_key);
3322 umtxq_unbusy(&uq->uq_key);
3324 error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);
3326 if ((uq->uq_flags & UQF_UMTXQ) == 0)
3330 if (timeout != NULL && (timeout->_flags & UMTX_ABSTIME) == 0) {
3331 /* A relative timeout cannot be restarted. */
3332 if (error == ERESTART)
3334 if (error == EINTR) {
3335 abs_timeout_update(&timo);
3336 timespecsub(&timo.end, &timo.cur,
3337 &timeout->_timeout);
3341 umtxq_unlock(&uq->uq_key);
3342 umtx_key_release(&uq->uq_key);
3347 * Signal a userland semaphore.
3350 do_sem2_wake(struct thread *td, struct _usem2 *sem)
3352 struct umtx_key key;
3354 uint32_t count, flags;
3356 rv = fueword32(&sem->_flags, &flags);
3359 if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
3363 cnt = umtxq_count(&key);
3366 * If this was the last sleeping thread, clear the waiters
3367 * flag in _count.
3371 rv = fueword32(&sem->_count, &count);
3372 while (rv != -1 && count & USEM_HAS_WAITERS) {
3373 rv = casueword32(&sem->_count, count, &count,
3374 count & ~USEM_HAS_WAITERS);
3376 rv = thread_check_susp(td, true);
3389 umtxq_signal(&key, 1);
3393 umtx_key_release(&key);
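/*
 * Copy in a timespec from userland and reject non-canonical values
 * (negative tv_sec, or tv_nsec outside the [0, 1000000000) range).
 */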
3398 umtx_copyin_timeout(const void *uaddr, struct timespec *tsp)
3402 error = copyin(uaddr, tsp, sizeof(*tsp));
3404 if (tsp->tv_sec < 0 ||
3405 tsp->tv_nsec >= 1000000000 ||
3406 tsp->tv_nsec < 0)
3407 error = EINVAL;
3413 umtx_copyin_umtx_time(const void *uaddr, size_t size, struct _umtx_time *tp)
3417 if (size <= sizeof(tp->_timeout)) {
3418 tp->_clockid = CLOCK_REALTIME;
3420 error = copyin(uaddr, &tp->_timeout, sizeof(tp->_timeout));
3422 error = copyin(uaddr, tp, sizeof(*tp));
3425 if (tp->_timeout.tv_sec < 0 ||
3426 tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0)
3427 return (EINVAL);
3432 umtx_copyin_robust_lists(const void *uaddr, size_t size,
3433 struct umtx_robust_lists_params *rb)
3436 if (size > sizeof(*rb))
3437 return (EINVAL);
3438 return (copyin(uaddr, rb, size));
3442 umtx_copyout_timeout(void *uaddr, size_t sz, struct timespec *tsp)
3446 * Should be guaranteed by the caller, sz == uaddr1 - sizeof(_umtx_time)
3447 * and we're only called if sz >= sizeof(timespec) as supplied in the
3448 * umtx_copyops.
3450 KASSERT(sz >= sizeof(*tsp),
3451 ("umtx_copyops specifies incorrect sizes"));
3453 return (copyout(tsp, uaddr, sizeof(*tsp)));
3457 __umtx_op_unimpl(struct thread *td, struct _umtx_op_args *uap,
3458 const struct umtx_copyops *ops __unused)
3461 return (EOPNOTSUPP);
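/*
 * For the timed operations below, uaddr2 points at the timeout
 * structure and uaddr1 carries its size, which lets
 * copyin_umtx_time() accept either a bare timespec or a full
 * _umtx_time.
 */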
3465 __umtx_op_wait(struct thread *td, struct _umtx_op_args *uap,
3466 const struct umtx_copyops *ops)
3468 struct _umtx_time timeout, *tm_p;
3471 if (uap->uaddr2 == NULL)
3474 error = ops->copyin_umtx_time(
3475 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3480 return (do_wait(td, uap->obj, uap->val, tm_p, ops->compat32, 0));
3484 __umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap,
3485 const struct umtx_copyops *ops)
3487 struct _umtx_time timeout, *tm_p;
3490 if (uap->uaddr2 == NULL)
3493 error = ops->copyin_umtx_time(
3494 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3499 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
3503 __umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap,
3504 const struct umtx_copyops *ops)
3506 struct _umtx_time *tm_p, timeout;
3509 if (uap->uaddr2 == NULL)
3512 error = ops->copyin_umtx_time(
3513 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3518 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
3522 __umtx_op_wake(struct thread *td, struct _umtx_op_args *uap,
3523 const struct umtx_copyops *ops __unused)
3526 return (kern_umtx_wake(td, uap->obj, uap->val, 0));
3529 #define BATCH_SIZE 128
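/*
 * The target addresses for UMTX_OP_NWAKE_PRIVATE are copied in
 * BATCH_SIZE chunks so that arbitrarily large requests consume
 * only a bounded amount of kernel stack.
 */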
3531 __umtx_op_nwake_private_native(struct thread *td, struct _umtx_op_args *uap)
3533 char *uaddrs[BATCH_SIZE], **upp;
3534 int count, error, i, pos, tocopy;
3536 upp = (char **)uap->obj;
3538 for (count = uap->val, pos = 0; count > 0; count -= tocopy,
3539 pos += tocopy) {
3540 tocopy = MIN(count, BATCH_SIZE);
3541 error = copyin(upp + pos, uaddrs, tocopy * sizeof(char *));
3544 for (i = 0; i < tocopy; ++i) {
3545 kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
3553 __umtx_op_nwake_private_compat32(struct thread *td, struct _umtx_op_args *uap)
3555 uint32_t uaddrs[BATCH_SIZE], *upp;
3556 int count, error, i, pos, tocopy;
3558 upp = (uint32_t *)uap->obj;
3560 for (count = uap->val, pos = 0; count > 0; count -= tocopy,
3561 pos += tocopy) {
3562 tocopy = MIN(count, BATCH_SIZE);
3563 error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t));
3566 for (i = 0; i < tocopy; ++i) {
3567 kern_umtx_wake(td, (void *)(uintptr_t)uaddrs[i],
3568 INT_MAX, 1);
3576 __umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap,
3577 const struct umtx_copyops *ops)
3581 return (__umtx_op_nwake_private_compat32(td, uap));
3582 return (__umtx_op_nwake_private_native(td, uap));
3586 __umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap,
3587 const struct umtx_copyops *ops __unused)
3590 return (kern_umtx_wake(td, uap->obj, uap->val, 1));
3594 __umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap,
3595 const struct umtx_copyops *ops)
3597 struct _umtx_time *tm_p, timeout;
3600 /* Allow a null timespec (wait forever). */
3601 if (uap->uaddr2 == NULL)
3604 error = ops->copyin_umtx_time(
3605 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3610 return (do_lock_umutex(td, uap->obj, tm_p, 0));
3614 __umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap,
3615 const struct umtx_copyops *ops __unused)
3618 return (do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY));
3622 __umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap,
3623 const struct umtx_copyops *ops)
3625 struct _umtx_time *tm_p, timeout;
3628 /* Allow a null timespec (wait forever). */
3629 if (uap->uaddr2 == NULL)
3632 error = ops->copyin_umtx_time(
3633 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3638 return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
3642 __umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap,
3643 const struct umtx_copyops *ops __unused)
3646 return (do_wake_umutex(td, uap->obj));
3650 __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap,
3651 const struct umtx_copyops *ops __unused)
3654 return (do_unlock_umutex(td, uap->obj, false));
3658 __umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap,
3659 const struct umtx_copyops *ops __unused)
3662 return (do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1));
3666 __umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap,
3667 const struct umtx_copyops *ops)
3669 struct timespec *ts, timeout;
3672 /* Allow a null timespec (wait forever). */
3673 if (uap->uaddr2 == NULL)
3676 error = ops->copyin_timeout(uap->uaddr2, &timeout);
3681 return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
3685 __umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap,
3686 const struct umtx_copyops *ops __unused)
3689 return (do_cv_signal(td, uap->obj));
3693 __umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap,
3694 const struct umtx_copyops *ops __unused)
3697 return (do_cv_broadcast(td, uap->obj));
3701 __umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap,
3702 const struct umtx_copyops *ops)
3704 struct _umtx_time timeout;
3707 /* Allow a null timespec (wait forever). */
3708 if (uap->uaddr2 == NULL) {
3709 error = do_rw_rdlock(td, uap->obj, uap->val, 0);
3711 error = ops->copyin_umtx_time(uap->uaddr2,
3712 (size_t)uap->uaddr1, &timeout);
3715 error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
3721 __umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap,
3722 const struct umtx_copyops *ops)
3724 struct _umtx_time timeout;
3727 /* Allow a null timespec (wait forever). */
3728 if (uap->uaddr2 == NULL) {
3729 error = do_rw_wrlock(td, uap->obj, 0);
3731 error = ops->copyin_umtx_time(uap->uaddr2,
3732 (size_t)uap->uaddr1, &timeout);
3736 error = do_rw_wrlock(td, uap->obj, &timeout);
3742 __umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap,
3743 const struct umtx_copyops *ops __unused)
3746 return (do_rw_unlock(td, uap->obj));
3749 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
3751 __umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap,
3752 const struct umtx_copyops *ops)
3754 struct _umtx_time *tm_p, timeout;
3757 /* Allow a null timespec (wait forever). */
3758 if (uap->uaddr2 == NULL)
3761 error = ops->copyin_umtx_time(
3762 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3767 return (do_sem_wait(td, uap->obj, tm_p));
3771 __umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap,
3772 const struct umtx_copyops *ops __unused)
3775 return (do_sem_wake(td, uap->obj));
3780 __umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap,
3781 const struct umtx_copyops *ops __unused)
3784 return (do_wake2_umutex(td, uap->obj, uap->val));
3788 __umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap,
3789 const struct umtx_copyops *ops)
3791 struct _umtx_time *tm_p, timeout;
3795 /* Allow a null timespec (wait forever). */
3796 if (uap->uaddr2 == NULL) {
3800 uasize = (size_t)uap->uaddr1;
3801 error = ops->copyin_umtx_time(uap->uaddr2, uasize, &timeout);
3806 error = do_sem2_wait(td, uap->obj, tm_p);
3807 if (error == EINTR && uap->uaddr2 != NULL &&
3808 (timeout._flags & UMTX_ABSTIME) == 0 &&
3809 uasize >= ops->umtx_time_sz + ops->timespec_sz) {
3810 error = ops->copyout_timeout(
3811 (void *)((uintptr_t)uap->uaddr2 + ops->umtx_time_sz),
3812 uasize - ops->umtx_time_sz, &timeout._timeout);
3822 __umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap,
3823 const struct umtx_copyops *ops __unused)
3826 return (do_sem2_wake(td, uap->obj));
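/*
 * umtx SHM registry. A umtx_shm_reg binds the umtx key of a user
 * word (shared VM object plus offset) to an anonymous POSIX shared
 * memory object one page in size, which lets process-shared
 * synchronization objects be exported as file descriptors. The
 * registrations are hashed by key and reaped when the backing VM
 * object is terminated.
 */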
3829 #define USHM_OBJ_UMTX(o) \
3830 ((struct umtx_shm_obj_list *)(&(o)->umtx_data))
3832 #define USHMF_REG_LINKED 0x0001
3833 #define USHMF_OBJ_LINKED 0x0002
3834 struct umtx_shm_reg {
3835 TAILQ_ENTRY(umtx_shm_reg) ushm_reg_link;
3836 LIST_ENTRY(umtx_shm_reg) ushm_obj_link;
3837 struct umtx_key ushm_key;
3838 struct ucred *ushm_cred;
3839 struct shmfd *ushm_obj;
3840 u_int ushm_refcnt;
3841 u_int ushm_flags;
3842 };
3844 LIST_HEAD(umtx_shm_obj_list, umtx_shm_reg);
3845 TAILQ_HEAD(umtx_shm_reg_head, umtx_shm_reg);
3847 static uma_zone_t umtx_shm_reg_zone;
3848 static struct umtx_shm_reg_head umtx_shm_registry[UMTX_CHAINS];
3849 static struct mtx umtx_shm_lock;
3850 static struct umtx_shm_reg_head umtx_shm_reg_delfree =
3851 TAILQ_HEAD_INITIALIZER(umtx_shm_reg_delfree);
3853 static void umtx_shm_free_reg(struct umtx_shm_reg *reg);
3856 umtx_shm_reg_delfree_tq(void *context __unused, int pending __unused)
3858 struct umtx_shm_reg_head d;
3859 struct umtx_shm_reg *reg, *reg1;
3862 mtx_lock(&umtx_shm_lock);
3863 TAILQ_CONCAT(&d, &umtx_shm_reg_delfree, ushm_reg_link);
3864 mtx_unlock(&umtx_shm_lock);
3865 TAILQ_FOREACH_SAFE(reg, &d, ushm_reg_link, reg1) {
3866 TAILQ_REMOVE(&d, reg, ushm_reg_link);
3867 umtx_shm_free_reg(reg);
3871 static struct task umtx_shm_reg_delfree_task =
3872 TASK_INITIALIZER(0, umtx_shm_reg_delfree_tq, NULL);
3874 static struct umtx_shm_reg *
3875 umtx_shm_find_reg_locked(const struct umtx_key *key)
3877 struct umtx_shm_reg *reg;
3878 struct umtx_shm_reg_head *reg_head;
3880 KASSERT(key->shared, ("umtx_shm_find_reg_locked: private key"));
3881 mtx_assert(&umtx_shm_lock, MA_OWNED);
3882 reg_head = &umtx_shm_registry[key->hash];
3883 TAILQ_FOREACH(reg, reg_head, ushm_reg_link) {
3884 KASSERT(reg->ushm_key.shared,
3885 ("non-shared key on reg %p %d", reg, reg->ushm_key.shared));
3886 if (reg->ushm_key.info.shared.object ==
3887 key->info.shared.object &&
3888 reg->ushm_key.info.shared.offset ==
3889 key->info.shared.offset) {
3890 KASSERT(reg->ushm_key.type == TYPE_SHM, ("TYPE_USHM"));
3891 KASSERT(reg->ushm_refcnt > 0,
3892 ("reg %p refcnt 0 onlist", reg));
3893 KASSERT((reg->ushm_flags & USHMF_REG_LINKED) != 0,
3894 ("reg %p not linked", reg));
3902 static struct umtx_shm_reg *
3903 umtx_shm_find_reg(const struct umtx_key *key)
3905 struct umtx_shm_reg *reg;
3907 mtx_lock(&umtx_shm_lock);
3908 reg = umtx_shm_find_reg_locked(key);
3909 mtx_unlock(&umtx_shm_lock);
3914 umtx_shm_free_reg(struct umtx_shm_reg *reg)
3917 chgumtxcnt(reg->ushm_cred->cr_ruidinfo, -1, 0);
3918 crfree(reg->ushm_cred);
3919 shm_drop(reg->ushm_obj);
3920 uma_zfree(umtx_shm_reg_zone, reg);
3924 umtx_shm_unref_reg_locked(struct umtx_shm_reg *reg, bool force)
3928 mtx_assert(&umtx_shm_lock, MA_OWNED);
3929 KASSERT(reg->ushm_refcnt > 0, ("ushm_reg %p refcnt 0", reg));
3930 reg->ushm_refcnt--;
3931 res = reg->ushm_refcnt == 0;
3933 if ((reg->ushm_flags & USHMF_REG_LINKED) != 0) {
3934 TAILQ_REMOVE(&umtx_shm_registry[reg->ushm_key.hash],
3935 reg, ushm_reg_link);
3936 reg->ushm_flags &= ~USHMF_REG_LINKED;
3938 if ((reg->ushm_flags & USHMF_OBJ_LINKED) != 0) {
3939 LIST_REMOVE(reg, ushm_obj_link);
3940 reg->ushm_flags &= ~USHMF_OBJ_LINKED;
3947 umtx_shm_unref_reg(struct umtx_shm_reg *reg, bool force)
3953 object = reg->ushm_obj->shm_object;
3954 VM_OBJECT_WLOCK(object);
3955 object->flags |= OBJ_UMTXDEAD;
3956 VM_OBJECT_WUNLOCK(object);
3958 mtx_lock(&umtx_shm_lock);
3959 dofree = umtx_shm_unref_reg_locked(reg, force);
3960 mtx_unlock(&umtx_shm_lock);
3962 umtx_shm_free_reg(reg);
3966 umtx_shm_object_init(vm_object_t object)
3969 LIST_INIT(USHM_OBJ_UMTX(object));
3973 umtx_shm_object_terminated(vm_object_t object)
3975 struct umtx_shm_reg *reg, *reg1;
3979 mtx_lock(&umtx_shm_lock);
3980 LIST_FOREACH_SAFE(reg, USHM_OBJ_UMTX(object), ushm_obj_link, reg1) {
3981 if (umtx_shm_unref_reg_locked(reg, true)) {
3982 TAILQ_INSERT_TAIL(&umtx_shm_reg_delfree, reg,
3983 ushm_reg_link);
3987 mtx_unlock(&umtx_shm_lock);
3989 taskqueue_enqueue(taskqueue_thread, &umtx_shm_reg_delfree_task);
3993 umtx_shm_create_reg(struct thread *td, const struct umtx_key *key,
3994 struct umtx_shm_reg **res)
3996 struct umtx_shm_reg *reg, *reg1;
4000 reg = umtx_shm_find_reg(key);
4005 cred = td->td_ucred;
4006 if (!chgumtxcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_UMTXP)))
4008 reg = uma_zalloc(umtx_shm_reg_zone, M_WAITOK | M_ZERO);
4009 reg->ushm_refcnt = 1;
4010 bcopy(key, &reg->ushm_key, sizeof(*key));
4011 reg->ushm_obj = shm_alloc(td->td_ucred, O_RDWR);
4012 reg->ushm_cred = crhold(cred);
4013 error = shm_dotruncate(reg->ushm_obj, PAGE_SIZE);
4015 umtx_shm_free_reg(reg);
4018 mtx_lock(&umtx_shm_lock);
4019 reg1 = umtx_shm_find_reg_locked(key);
4021 mtx_unlock(&umtx_shm_lock);
4022 umtx_shm_free_reg(reg);
4027 TAILQ_INSERT_TAIL(&umtx_shm_registry[key->hash], reg, ushm_reg_link);
4028 LIST_INSERT_HEAD(USHM_OBJ_UMTX(key->info.shared.object), reg,
4029 ushm_obj_link);
4030 reg->ushm_flags = USHMF_REG_LINKED | USHMF_OBJ_LINKED;
4031 mtx_unlock(&umtx_shm_lock);
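/*
 * Report whether the registration backing the page at addr is
 * still usable. Returns ENOTTY once the backing VM object has
 * been marked OBJ_UMTXDEAD by a forced unref.
 */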
4037 umtx_shm_alive(struct thread *td, void *addr)
4040 vm_map_entry_t entry;
4047 map = &td->td_proc->p_vmspace->vm_map;
4048 res = vm_map_lookup(&map, (uintptr_t)addr, VM_PROT_READ, &entry,
4049 &object, &pindex, &prot, &wired);
4050 if (res != KERN_SUCCESS)
4055 ret = (object->flags & OBJ_UMTXDEAD) != 0 ? ENOTTY : 0;
4056 vm_map_lookup_done(map, entry);
4065 umtx_shm_reg_zone = uma_zcreate("umtx_shm", sizeof(struct umtx_shm_reg),
4066 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
4067 mtx_init(&umtx_shm_lock, "umtxshm", NULL, MTX_DEF);
4068 for (i = 0; i < nitems(umtx_shm_registry); i++)
4069 TAILQ_INIT(&umtx_shm_registry[i]);
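/*
 * Implement UMTX_OP_SHM. Exactly one of UMTX_SHM_CREAT,
 * UMTX_SHM_LOOKUP, UMTX_SHM_DESTROY or UMTX_SHM_ALIVE must be set
 * in flags; CREAT and LOOKUP return a shared memory file
 * descriptor as the syscall result. Illustrative userland sketch
 * (error handling omitted):
 *
 *	fd = _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_CREAT, &word, NULL);
 */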
4073 umtx_shm(struct thread *td, void *addr, u_int flags)
4075 struct umtx_key key;
4076 struct umtx_shm_reg *reg;
4080 if (__bitcount(flags & (UMTX_SHM_CREAT | UMTX_SHM_LOOKUP |
4081 UMTX_SHM_DESTROY | UMTX_SHM_ALIVE)) != 1)
4083 if ((flags & UMTX_SHM_ALIVE) != 0)
4084 return (umtx_shm_alive(td, addr));
4085 error = umtx_key_get(addr, TYPE_SHM, PROCESS_SHARE, &key);
4088 KASSERT(key.shared == 1, ("non-shared key"));
4089 if ((flags & UMTX_SHM_CREAT) != 0) {
4090 error = umtx_shm_create_reg(td, &key, &reg);
4092 reg = umtx_shm_find_reg(&key);
4096 umtx_key_release(&key);
4099 KASSERT(reg != NULL, ("no reg"));
4100 if ((flags & UMTX_SHM_DESTROY) != 0) {
4101 umtx_shm_unref_reg(reg, true);
4105 error = mac_posixshm_check_open(td->td_ucred,
4106 reg->ushm_obj, FFLAGS(O_RDWR));
4109 error = shm_access(reg->ushm_obj, td->td_ucred,
4110 FFLAGS(O_RDWR));
4113 error = falloc_caps(td, &fp, &fd, O_CLOEXEC, NULL);
4115 shm_hold(reg->ushm_obj);
4116 finit(fp, FFLAGS(O_RDWR), DTYPE_SHM, reg->ushm_obj,
4117 &shm_ops);
4118 td->td_retval[0] = fd;
4122 umtx_shm_unref_reg(reg, false);
4127 __umtx_op_shm(struct thread *td, struct _umtx_op_args *uap,
4128 const struct umtx_copyops *ops __unused)
4131 return (umtx_shm(td, uap->uaddr1, uap->val));
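/*
 * Record the calling thread's robust mutex list heads, normally
 * registered once per thread by the threading library. Mixing
 * compat32 and native robust lists in one thread is refused.
 */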
4135 __umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap,
4136 const struct umtx_copyops *ops)
4138 struct umtx_robust_lists_params rb;
4141 if (ops->compat32) {
4142 if ((td->td_pflags2 & TDP2_COMPAT32RB) == 0 &&
4143 (td->td_rb_list != 0 || td->td_rbp_list != 0 ||
4144 td->td_rb_inact != 0))
4146 } else if ((td->td_pflags2 & TDP2_COMPAT32RB) != 0) {
4150 bzero(&rb, sizeof(rb));
4151 error = ops->copyin_robust_lists(uap->uaddr1, uap->val, &rb);
4156 td->td_pflags2 |= TDP2_COMPAT32RB;
4158 td->td_rb_list = rb.robust_list_offset;
4159 td->td_rbp_list = rb.robust_priv_list_offset;
4160 td->td_rb_inact = rb.robust_inact_offset;
4164 #if defined(__i386__) || defined(__amd64__)
4166 * Provide the standard 32-bit definitions for x86, since native/compat32 use a
4167 * 32-bit time_t there. Other architectures just need the i386 definitions
4168 * along with their standard compat32.
4170 struct timespecx32 {
4171 int64_t tv_sec;
4172 int32_t tv_nsec;
4173 };
4175 struct umtx_timex32 {
4176 struct timespecx32 _timeout;
4177 uint32_t _flags;
4178 uint32_t _clockid;
4179 };
4182 #define timespeci386 timespec32
4183 #define umtx_timei386 umtx_time32
4185 #else /* !__i386__ && !__amd64__ */
4186 /* 32-bit architectures can emulate i386, so define these almost everywhere. */
4187 struct timespeci386 {
4188 int32_t tv_sec;
4189 int32_t tv_nsec;
4190 };
4192 struct umtx_timei386 {
4193 struct timespeci386 _timeout;
4194 uint32_t _flags;
4195 uint32_t _clockid;
4196 };
4198 #if defined(__LP64__)
4199 #define timespecx32 timespec32
4200 #define umtx_timex32 umtx_time32
4205 umtx_copyin_robust_lists32(const void *uaddr, size_t size,
4206 struct umtx_robust_lists_params *rbp)
4208 struct umtx_robust_lists_params_compat32 rb32;
4211 if (size > sizeof(rb32))
4212 return (EINVAL);
4213 bzero(&rb32, sizeof(rb32));
4214 error = copyin(uaddr, &rb32, size);
4217 CP(rb32, *rbp, robust_list_offset);
4218 CP(rb32, *rbp, robust_priv_list_offset);
4219 CP(rb32, *rbp, robust_inact_offset);
4225 umtx_copyin_timeouti386(const void *uaddr, struct timespec *tsp)
4227 struct timespeci386 ts32;
4230 error = copyin(uaddr, &ts32, sizeof(ts32));
4232 if (ts32.tv_sec < 0 ||
4233 ts32.tv_nsec >= 1000000000 ||
4234 ts32.tv_nsec < 0)
4235 error = EINVAL;
4236 else {
4237 CP(ts32, *tsp, tv_sec);
4238 CP(ts32, *tsp, tv_nsec);
4245 umtx_copyin_umtx_timei386(const void *uaddr, size_t size, struct _umtx_time *tp)
4247 struct umtx_timei386 t32;
4250 t32._clockid = CLOCK_REALTIME;
4252 if (size <= sizeof(t32._timeout))
4253 error = copyin(uaddr, &t32._timeout, sizeof(t32._timeout));
4255 error = copyin(uaddr, &t32, sizeof(t32));
4258 if (t32._timeout.tv_sec < 0 ||
4259 t32._timeout.tv_nsec >= 1000000000 || t32._timeout.tv_nsec < 0)
4260 return (EINVAL);
4261 TS_CP(t32, *tp, _timeout);
4262 CP(t32, *tp, _flags);
4263 CP(t32, *tp, _clockid);
4268 umtx_copyout_timeouti386(void *uaddr, size_t sz, struct timespec *tsp)
4270 struct timespeci386 remain32 = {
4271 .tv_sec = tsp->tv_sec,
4272 .tv_nsec = tsp->tv_nsec,
4276 * Should be guaranteed by the caller, sz == uaddr1 - sizeof(_umtx_time)
4277 * and we're only called if sz >= sizeof(timespec) as supplied in the
4278 * umtx_copyops.
4280 KASSERT(sz >= sizeof(remain32),
4281 ("umtx_copyops specifies incorrect sizes"));
4283 return (copyout(&remain32, uaddr, sizeof(remain32)));
4285 #endif /* !__i386__ */
4287 #if defined(__i386__) || defined(__LP64__)
4289 umtx_copyin_timeoutx32(const void *uaddr, struct timespec *tsp)
4291 struct timespecx32 ts32;
4294 error = copyin(uaddr, &ts32, sizeof(ts32));
4296 if (ts32.tv_sec < 0 ||
4297 ts32.tv_nsec >= 1000000000 ||
4298 ts32.tv_nsec < 0)
4299 error = EINVAL;
4300 else {
4301 CP(ts32, *tsp, tv_sec);
4302 CP(ts32, *tsp, tv_nsec);
4309 umtx_copyin_umtx_timex32(const void *uaddr, size_t size, struct _umtx_time *tp)
4311 struct umtx_timex32 t32;
4314 t32._clockid = CLOCK_REALTIME;
4316 if (size <= sizeof(t32._timeout))
4317 error = copyin(uaddr, &t32._timeout, sizeof(t32._timeout));
4319 error = copyin(uaddr, &t32, sizeof(t32));
4322 if (t32._timeout.tv_sec < 0 ||
4323 t32._timeout.tv_nsec >= 1000000000 || t32._timeout.tv_nsec < 0)
4324 return (EINVAL);
4325 TS_CP(t32, *tp, _timeout);
4326 CP(t32, *tp, _flags);
4327 CP(t32, *tp, _clockid);
4332 umtx_copyout_timeoutx32(void *uaddr, size_t sz, struct timespec *tsp)
4334 struct timespecx32 remain32 = {
4335 .tv_sec = tsp->tv_sec,
4336 .tv_nsec = tsp->tv_nsec,
4340 * Should be guaranteed by the caller, sz == uaddr1 - sizeof(_umtx_time)
4341 * and we're only called if sz >= sizeof(timespec) as supplied in the
4342 * umtx_copyops.
4344 KASSERT(sz >= sizeof(remain32),
4345 ("umtx_copyops specifies incorrect sizes"));
4347 return (copyout(&remain32, uaddr, sizeof(remain32)));
4349 #endif /* __i386__ || __LP64__ */
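/*
 * A umtx_copyops bundles the copyin/copyout routines and structure
 * sizes for one ABI's timespec, _umtx_time and robust lists
 * layouts, keeping the op handlers themselves ABI-independent.
 */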
4351 typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap,
4352 const struct umtx_copyops *umtx_ops);
4354 static const _umtx_op_func op_table[] = {
4355 [UMTX_OP_RESERVED0] = __umtx_op_unimpl,
4356 [UMTX_OP_RESERVED1] = __umtx_op_unimpl,
4357 [UMTX_OP_WAIT] = __umtx_op_wait,
4358 [UMTX_OP_WAKE] = __umtx_op_wake,
4359 [UMTX_OP_MUTEX_TRYLOCK] = __umtx_op_trylock_umutex,
4360 [UMTX_OP_MUTEX_LOCK] = __umtx_op_lock_umutex,
4361 [UMTX_OP_MUTEX_UNLOCK] = __umtx_op_unlock_umutex,
4362 [UMTX_OP_SET_CEILING] = __umtx_op_set_ceiling,
4363 [UMTX_OP_CV_WAIT] = __umtx_op_cv_wait,
4364 [UMTX_OP_CV_SIGNAL] = __umtx_op_cv_signal,
4365 [UMTX_OP_CV_BROADCAST] = __umtx_op_cv_broadcast,
4366 [UMTX_OP_WAIT_UINT] = __umtx_op_wait_uint,
4367 [UMTX_OP_RW_RDLOCK] = __umtx_op_rw_rdlock,
4368 [UMTX_OP_RW_WRLOCK] = __umtx_op_rw_wrlock,
4369 [UMTX_OP_RW_UNLOCK] = __umtx_op_rw_unlock,
4370 [UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private,
4371 [UMTX_OP_WAKE_PRIVATE] = __umtx_op_wake_private,
4372 [UMTX_OP_MUTEX_WAIT] = __umtx_op_wait_umutex,
4373 [UMTX_OP_MUTEX_WAKE] = __umtx_op_wake_umutex,
4374 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
4375 [UMTX_OP_SEM_WAIT] = __umtx_op_sem_wait,
4376 [UMTX_OP_SEM_WAKE] = __umtx_op_sem_wake,
4378 [UMTX_OP_SEM_WAIT] = __umtx_op_unimpl,
4379 [UMTX_OP_SEM_WAKE] = __umtx_op_unimpl,
4381 [UMTX_OP_NWAKE_PRIVATE] = __umtx_op_nwake_private,
4382 [UMTX_OP_MUTEX_WAKE2] = __umtx_op_wake2_umutex,
4383 [UMTX_OP_SEM2_WAIT] = __umtx_op_sem2_wait,
4384 [UMTX_OP_SEM2_WAKE] = __umtx_op_sem2_wake,
4385 [UMTX_OP_SHM] = __umtx_op_shm,
4386 [UMTX_OP_ROBUST_LISTS] = __umtx_op_robust_lists,
4389 static const struct umtx_copyops umtx_native_ops = {
4390 .copyin_timeout = umtx_copyin_timeout,
4391 .copyin_umtx_time = umtx_copyin_umtx_time,
4392 .copyin_robust_lists = umtx_copyin_robust_lists,
4393 .copyout_timeout = umtx_copyout_timeout,
4394 .timespec_sz = sizeof(struct timespec),
4395 .umtx_time_sz = sizeof(struct _umtx_time),
4399 static const struct umtx_copyops umtx_native_opsi386 = {
4400 .copyin_timeout = umtx_copyin_timeouti386,
4401 .copyin_umtx_time = umtx_copyin_umtx_timei386,
4402 .copyin_robust_lists = umtx_copyin_robust_lists32,
4403 .copyout_timeout = umtx_copyout_timeouti386,
4404 .timespec_sz = sizeof(struct timespeci386),
4405 .umtx_time_sz = sizeof(struct umtx_timei386),
4410 #if defined(__i386__) || defined(__LP64__)
4411 /* i386 can emulate other 32-bit archs, too! */
4412 static const struct umtx_copyops umtx_native_opsx32 = {
4413 .copyin_timeout = umtx_copyin_timeoutx32,
4414 .copyin_umtx_time = umtx_copyin_umtx_timex32,
4415 .copyin_robust_lists = umtx_copyin_robust_lists32,
4416 .copyout_timeout = umtx_copyout_timeoutx32,
4417 .timespec_sz = sizeof(struct timespecx32),
4418 .umtx_time_sz = sizeof(struct umtx_timex32),
4422 #ifdef COMPAT_FREEBSD32
4424 #define umtx_native_ops32 umtx_native_opsi386
4426 #define umtx_native_ops32 umtx_native_opsx32
4428 #endif /* COMPAT_FREEBSD32 */
4429 #endif /* __i386__ || __LP64__ */
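/*
 * UMTX_OP__32BIT and UMTX_OP__I386 may be ORed into the op number
 * to request the 32-bit or i386 time layouts; both are masked off
 * before the op is used to index op_table.
 */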
4431 #define UMTX_OP__FLAGS (UMTX_OP__32BIT | UMTX_OP__I386)
4434 kern__umtx_op(struct thread *td, void *obj, int op, unsigned long val,
4435 void *uaddr1, void *uaddr2, const struct umtx_copyops *ops)
4437 struct _umtx_op_args uap = {
4438 .obj = obj,
4439 .op = op & ~UMTX_OP__FLAGS,
4440 .val = val,
4441 .uaddr1 = uaddr1,
4442 .uaddr2 = uaddr2,
4443 };
4445 if (uap.op >= nitems(op_table))
4446 return (EINVAL);
4447 return ((*op_table[uap.op])(td, &uap, ops));
4451 sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
4453 const struct umtx_copyops *umtx_ops;
4455 umtx_ops = &umtx_native_ops;
4456 #ifdef __LP64__
4457 if ((uap->op & (UMTX_OP__32BIT | UMTX_OP__I386)) != 0) {
4458 if ((uap->op & UMTX_OP__I386) != 0)
4459 umtx_ops = &umtx_native_opsi386;
4461 umtx_ops = &umtx_native_opsx32;
4463 #elif !defined(__i386__)
4464 /* We consider UMTX_OP__32BIT a nop on !i386 ILP32. */
4465 if ((uap->op & UMTX_OP__I386) != 0)
4466 umtx_ops = &umtx_native_opsi386;
4467 #else
4468 /* Likewise, UMTX_OP__I386 is a nop on i386. */
4469 if ((uap->op & UMTX_OP__32BIT) != 0)
4470 umtx_ops = &umtx_native_opsx32;
4471 #endif
4472 return (kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr1,
4473 uap->uaddr2, umtx_ops));
4476 #ifdef COMPAT_FREEBSD32
4478 freebsd32__umtx_op(struct thread *td, struct freebsd32__umtx_op_args *uap)
4481 return (kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr,
4482 uap->uaddr2, &umtx_native_ops32));
4487 umtx_thread_init(struct thread *td)
4490 td->td_umtxq = umtxq_alloc();
4491 td->td_umtxq->uq_thread = td;
4495 umtx_thread_fini(struct thread *td)
4498 umtxq_free(td->td_umtxq);
4502 * Called when a new thread is created, e.g. by fork().
4505 umtx_thread_alloc(struct thread *td)
4510 uq->uq_inherited_pri = PRI_MAX;
4512 KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
4513 KASSERT(uq->uq_thread == td, ("uq_thread != td"));
4514 KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
4515 KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
4521 * Clear the robust lists for all of the process's threads, without
4522 * delaying the cleanup to thread exit, since the relevant address
4523 * space is destroyed right now.
4526 umtx_exec(struct proc *p)
4530 KASSERT(p == curproc, ("need curproc"));
4532 KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
4533 (p->p_flag & P_STOPPED_SINGLE) != 0,
4534 ("curproc must be single-threaded"));
4535 FOREACH_THREAD_IN_PROC(p, td) {
4536 KASSERT(td == curthread ||
4537 ((td->td_flags & TDF_BOUNDARY) != 0 && TD_IS_SUSPENDED(td)),
4538 ("running thread %p %p", p, td));
4540 umtx_thread_cleanup(td);
4542 td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
4551 umtx_thread_exit(struct thread *td)
4554 umtx_thread_cleanup(td);
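/*
 * Read a userland pointer of the ABI-appropriate width: 32 bits
 * for compat32 robust lists, the full native width otherwise.
 */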
4558 umtx_read_uptr(struct thread *td, uintptr_t ptr, uintptr_t *res, bool compat32)
4565 error = fueword32((void *)ptr, &res32);
4569 error = fueword((void *)ptr, &res1);
4579 umtx_read_rb_list(struct thread *td, struct umutex *m, uintptr_t *rb_list,
4582 struct umutex32 m32;
4585 memcpy(&m32, m, sizeof(m32));
4586 *rb_list = m32.m_rb_lnk;
4588 *rb_list = m->m_rb_lnk;
4593 umtx_handle_rb(struct thread *td, uintptr_t rbp, uintptr_t *rb_list, bool inact,
4594 bool compat32)
4595 {
4596 struct umutex m;
4597 int error;
4599 KASSERT(td->td_proc == curproc, ("need current vmspace"));
4600 error = copyin((void *)rbp, &m, sizeof(m));
4603 if (rb_list != NULL)
4604 umtx_read_rb_list(td, &m, rb_list, compat32);
4605 if ((m.m_flags & UMUTEX_ROBUST) == 0)
4607 if ((m.m_owner & ~UMUTEX_CONTESTED) != td->td_tid)
4608 /* inact is cleared after unlock, allow the inconsistency */
4609 return (inact ? 0 : EINVAL);
4610 return (do_unlock_umutex(td, (struct umutex *)rbp, true));
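/*
 * Walk one robust mutex list at thread exit and unlock the entries
 * still owned by the exiting thread. The walk is capped at
 * umtx_max_rb entries to defend against cycles, and failures are
 * reported when umtx_verbose_rb is set.
 */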
4614 umtx_cleanup_rb_list(struct thread *td, uintptr_t rb_list, uintptr_t *rb_inact,
4615 const char *name, bool compat32)
4623 error = umtx_read_uptr(td, rb_list, &rbp, compat32);
4624 for (i = 0; error == 0 && rbp != 0 && i < umtx_max_rb; i++) {
4625 if (rbp == *rb_inact) {
4630 error = umtx_handle_rb(td, rbp, &rbp, inact, compat32);
4632 if (i == umtx_max_rb && umtx_verbose_rb) {
4633 uprintf("comm %s pid %d: reached umtx %smax rb %d\n",
4634 td->td_proc->p_comm, td->td_proc->p_pid, name, umtx_max_rb);
4636 if (error != 0 && umtx_verbose_rb) {
4637 uprintf("comm %s pid %d: handling %srb error %d\n",
4638 td->td_proc->p_comm, td->td_proc->p_pid, name, error);
4643 * Clean up umtx data.
4646 umtx_thread_cleanup(struct thread *td)
4654 * Disown pi mutexes.
4658 mtx_lock(&umtx_lock);
4659 uq->uq_inherited_pri = PRI_MAX;
4660 while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
4661 pi->pi_owner = NULL;
4662 TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
4664 mtx_unlock(&umtx_lock);
4666 sched_lend_user_prio(td, PRI_MAX);
4670 compat32 = (td->td_pflags2 & TDP2_COMPAT32RB) != 0;
4671 td->td_pflags2 &= ~TDP2_COMPAT32RB;
4674 * Handle terminated robust mutexes. Must be done after
4675 * robust pi disown, otherwise unlock could see unowned
4676 * entries.
4678 rb_inact = td->td_rb_inact;
4680 (void)umtx_read_uptr(td, rb_inact, &rb_inact, compat32);
4681 umtx_cleanup_rb_list(td, td->td_rb_list, &rb_inact, "", compat32);
4682 umtx_cleanup_rb_list(td, td->td_rbp_list, &rb_inact, "priv ", compat32);
4684 (void)umtx_handle_rb(td, rb_inact, NULL, true, compat32);