 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015, 2016 The FreeBSD Foundation
 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_umtx_profiling.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>

#include <security/mac/mac_framework.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <machine/atomic.h>
#include <machine/cpu.h>

#include <compat/freebsd32/freebsd32.h>
#ifdef COMPAT_FREEBSD32
#include <compat/freebsd32/freebsd32_proto.h>
#define	_UMUTEX_WAIT		2

#define	UPROF_PERC_BIGGER(w, f, sw, sf)					\
	(((w) > (sw)) || ((w) == (sw) && (f) > (sf)))
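
/*
 * Illustrative sketch (not part of the build): UPROF_PERC_BIGGER() ranks
 * two percentages kept as (whole, fraction) pairs so the kernel can avoid
 * floating point.  Both pairs must be scaled against the same total, as
 * sysctl_debug_umtx_chains_peaks() below does with `tot'.  The helper name
 * perc_demo is hypothetical and exists only for this example.
 */
#if 0
static int
perc_demo(unsigned len1, unsigned len2, unsigned tot)
{
	unsigned w1, f1, w2, f2;

	w1 = len1 * 100;		/* scaled numerator */
	f1 = (w1 % tot) * 100;		/* two extra decimal digits */
	w2 = len2 * 100;
	f2 = (w2 % tot) * 100;
	/* Nonzero when len1's percentage of tot exceeds len2's. */
	return (UPROF_PERC_BIGGER(w1, f1, w2, f2));
}
#endif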
/* Priority inheritance mutex info. */
	struct thread		*pi_owner;

	/* List entry linking the PI mutexes held by a thread */
	TAILQ_ENTRY(umtx_pi)	pi_link;

	/* List entry in hash */
	TAILQ_ENTRY(umtx_pi)	pi_hashlink;

	/* List for waiters */
	TAILQ_HEAD(, umtx_q)	pi_blocked;

	/* Identify a userland lock object */
	struct umtx_key		pi_key;

/* A userland synchronization object user. */
	/* Linked list for the hash. */
	TAILQ_ENTRY(umtx_q)	uq_link;

	struct umtx_key		uq_key;

#define	UQF_UMTXQ	0x0001

	/* The waiting thread. */
	struct thread		*uq_thread;

	 * Blocked on PI mutex.  Reads can use either the chain lock or
	 * umtx_lock; writes must hold both the chain lock and
	 * umtx_lock.
	struct umtx_pi		*uq_pi_blocked;

	/* On blocked list */
	TAILQ_ENTRY(umtx_q)	uq_lockq;

	/* Contested PI mutexes owned by us */
	TAILQ_HEAD(, umtx_pi)	uq_pi_contested;

	/* Inherited priority from PP mutex */
	u_char			uq_inherited_pri;

	/* Spare queue ready to be reused */
	struct umtxq_queue	*uq_spare_queue;

	/* The queue we are on */
	struct umtxq_queue	*uq_cur_queue;
TAILQ_HEAD(umtxq_head, umtx_q);

/* Per-key wait-queue */
	struct umtxq_head	head;
	LIST_ENTRY(umtxq_queue)	link;

LIST_HEAD(umtxq_list, umtxq_queue);

/* Userland lock object's wait-queue chain */
	/* Lock for this chain. */
	/* List of sleep queues. */
	struct umtxq_list	uc_queue[2];
#define	UMTX_SHARED_QUEUE	0
#define	UMTX_EXCLUSIVE_QUEUE	1

	LIST_HEAD(, umtxq_queue) uc_spare_queue;

	/* Chain lock waiters */
	/* All PI mutexes in the list */
	TAILQ_HEAD(, umtx_pi)	uc_pi_list;

#ifdef UMTX_PROFILING
#define	UMTXQ_LOCKED_ASSERT(uc)	mtx_assert(&(uc)->uc_lock, MA_OWNED)
 * Don't propagate time-sharing priority; there is a security concern.
 * A user could simply create a PI mutex, let thread A lock it, and let
 * another thread B block on it.  Because B is sleeping, its priority
 * would be boosted, and A's priority would be boosted as well via
 * priority propagation; A's priority would then never be lowered even
 * while it used 100% CPU, which would be unfair to other processes.
#define	UPRI(td)	(((td)->td_user_pri >= PRI_MIN_TIMESHARE &&	\
			  (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?	\
			 PRI_MAX_TIMESHARE : (td)->td_user_pri)
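
/*
 * Illustrative sketch (not compiled): the effect of UPRI() is simply to
 * clamp every time-sharing user priority to the worst time-share value,
 * so that a sleeping time-share thread never lends a boosted priority.
 * The parameters stand in for the real <sys/priority.h> constants.
 */
#if 0
static int
upri_clamp(int user_pri, int pri_min_ts, int pri_max_ts)
{
	/* Time-share priorities all collapse to the maximum (worst). */
	if (user_pri >= pri_min_ts && user_pri <= pri_max_ts)
		return (pri_max_ts);
	/* Real-time and idle priorities pass through unchanged. */
	return (user_pri);
}
#endif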
#define	GOLDEN_RATIO_PRIME	2654404609U
#define	UMTX_CHAINS		512
#define	UMTX_SHIFTS		(__WORD_BIT - 9)

#define	GET_SHARE(flags)	\
    (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)

#define	BUSY_SPINS		200

	bool	is_abs_real;	/* TIMER_ABSTIME && CLOCK_REALTIME* */

struct umtx_copyops {
	int	(*copyin_timeout)(const void *uaddr, struct timespec *tsp);
	int	(*copyin_umtx_time)(const void *uaddr, size_t size,
	    struct _umtx_time *tp);
	int	(*copyin_robust_lists)(const void *uaddr, size_t size,
	    struct umtx_robust_lists_params *rbp);
	int	(*copyout_timeout)(void *uaddr, size_t size,
	    struct timespec *tsp);
	const size_t	timespec_sz;
	const size_t	umtx_time_sz;

_Static_assert(sizeof(struct umutex) == sizeof(struct umutex32), "umutex32");
_Static_assert(__offsetof(struct umutex, m_spare[0]) ==
    __offsetof(struct umutex32, m_spare[0]), "m_spare32");

int umtx_shm_vnobj_persistent = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_vnode_persistent, CTLFLAG_RWTUN,
    &umtx_shm_vnobj_persistent, 0,
    "False forces destruction of umtx attached to file, on last close");
static int umtx_max_rb = 1000;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_max_robust, CTLFLAG_RWTUN,
    "Maximum number of robust mutexes allowed for each thread");

static uma_zone_t		umtx_pi_zone;
static struct umtxq_chain	umtxq_chains[2][UMTX_CHAINS];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
static int			umtx_pi_allocated;

static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
    &umtx_pi_allocated, 0, "Allocated umtx_pi");
static int umtx_verbose_rb = 1;
SYSCTL_INT(_debug_umtx, OID_AUTO, robust_faults_verbose, CTLFLAG_RWTUN,

#ifdef UMTX_PROFILING
static long max_length;
SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length");
static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
static void abs_timeout_update(struct abs_timeout *timo);

static void umtx_shm_init(void);
static void umtxq_sysinit(void *);
static void umtxq_hash(struct umtx_key *key);
static struct umtxq_chain *umtxq_getchain(struct umtx_key *key);
static void umtxq_lock(struct umtx_key *key);
static void umtxq_unlock(struct umtx_key *key);
static void umtxq_busy(struct umtx_key *key);
static void umtxq_unbusy(struct umtx_key *key);
static void umtxq_insert_queue(struct umtx_q *uq, int q);
static void umtxq_remove_queue(struct umtx_q *uq, int q);
static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *);
static int umtxq_count(struct umtx_key *key);
static struct umtx_pi *umtx_pi_alloc(int);
static void umtx_pi_free(struct umtx_pi *pi);
static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags,
static void umtx_thread_cleanup(struct thread *td);
SYSINIT(umtx, SI_SUB_EVENTHANDLER + 1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);

#define	umtxq_signal(key, nwake)	umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
#define	umtxq_insert(uq)	umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
#define	umtxq_remove(uq)	umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)

static struct mtx umtx_lock;

#ifdef UMTX_PROFILING
umtx_init_profiling(void)
	struct sysctl_oid *chain_oid;

	for (i = 0; i < UMTX_CHAINS; ++i) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL);
		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL);
sysctl_debug_umtx_chains_peaks(SYSCTL_HANDLER_ARGS)
	struct umtxq_chain *uc;
	u_int fract, i, j, tot, whole;
	u_int sf0, sf1, sf2, sf3, sf4;
	u_int si0, si1, si2, si3, si4;
	u_int sw0, sw1, sw2, sw3, sw4;

	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
	for (i = 0; i < 2; i++) {
		for (j = 0; j < UMTX_CHAINS; ++j) {
			uc = &umtxq_chains[i][j];
			mtx_lock(&uc->uc_lock);
			tot += uc->max_length;
			mtx_unlock(&uc->uc_lock);
			sbuf_printf(&sb, "%u) Empty ", i);
		sf0 = sf1 = sf2 = sf3 = sf4 = 0;
		si0 = si1 = si2 = si3 = si4 = 0;
		sw0 = sw1 = sw2 = sw3 = sw4 = 0;
		for (j = 0; j < UMTX_CHAINS; j++) {
			uc = &umtxq_chains[i][j];
			mtx_lock(&uc->uc_lock);
			whole = uc->max_length * 100;
			mtx_unlock(&uc->uc_lock);
			fract = (whole % tot) * 100;
			if (UPROF_PERC_BIGGER(whole, fract, sw0, sf0)) {
			} else if (UPROF_PERC_BIGGER(whole, fract, sw1,
			} else if (UPROF_PERC_BIGGER(whole, fract, sw2,
			} else if (UPROF_PERC_BIGGER(whole, fract, sw3,
			} else if (UPROF_PERC_BIGGER(whole, fract, sw4,
		sbuf_printf(&sb, "queue %u:\n", i);
		sbuf_printf(&sb, "1st: %u.%u%% idx: %u\n", sw0 / tot,
		sbuf_printf(&sb, "2nd: %u.%u%% idx: %u\n", sw1 / tot,
		sbuf_printf(&sb, "3rd: %u.%u%% idx: %u\n", sw2 / tot,
		sbuf_printf(&sb, "4th: %u.%u%% idx: %u\n", sw3 / tot,
		sbuf_printf(&sb, "5th: %u.%u%% idx: %u\n", sw4 / tot,
	sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

sysctl_debug_umtx_chains_clear(SYSCTL_HANDLER_ARGS)
	struct umtxq_chain *uc;

	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error != 0 || req->newptr == NULL)

	for (i = 0; i < 2; ++i) {
		for (j = 0; j < UMTX_CHAINS; ++j) {
			uc = &umtxq_chains[i][j];
			mtx_lock(&uc->uc_lock);
			mtx_unlock(&uc->uc_lock);

SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, clear,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_clear, "I",
    "Clear umtx chains statistics");
SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, peaks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_peaks, "A",
    "Highest peaks in chains max length");
umtxq_sysinit(void *arg __unused)
	umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	for (i = 0; i < 2; ++i) {
		for (j = 0; j < UMTX_CHAINS; ++j) {
			mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
			    MTX_DEF | MTX_DUPOK);
			LIST_INIT(&umtxq_chains[i][j].uc_queue[0]);
			LIST_INIT(&umtxq_chains[i][j].uc_queue[1]);
			LIST_INIT(&umtxq_chains[i][j].uc_spare_queue);
			TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
			umtxq_chains[i][j].uc_busy = 0;
			umtxq_chains[i][j].uc_waiters = 0;
#ifdef UMTX_PROFILING
			umtxq_chains[i][j].length = 0;
			umtxq_chains[i][j].max_length = 0;
#ifdef UMTX_PROFILING
	umtx_init_profiling();
	mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);

	uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
	uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX,
	TAILQ_INIT(&uq->uq_spare_queue->head);
	TAILQ_INIT(&uq->uq_pi_contested);
	uq->uq_inherited_pri = PRI_MAX;

umtxq_free(struct umtx_q *uq)
	MPASS(uq->uq_spare_queue != NULL);
	free(uq->uq_spare_queue, M_UMTX);
umtxq_hash(struct umtx_key *key)
	n = (uintptr_t)key->info.both.a + key->info.both.b;
	key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
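
/*
 * Illustrative sketch (not compiled): a 32-bit variant of the
 * multiplicative (Fibonacci) hash used by umtxq_hash() above.
 * Multiplying by a prime near 2^32 / phi scrambles the key bits, and
 * the top bits of the product select one of the 512 chains; the widths
 * here assume __WORD_BIT == 32 and the constants defined above.
 */
#if 0
#include <stdint.h>

static unsigned
umtx_hash_sketch(uintptr_t a, uintptr_t b)
{
	uint32_t n;

	n = (uint32_t)(a + b);			/* combine the key words */
	return (((n * 2654404609U) >> (32 - 9)) % 512);
}
#endif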
static inline struct umtxq_chain *
umtxq_getchain(struct umtx_key *key)
	if (key->type <= TYPE_SEM)
		return (&umtxq_chains[1][key->hash]);
	return (&umtxq_chains[0][key->hash]);

umtxq_lock(struct umtx_key *key)
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_lock(&uc->uc_lock);

umtxq_unlock(struct umtx_key *key)
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_unlock(&uc->uc_lock);
 * Set the chain to the busy state when a following operation may
 * block (a kernel mutex cannot be used).
umtxq_busy(struct umtx_key *key)
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_assert(&uc->uc_lock, MA_OWNED);
		int count = BUSY_SPINS;

			while (uc->uc_busy && --count > 0)
		while (uc->uc_busy) {
			msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);

umtxq_unbusy(struct umtx_key *key)
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_assert(&uc->uc_lock, MA_OWNED);
	KASSERT(uc->uc_busy != 0, ("not busy"));

umtxq_unbusy_unlocked(struct umtx_key *key)
static struct umtxq_queue *
umtxq_queue_lookup(struct umtx_key *key, int q)
	struct umtxq_queue *uh;
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);
	LIST_FOREACH(uh, &uc->uc_queue[q], link) {
		if (umtx_key_match(&uh->key, key))

umtxq_insert_queue(struct umtx_q *uq, int q)
	struct umtxq_queue *uh;
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue"));
	uh = umtxq_queue_lookup(&uq->uq_key, q);
		LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
		uh = uq->uq_spare_queue;
		uh->key = uq->uq_key;
		LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
#ifdef UMTX_PROFILING
	if (uc->length > uc->max_length) {
		uc->max_length = uc->length;
		if (uc->max_length > max_length)
			max_length = uc->max_length;
	uq->uq_spare_queue = NULL;

	TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
	uq->uq_flags |= UQF_UMTXQ;
	uq->uq_cur_queue = uh;

umtxq_remove_queue(struct umtx_q *uq, int q)
	struct umtxq_chain *uc;
	struct umtxq_queue *uh;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	if (uq->uq_flags & UQF_UMTXQ) {
		uh = uq->uq_cur_queue;
		TAILQ_REMOVE(&uh->head, uq, uq_link);
		uq->uq_flags &= ~UQF_UMTXQ;
		if (TAILQ_EMPTY(&uh->head)) {
			KASSERT(uh->length == 0,
			    ("inconsistent umtxq_queue length"));
#ifdef UMTX_PROFILING
			LIST_REMOVE(uh, link);
			uh = LIST_FIRST(&uc->uc_spare_queue);
			KASSERT(uh != NULL, ("uc_spare_queue is empty"));
			LIST_REMOVE(uh, link);
		uq->uq_spare_queue = uh;
		uq->uq_cur_queue = NULL;
 * Check if there are multiple waiters.
umtxq_count(struct umtx_key *key)
	struct umtxq_queue *uh;

	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);

 * Check if there are multiple PI waiters and return the first waiter.
umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
	struct umtxq_queue *uh;

	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
		*first = TAILQ_FIRST(&uh->head);

 * Wake up threads waiting on a userland object.
umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
	struct umtxq_queue *uh;

	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	uh = umtxq_queue_lookup(key, q);
		while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
			umtxq_remove_queue(uq, q);

 * Wake up the specified thread.
umtxq_signal_thread(struct umtx_q *uq)
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));
tstohz(const struct timespec *tsp)
	TIMESPEC_TO_TIMEVAL(&tv, tsp);

abs_timeout_init(struct abs_timeout *timo, int clockid, int absolute,
    const struct timespec *timeout)
	timo->clockid = clockid;
		timo->is_abs_real = false;
		abs_timeout_update(timo);
		timespecadd(&timo->cur, timeout, &timo->end);
		timo->end = *timeout;
		timo->is_abs_real = clockid == CLOCK_REALTIME ||
		    clockid == CLOCK_REALTIME_FAST ||
		    clockid == CLOCK_REALTIME_PRECISE;
		 * If is_abs_real, umtxq_sleep will read the clock
		 * after setting td_rtcgen; otherwise, read it here.
		if (!timo->is_abs_real) {
			abs_timeout_update(timo);

abs_timeout_init2(struct abs_timeout *timo, const struct _umtx_time *umtxtime)
	abs_timeout_init(timo, umtxtime->_clockid,
	    (umtxtime->_flags & UMTX_ABSTIME) != 0, &umtxtime->_timeout);

abs_timeout_update(struct abs_timeout *timo)
	kern_clock_gettime(curthread, timo->clockid, &timo->cur);

abs_timeout_gethz(struct abs_timeout *timo)
	if (timespeccmp(&timo->end, &timo->cur, <=))
	timespecsub(&timo->end, &timo->cur, &tts);
	return (tstohz(&tts));
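
/*
 * Illustrative userland sketch (not compiled): the same absolute-deadline
 * pattern as abs_timeout_update()/abs_timeout_gethz() above — re-read the
 * clock and convert whatever remains until the deadline, reporting zero
 * once the deadline has passed.  The helper name is hypothetical.
 */
#if 0
#include <sys/time.h>
#include <time.h>

/* Returns the remaining whole milliseconds, or 0 if the deadline passed. */
static long
deadline_remaining_ms(clockid_t clockid, const struct timespec *end)
{
	struct timespec cur, tts;

	clock_gettime(clockid, &cur);
	if (timespeccmp(end, &cur, <=))
		return (0);
	timespecsub(end, &cur, &tts);
	return (tts.tv_sec * 1000 + tts.tv_nsec / 1000000);
}
#endif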
umtx_unlock_val(uint32_t flags, bool rb)
		return (UMUTEX_RB_OWNERDEAD);
	else if ((flags & UMUTEX_NONCONSISTENT) != 0)
		return (UMUTEX_RB_NOTRECOV);
		return (UMUTEX_UNOWNED);

 * Put the thread into a sleep state; before sleeping, check whether
 * the thread was removed from the umtx queue.
umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *abstime)
	struct umtxq_chain *uc;

	if (abstime != NULL && abstime->is_abs_real) {
		curthread->td_rtcgen = atomic_load_acq_int(&rtc_generation);
		abs_timeout_update(abstime);

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
		if (!(uq->uq_flags & UQF_UMTXQ)) {
		if (abstime != NULL) {
			timo = abs_timeout_gethz(abstime);
		error = msleep(uq, &uc->uc_lock, PCATCH | PDROP, wmesg, timo);
		if (error == EINTR || error == ERESTART) {
			umtxq_lock(&uq->uq_key);
		if (abstime != NULL) {
			if (abstime->is_abs_real)
				curthread->td_rtcgen =
				    atomic_load_acq_int(&rtc_generation);
			abs_timeout_update(abstime);
		umtxq_lock(&uq->uq_key);

	curthread->td_rtcgen = 0;
 * Convert a userspace address into a unique logical address.
umtx_key_get(const void *addr, int type, int share, struct umtx_key *key)
	struct thread *td = curthread;
	vm_map_entry_t entry;

	if (share == THREAD_SHARE) {
		key->info.private.vs = td->td_proc->p_vmspace;
		key->info.private.addr = (uintptr_t)addr;
		MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
		map = &td->td_proc->p_vmspace->vm_map;
		if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
		    &entry, &key->info.shared.object, &pindex, &prot,
		    &wired) != KERN_SUCCESS) {
		if ((share == PROCESS_SHARE) ||
		    (share == AUTO_SHARE &&
		    VM_INHERIT_SHARE == entry->inheritance)) {
			key->info.shared.offset = (vm_offset_t)addr -
			    entry->start + entry->offset;
			vm_object_reference(key->info.shared.object);
			key->info.private.vs = td->td_proc->p_vmspace;
			key->info.private.addr = (uintptr_t)addr;
		vm_map_lookup_done(map, entry);

umtx_key_release(struct umtx_key *key)
		vm_object_deallocate(key->info.shared.object);
 * Fetch and compare the value; sleep on the address if it has not changed.
do_wait(struct thread *td, void *addr, u_long id,
    struct _umtx_time *timeout, int compat32, int is_private)
	struct abs_timeout timo;

	if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
	    is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)

		abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);
		error = fueword(addr, &tmp);
		error = fueword32(addr, &tmp32);
		umtxq_lock(&uq->uq_key);
		error = umtxq_sleep(uq, "uwait", timeout == NULL ?
		if ((uq->uq_flags & UQF_UMTXQ) == 0)
	} else if ((uq->uq_flags & UQF_UMTXQ) != 0) {
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)

 * Wake up threads sleeping on the specified address.
kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
	if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
	    is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
	umtxq_signal(&key, n_wake);
	umtx_key_release(&key);
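
/*
 * Illustrative userland sketch (not compiled): do_wait() and
 * kern_umtx_wake() above back the UMTX_OP_WAIT_UINT_PRIVATE and
 * UMTX_OP_WAKE_PRIVATE operations of _umtx_op(2), FreeBSD's futex-style
 * primitive.  A minimal one-shot event built on them might look like
 * this; the names event_wait/event_post are hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/umtx.h>
#include <machine/atomic.h>
#include <limits.h>

static uint32_t event;			/* 0 = not posted, 1 = posted */

static void
event_wait(void)
{
	/* Sleep only while the word still holds the expected value 0. */
	while (atomic_load_acq_32(&event) == 0)
		_umtx_op(&event, UMTX_OP_WAIT_UINT_PRIVATE, 0, NULL, NULL);
}

static void
event_post(void)
{
	atomic_store_rel_32(&event, 1);
	/* Wake every thread sleeping on the address. */
	_umtx_op(&event, UMTX_OP_WAKE_PRIVATE, INT_MAX, NULL, NULL);
}
#endif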
 * Lock a PTHREAD_PRIO_NONE protocol POSIX mutex.
do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int mode)
	struct abs_timeout timo;
	uint32_t owner, old, id;

		abs_timeout_init2(&timo, timeout);

	 * Care must be exercised when dealing with the umtx structure.  It
	 * can fault on any access.
		rv = fueword32(&m->m_owner, &owner);
		if (mode == _UMUTEX_WAIT) {
			if (owner == UMUTEX_UNOWNED ||
			    owner == UMUTEX_CONTESTED ||
			    owner == UMUTEX_RB_OWNERDEAD ||
			    owner == UMUTEX_RB_NOTRECOV)
			 * A robust mutex terminated.  The kernel's duty is
			 * to return EOWNERDEAD to userspace.  The
			 * umutex.m_flags UMUTEX_NONCONSISTENT flag is set
			 * by the common userspace code.
			if (owner == UMUTEX_RB_OWNERDEAD) {
				rv = casueword32(&m->m_owner,
				    UMUTEX_RB_OWNERDEAD, &owner,
				    id | UMUTEX_CONTESTED);
					MPASS(owner == UMUTEX_RB_OWNERDEAD);
					return (EOWNERDEAD); /* success */
				rv = thread_check_susp(td, false);
			if (owner == UMUTEX_RB_NOTRECOV)
				return (ENOTRECOVERABLE);

		 * Try the uncontested case.  This should be done in userland.
		rv = casueword32(&m->m_owner, UMUTEX_UNOWNED,
		/* The address was invalid. */
		/* The acquire succeeded. */
			MPASS(owner == UMUTEX_UNOWNED);

		 * If no one owns it but it is contested, try to acquire it.
		if (owner == UMUTEX_CONTESTED) {
			rv = casueword32(&m->m_owner,
			    UMUTEX_CONTESTED, &owner,
			    id | UMUTEX_CONTESTED);
			/* The address was invalid. */
				MPASS(owner == UMUTEX_CONTESTED);
			rv = thread_check_susp(td, false);
			 * If this failed the lock has changed; restart.
		/* rv == 1 but not contested, likely store failure */
		rv = thread_check_susp(td, false);

		if (mode == _UMUTEX_TRY)

		 * If we caught a signal, we have retried and now we
		 * exit immediately.
		if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
		    GET_SHARE(flags), &uq->uq_key)) != 0)

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		rv = casueword32(&m->m_owner, owner, &old,
		    owner | UMUTEX_CONTESTED);

		/* The address was invalid or casueword failed to store. */
		if (rv == -1 || rv == 1) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
				rv = thread_check_susp(td, false);

		 * We set the contested bit, sleep.  Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		MPASS(old == owner);
		error = umtxq_sleep(uq, "umtxn", timeout == NULL ?
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
		error = thread_check_susp(td, false);
 * Unlock a PTHREAD_PRIO_NONE protocol POSIX mutex.
do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
	struct umtx_key key;
	uint32_t owner, old, id, newlock;

	 * Make sure we own this mtx.
	error = fueword32(&m->m_owner, &owner);
	if ((owner & ~UMUTEX_CONTESTED) != id)

	newlock = umtx_unlock_val(flags, rb);
	if ((owner & UMUTEX_CONTESTED) == 0) {
		error = casueword32(&m->m_owner, owner, &old, newlock);
			error = thread_check_susp(td, false);
		MPASS(old == owner);

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
	count = umtxq_count(&key);

	 * When unlocking the umtx, it must be marked as unowned if there
	 * are zero or one waiting threads; otherwise, it must be marked
	 * as contested.
		newlock |= UMUTEX_CONTESTED;
	error = casueword32(&m->m_owner, owner, &old, newlock);
		umtxq_signal(&key, 1);
	umtx_key_release(&key);
		error = thread_check_susp(td, false);
 * Check if the mutex is available and wake up a waiter; this is done
 * only for the simple mutex.
do_wake_umutex(struct thread *td, struct umutex *m)
	struct umtx_key key;

	error = fueword32(&m->m_owner, &owner);
	if ((owner & ~UMUTEX_CONTESTED) != 0 && owner != UMUTEX_RB_OWNERDEAD &&
	    owner != UMUTEX_RB_NOTRECOV)

	error = fueword32(&m->m_flags, &flags);

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
	count = umtxq_count(&key);

	if (count <= 1 && owner != UMUTEX_RB_OWNERDEAD &&
	    owner != UMUTEX_RB_NOTRECOV) {
		error = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		} else if (error == 1) {
			umtx_key_release(&key);
			error = thread_check_susp(td, false);

	if (error == 0 && count != 0) {
		MPASS((owner & ~UMUTEX_CONTESTED) == 0 ||
		    owner == UMUTEX_RB_OWNERDEAD ||
		    owner == UMUTEX_RB_NOTRECOV);
		umtxq_signal(&key, 1);
	umtx_key_release(&key);
 * Check if the mutex has waiters and try to fix the contention bit.
do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
	struct umtx_key key;
	uint32_t owner, old;

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT |
		type = TYPE_NORMAL_UMUTEX;
	case UMUTEX_PRIO_INHERIT:
		type = TYPE_PI_UMUTEX;
	case (UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST):
		type = TYPE_PI_ROBUST_UMUTEX;
	case UMUTEX_PRIO_PROTECT:
		type = TYPE_PP_UMUTEX;
	case (UMUTEX_PRIO_PROTECT | UMUTEX_ROBUST):
		type = TYPE_PP_ROBUST_UMUTEX;
	if ((error = umtx_key_get(m, type, GET_SHARE(flags), &key)) != 0)
	count = umtxq_count(&key);

	error = fueword32(&m->m_owner, &owner);

	 * Only repair the contention bit if there is a waiter; this means
	 * the mutex is still being referenced by userland code.
	 * Otherwise, don't update any memory.
	while (error == 0 && (owner & UMUTEX_CONTESTED) == 0 &&
	    (count > 1 || (count == 1 && (owner & ~UMUTEX_CONTESTED) != 0))) {
		error = casueword32(&m->m_owner, owner, &old,
		    owner | UMUTEX_CONTESTED);
			MPASS(old == owner);
		error = thread_check_susp(td, false);

	if (error == EFAULT) {
		umtxq_signal(&key, INT_MAX);
	} else if (count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 ||
	    owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV))
		umtxq_signal(&key, 1);
	umtx_key_release(&key);
static inline struct umtx_pi *
umtx_pi_alloc(int flags)
	pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
	TAILQ_INIT(&pi->pi_blocked);
	atomic_add_int(&umtx_pi_allocated, 1);

umtx_pi_free(struct umtx_pi *pi)
	uma_zfree(umtx_pi_zone, pi);
	atomic_add_int(&umtx_pi_allocated, -1);
 * Adjust the thread's position on a pi_state after its priority has been
 * changed.
umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
	struct umtx_q *uq, *uq1, *uq2;

	mtx_assert(&umtx_lock, MA_OWNED);

	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if either its priority is lower than
	 * the previous thread or higher than the next thread.
	uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
	uq2 = TAILQ_NEXT(uq, uq_lockq);
	if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
	    (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
		 * Remove thread from blocked chain and determine where
		 * it should be moved to.
		TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
		TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
			td1 = uq1->uq_thread;
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (UPRI(td1) > UPRI(td))
			TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
			TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
static struct umtx_pi *
umtx_pi_next(struct umtx_pi *pi)
	struct umtx_q *uq_owner;

	if (pi->pi_owner == NULL)
	uq_owner = pi->pi_owner->td_umtxq;
	if (uq_owner == NULL)
	return (uq_owner->uq_pi_blocked);

 * Floyd's Cycle-Finding Algorithm.
umtx_pi_check_loop(struct umtx_pi *pi)
	struct umtx_pi *pi1;	/* fast iterator */

	mtx_assert(&umtx_lock, MA_OWNED);
		pi = umtx_pi_next(pi);
		pi1 = umtx_pi_next(pi1);
		pi1 = umtx_pi_next(pi1);
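
/*
 * Illustrative sketch (not compiled): the tortoise-and-hare structure
 * used by umtx_pi_check_loop() above, shown on a plain singly linked
 * list.  The slow pointer advances one step per iteration and the fast
 * one two; they can meet again only if the chain loops back on itself.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *next;
};

static bool
has_cycle(struct node *head)
{
	struct node *slow, *fast;

	slow = fast = head;
	while (fast != NULL && fast->next != NULL) {
		slow = slow->next;		/* one step */
		fast = fast->next->next;	/* two steps */
		if (slow == fast)
			return (true);
	}
	return (false);
}
#endif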
 * Propagate priority when a thread is blocked on a POSIX PI mutex.
umtx_propagate_priority(struct thread *td)
	mtx_assert(&umtx_lock, MA_OWNED);
	pi = uq->uq_pi_blocked;
	if (umtx_pi_check_loop(pi))
		if (td == NULL || td == curthread)

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		if (td->td_lend_user_pri > pri)
			sched_lend_user_prio(td, pri);

		 * Pick up the lock that td is blocked on.
		pi = uq->uq_pi_blocked;
		/* Resort td on the list if needed. */
		umtx_pi_adjust_thread(pi, td);

 * Unpropagate priority for a PI mutex when a thread blocked on
 * it is interrupted by a signal or resumed by others.
umtx_repropagate_priority(struct umtx_pi *pi)
	struct umtx_q *uq, *uq_owner;
	struct umtx_pi *pi2;

	mtx_assert(&umtx_lock, MA_OWNED);
	if (umtx_pi_check_loop(pi))
	while (pi != NULL && pi->pi_owner != NULL) {
		uq_owner = pi->pi_owner->td_umtxq;
		TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
			uq = TAILQ_FIRST(&pi2->pi_blocked);
				if (pri > UPRI(uq->uq_thread))
					pri = UPRI(uq->uq_thread);
		if (pri > uq_owner->uq_inherited_pri)
			pri = uq_owner->uq_inherited_pri;
		thread_lock(pi->pi_owner);
		sched_lend_user_prio(pi->pi_owner, pri);
		thread_unlock(pi->pi_owner);
		if ((pi = uq_owner->uq_pi_blocked) != NULL)
			umtx_pi_adjust_thread(pi, uq_owner->uq_thread);
 * Insert a PI mutex into the owned list.
umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
	struct umtx_q *uq_owner;

	uq_owner = owner->td_umtxq;
	mtx_assert(&umtx_lock, MA_OWNED);
	MPASS(pi->pi_owner == NULL);
	pi->pi_owner = owner;
	TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);

 * Disown a PI mutex, and remove it from the owned list.
umtx_pi_disown(struct umtx_pi *pi)
	mtx_assert(&umtx_lock, MA_OWNED);
	TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested, pi, pi_link);
	pi->pi_owner = NULL;

 * Claim ownership of a PI mutex.
umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
	mtx_lock(&umtx_lock);
	if (pi->pi_owner == owner) {
		mtx_unlock(&umtx_lock);

	if (pi->pi_owner != NULL) {
		 * Userland may have already messed up the mutex, sigh.
		mtx_unlock(&umtx_lock);

	umtx_pi_setowner(pi, owner);
	uq = TAILQ_FIRST(&pi->pi_blocked);
		pri = UPRI(uq->uq_thread);
		if (pri < UPRI(owner))
			sched_lend_user_prio(owner, pri);
		thread_unlock(owner);
	mtx_unlock(&umtx_lock);
 * Adjust a thread's position in the blocked list of its PI mutex;
 * this may trigger another round of priority propagation.
umtx_pi_adjust(struct thread *td, u_char oldpri)
	mtx_lock(&umtx_lock);
	 * Pick up the lock that td is blocked on.
	pi = uq->uq_pi_blocked;
		umtx_pi_adjust_thread(pi, td);
		umtx_repropagate_priority(pi);
	mtx_unlock(&umtx_lock);
 * Sleep on a PI mutex.
umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner,
    const char *wmesg, struct abs_timeout *timo, bool shared)
	struct thread *td, *td1;
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	KASSERT(td == curthread, ("inconsistent uq_thread"));
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));
	KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));

	mtx_lock(&umtx_lock);
	if (pi->pi_owner == NULL) {
		mtx_unlock(&umtx_lock);
		td1 = tdfind(owner, shared ? -1 : td->td_proc->p_pid);
		mtx_lock(&umtx_lock);
			if (pi->pi_owner == NULL)
				umtx_pi_setowner(pi, td1);
			PROC_UNLOCK(td1->td_proc);

	TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
		pri = UPRI(uq1->uq_thread);
		TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
		TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);

	uq->uq_pi_blocked = pi;
	td->td_flags |= TDF_UPIBLOCKED;
	umtx_propagate_priority(td);
	mtx_unlock(&umtx_lock);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, wmesg, timo);

	mtx_lock(&umtx_lock);
	uq->uq_pi_blocked = NULL;
	td->td_flags &= ~TDF_UPIBLOCKED;
	TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
	umtx_repropagate_priority(pi);
	mtx_unlock(&umtx_lock);
	umtxq_unlock(&uq->uq_key);
 * Add a reference to a PI mutex.
umtx_pi_ref(struct umtx_pi *pi)
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&pi->pi_key));

 * Decrease the reference count of a PI mutex; when the counter
 * drops to zero, its memory is freed.
umtx_pi_unref(struct umtx_pi *pi)
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	UMTXQ_LOCKED_ASSERT(uc);
	KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
	if (--pi->pi_refcount == 0) {
		mtx_lock(&umtx_lock);
		if (pi->pi_owner != NULL)
		KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
		    ("blocked queue not empty"));
		mtx_unlock(&umtx_lock);
		TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);

 * Find a PI mutex in the hash table.
static struct umtx_pi *
umtx_pi_lookup(struct umtx_key *key)
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);
	TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
		if (umtx_key_match(&pi->pi_key, key)) {

 * Insert a PI mutex into the hash table.
umtx_pi_insert(struct umtx_pi *pi)
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	UMTXQ_LOCKED_ASSERT(uc);
	TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
	struct abs_timeout timo;
	struct umtx_pi *pi, *new_pi;
	uint32_t id, old_owner, owner, old;

	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	pi = umtx_pi_lookup(&uq->uq_key);
		new_pi = umtx_pi_alloc(M_NOWAIT);
		if (new_pi == NULL) {
			umtxq_unlock(&uq->uq_key);
			new_pi = umtx_pi_alloc(M_WAITOK);
			umtxq_lock(&uq->uq_key);
			pi = umtx_pi_lookup(&uq->uq_key);
				umtx_pi_free(new_pi);
		if (new_pi != NULL) {
			new_pi->pi_key = uq->uq_key;
			umtx_pi_insert(new_pi);
	umtxq_unlock(&uq->uq_key);

	 * Care must be exercised when dealing with the umtx structure.  It
	 * can fault on any access.
		 * Try the uncontested case.  This should be done in userland.
		rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, &owner, id);
		/* The address was invalid. */
		/* The acquire succeeded. */
			MPASS(owner == UMUTEX_UNOWNED);

		if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;

			 * Avoid overwriting a possible error from sleep due
			 * to the pending signal with suspension check result.
				error = thread_check_susp(td, true);

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMUTEX_CONTESTED || owner == UMUTEX_RB_OWNERDEAD) {
			rv = casueword32(&m->m_owner, owner, &owner,
			    id | UMUTEX_CONTESTED);
			/* The address was invalid. */
				error = thread_check_susp(td, true);
				 * If this failed the lock could have changed; restart.
			MPASS(owner == old_owner);
			umtxq_lock(&uq->uq_key);
			umtxq_busy(&uq->uq_key);
			error = umtx_pi_claim(pi, td);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
				 * Since we're going to return an
				 * error, restore the m_owner to its
				 * previous, unowned state to avoid
				 * compounding the problem.
				(void)casuword32(&m->m_owner,
				    id | UMUTEX_CONTESTED, old_owner);
			if (error == 0 && old_owner == UMUTEX_RB_OWNERDEAD)

		if ((owner & ~UMUTEX_CONTESTED) == id) {

		 * If we caught a signal, we have retried and now we
		 * exit immediately.
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		rv = casueword32(&m->m_owner, owner, &old, owner |
		/* The address was invalid. */
			umtxq_unbusy_unlocked(&uq->uq_key);
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = thread_check_susp(td, true);
			 * The lock changed and we need to retry or we
			 * lost a race to the thread unlocking the
			 * umtx.  Note that the UMUTEX_RB_OWNERDEAD
			 * value for owner is impossible here.
		umtxq_lock(&uq->uq_key);

		/* We set the contested bit, sleep. */
		MPASS(old == owner);
		error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED,
		    "umtxpi", timeout == NULL ? NULL : &timo,
		    (flags & USYNC_PROCESS_SHARED) != 0);
		error = thread_check_susp(td, false);

	umtxq_lock(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);

	umtx_key_release(&uq->uq_key);
 * Unlock a PI mutex.
do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
	struct umtx_key key;
	struct umtx_q *uq_first, *uq_first2, *uq_me;
	struct umtx_pi *pi, *pi2;
	uint32_t id, new_owner, old, owner;
	int count, error, pri;

	 * Make sure we own this mtx.
	error = fueword32(&m->m_owner, &owner);
	if ((owner & ~UMUTEX_CONTESTED) != id)

	new_owner = umtx_unlock_val(flags, rb);

	/* This should be done in userland */
	if ((owner & UMUTEX_CONTESTED) == 0) {
		error = casueword32(&m->m_owner, owner, &old, new_owner);
			error = thread_check_susp(td, true);

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),

	count = umtxq_count_pi(&key, &uq_first);
	if (uq_first != NULL) {
		mtx_lock(&umtx_lock);
		pi = uq_first->uq_pi_blocked;
		KASSERT(pi != NULL, ("pi == NULL?"));
		if (pi->pi_owner != td && !(rb && pi->pi_owner == NULL)) {
			mtx_unlock(&umtx_lock);
			umtx_key_release(&key);
			/* userland messed up the mutex */
		uq_me = td->td_umtxq;
		if (pi->pi_owner == td)
		/* Get the highest-priority thread which is still sleeping. */
		uq_first = TAILQ_FIRST(&pi->pi_blocked);
		while (uq_first != NULL &&
		    (uq_first->uq_flags & UQF_UMTXQ) == 0) {
			uq_first = TAILQ_NEXT(uq_first, uq_lockq);
		TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
			uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
			if (uq_first2 != NULL) {
				if (pri > UPRI(uq_first2->uq_thread))
					pri = UPRI(uq_first2->uq_thread);
		sched_lend_user_prio(td, pri);
		mtx_unlock(&umtx_lock);
			umtxq_signal_thread(uq_first);
		pi = umtx_pi_lookup(&key);
		 * A umtx_pi can exist if a signal or timeout removed the
		 * last waiter from the umtxq, but there is still
		 * a thread in do_lock_pi() holding the umtx_pi.
			 * The umtx_pi can be unowned, such as when a thread
			 * has just entered do_lock_pi(), allocated the
			 * umtx_pi, and unlocked the umtxq.
			 * If the current thread owns it, it must disown it.
			mtx_lock(&umtx_lock);
			if (pi->pi_owner == td)
			mtx_unlock(&umtx_lock);

	 * When unlocking the umtx, it must be marked as unowned if there
	 * are zero or one waiting threads; otherwise, it must be marked
	 * as contested.
		new_owner |= UMUTEX_CONTESTED;

	error = casueword32(&m->m_owner, owner, &old, new_owner);
		error = thread_check_susp(td, false);
	umtxq_unbusy_unlocked(&key);
	umtx_key_release(&key);
	if (error == 0 && old != owner)
do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
	struct abs_timeout timo;
	struct umtx_q *uq, *uq2;
	int error, pri, old_inherited_pri, su, rv;

	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
	old_inherited_pri = uq->uq_inherited_pri;
	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);

		rv = fueword32(&m->m_ceilings[0], &ceiling);
		ceiling = RTP_PRIO_MAX - ceiling;
		if (ceiling > RTP_PRIO_MAX) {
		mtx_lock(&umtx_lock);
		if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
			mtx_unlock(&umtx_lock);
		if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
			uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
			if (uq->uq_inherited_pri < UPRI(td))
				sched_lend_user_prio(td, uq->uq_inherited_pri);
		mtx_unlock(&umtx_lock);

		rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    id | UMUTEX_CONTESTED);
		/* The address was invalid. */
			MPASS(owner == UMUTEX_CONTESTED);
		if (owner == UMUTEX_RB_OWNERDEAD) {
			rv = casueword32(&m->m_owner, UMUTEX_RB_OWNERDEAD,
			    &owner, id | UMUTEX_CONTESTED);
				MPASS(owner == UMUTEX_RB_OWNERDEAD);
				error = EOWNERDEAD; /* success */
			 * rv == 1; only check for suspension if we have
			 * not already caught a signal.  If we get an
			 * error from the check, the same condition is
			 * checked by the umtxq_sleep() call below, so we
			 * should obliterate the error to not skip the
			 * last loop iteration.
				error = thread_check_susp(td, false);
		} else if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;

		 * If we caught a signal, we have retried and now we
		 * exit immediately.
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		error = umtxq_sleep(uq, "umtxpp", timeout == NULL ?
		umtxq_unlock(&uq->uq_key);

		mtx_lock(&umtx_lock);
		uq->uq_inherited_pri = old_inherited_pri;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		sched_lend_user_prio(td, pri);
		mtx_unlock(&umtx_lock);

	if (error != 0 && error != EOWNERDEAD) {
		mtx_lock(&umtx_lock);
		uq->uq_inherited_pri = old_inherited_pri;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		sched_lend_user_prio(td, pri);
		mtx_unlock(&umtx_lock);

	umtxq_unbusy_unlocked(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
 * Unlock a PP mutex.
do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
	struct umtx_key key;
	struct umtx_q *uq, *uq2;
	uint32_t id, owner, rceiling;
	int error, pri, new_inherited_pri, su;

	su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);

	 * Make sure we own this mtx.
	error = fueword32(&m->m_owner, &owner);
	if ((owner & ~UMUTEX_CONTESTED) != id)

	error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
		new_inherited_pri = PRI_MAX;
		rceiling = RTP_PRIO_MAX - rceiling;
		if (rceiling > RTP_PRIO_MAX)
		new_inherited_pri = PRI_MIN_REALTIME + rceiling;

	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),

	 * For a priority-protected mutex, always set the unlocked state
	 * to UMUTEX_CONTESTED, so that userland always enters the kernel
	 * to lock the mutex.  This is necessary because thread priority
	 * has to be adjusted for such a mutex.
	error = suword32(&m->m_owner, umtx_unlock_val(flags, rb) |
		umtxq_signal(&key, 1);

	mtx_lock(&umtx_lock);
		uq->uq_inherited_pri = new_inherited_pri;
	TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
		uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (pri > UPRI(uq2->uq_thread))
				pri = UPRI(uq2->uq_thread);
	if (pri > uq->uq_inherited_pri)
		pri = uq->uq_inherited_pri;
	sched_lend_user_prio(td, pri);
	mtx_unlock(&umtx_lock);

	umtx_key_release(&key);
do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
    uint32_t *old_ceiling)
	uint32_t flags, id, owner, save_ceiling;

	error = fueword32(&m->m_flags, &flags);
	if ((flags & UMUTEX_PRIO_PROTECT) == 0)
	if (ceiling > RTP_PRIO_MAX)

	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),

	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);

		rv = fueword32(&m->m_ceilings[0], &save_ceiling);
		rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    id | UMUTEX_CONTESTED);
			MPASS(owner == UMUTEX_CONTESTED);
			rv = suword32(&m->m_ceilings[0], ceiling);
			rv1 = suword32(&m->m_owner, UMUTEX_CONTESTED);
			error = (rv == 0 && rv1 == 0) ? 0 : EFAULT;
		if ((owner & ~UMUTEX_CONTESTED) == id) {
			rv = suword32(&m->m_ceilings[0], ceiling);
			error = rv == 0 ? 0 : EFAULT;
		if (owner == UMUTEX_RB_OWNERDEAD) {
		} else if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;

		 * If we caught a signal, we have retried and now we
		 * exit immediately.
		 * We set the contested bit, sleep.  Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		error = umtxq_sleep(uq, "umtxpp", NULL);
		umtxq_unlock(&uq->uq_key);
	umtxq_lock(&uq->uq_key);
		umtxq_signal(&uq->uq_key, INT_MAX);
	umtxq_unbusy(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	if (error == 0 && old_ceiling != NULL) {
		rv = suword32(old_ceiling, save_ceiling);
		error = rv == 0 ? 0 : EFAULT;
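
/*
 * Illustrative userland sketch (not compiled): do_set_ceiling() is
 * reached through the UMTX_OP_SET_CEILING operation of _umtx_op(2);
 * val carries the new ceiling and uaddr receives the old one.  The
 * kernel briefly holds the PP mutex (owner == UMUTEX_CONTESTED) so the
 * ceiling swap cannot race with lock/unlock.  The wrapper name is
 * hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/umtx.h>

static int
pp_mutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
    uint32_t *old_ceiling)
{
	return (_umtx_op(mtx, UMTX_OP_SET_CEILING, ceiling, old_ceiling,
	    NULL));
}
#endif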
 * Lock a userland POSIX mutex.
do_lock_umutex(struct thread *td, struct umutex *m,
    struct _umtx_time *timeout, int mode)
	error = fueword32(&m->m_flags, &flags);

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
		error = do_lock_normal(td, m, flags, timeout, mode);
	case UMUTEX_PRIO_INHERIT:
		error = do_lock_pi(td, m, flags, timeout, mode);
	case UMUTEX_PRIO_PROTECT:
		error = do_lock_pp(td, m, flags, timeout, mode);
	if (timeout == NULL) {
		if (error == EINTR && mode != _UMUTEX_WAIT)
		/* Timed-locking is not restarted. */
		if (error == ERESTART)

 * Unlock a userland POSIX mutex.
do_unlock_umutex(struct thread *td, struct umutex *m, bool rb)
	error = fueword32(&m->m_flags, &flags);

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
		return (do_unlock_normal(td, m, flags, rb));
	case UMUTEX_PRIO_INHERIT:
		return (do_unlock_pi(td, m, flags, rb));
	case UMUTEX_PRIO_PROTECT:
		return (do_unlock_pp(td, m, flags, rb));
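
/*
 * Illustrative userland sketch (not compiled): the dispatchers above
 * implement UMTX_OP_MUTEX_LOCK, UMTX_OP_MUTEX_TRYLOCK and
 * UMTX_OP_MUTEX_UNLOCK.  libthr normally takes the uncontested path
 * with a userland CAS and only enters the kernel on contention, but
 * calling the operations directly also works; the all-zero struct
 * umutex is the unowned PTHREAD_PRIO_NONE state.
 */
#if 0
#include <sys/types.h>
#include <sys/umtx.h>
#include <string.h>

static struct umutex mtx;

static void
mutex_demo(void)
{
	memset(&mtx, 0, sizeof(mtx));	/* UMUTEX_UNOWNED, no flags */
	_umtx_op(&mtx, UMTX_OP_MUTEX_LOCK, 0, NULL, NULL);
	/* ... critical section ... */
	_umtx_op(&mtx, UMTX_OP_MUTEX_UNLOCK, 0, NULL, NULL);
}
#endif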
do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
    struct timespec *timeout, u_long wflags)
	struct abs_timeout timo;
	uint32_t flags, clockid, hasw;

	error = fueword32(&cv->c_flags, &flags);
	error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);

	if ((wflags & CVWAIT_CLOCKID) != 0) {
		error = fueword32(&cv->c_clockid, &clockid);
			umtx_key_release(&uq->uq_key);
		if (clockid < CLOCK_REALTIME ||
		    clockid >= CLOCK_THREAD_CPUTIME_ID) {
			/* hmm, only HW clock id will work. */
			umtx_key_release(&uq->uq_key);
		clockid = CLOCK_REALTIME;

	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);

	 * Set c_has_waiters to 1 before releasing the user mutex; also
	 * avoid modifying the cache line when unnecessary.
	error = fueword32(&cv->c_has_waiters, &hasw);
	if (error == 0 && hasw == 0)
		suword32(&cv->c_has_waiters, 1);

	umtxq_unbusy_unlocked(&uq->uq_key);

	error = do_unlock_umutex(td, m, false);

	if (timeout != NULL)
		abs_timeout_init(&timo, clockid, (wflags & CVWAIT_ABSTIME) != 0,

	umtxq_lock(&uq->uq_key);
		error = umtxq_sleep(uq, "ucond", timeout == NULL ?
		if ((uq->uq_flags & UQF_UMTXQ) == 0)
			 * This must be a timeout, an interruption by a
			 * signal, or a spurious wakeup; clear the
			 * c_has_waiters flag when necessary.
			umtxq_busy(&uq->uq_key);
			if ((uq->uq_flags & UQF_UMTXQ) != 0) {
				int oldlen = uq->uq_cur_queue->length;
				umtxq_unlock(&uq->uq_key);
				suword32(&cv->c_has_waiters, 0);
				umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			if (error == ERESTART)
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
 * Signal a userland condition variable.
do_cv_signal(struct thread *td, struct ucond *cv)
	struct umtx_key key;
	int error, cnt, nwake;

	error = fueword32(&cv->c_flags, &flags);
	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
	cnt = umtxq_count(&key);
	nwake = umtxq_signal(&key, 1);
		error = suword32(&cv->c_has_waiters, 0);
	umtx_key_release(&key);

do_cv_broadcast(struct thread *td, struct ucond *cv)
	struct umtx_key key;

	error = fueword32(&cv->c_flags, &flags);
	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
	umtxq_signal(&key, INT_MAX);
	error = suword32(&cv->c_has_waiters, 0);
	umtxq_unbusy_unlocked(&key);
	umtx_key_release(&key);
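
/*
 * Illustrative userland sketch (not compiled): do_cv_wait() and
 * do_cv_signal() back UMTX_OP_CV_WAIT / UMTX_OP_CV_SIGNAL.  The wait
 * operation drops the supplied umutex before sleeping (note the
 * do_unlock_umutex() call above) and does NOT re-acquire it, so the
 * caller must re-lock; the usual predicate re-check loop is omitted
 * here for brevity.
 */
#if 0
#include <sys/types.h>
#include <sys/umtx.h>

static struct umutex cv_mtx;	/* zeroed: unowned */
static struct ucond cv;		/* zeroed: no waiters */

static void
cv_wait_demo(void)
{
	_umtx_op(&cv_mtx, UMTX_OP_MUTEX_LOCK, 0, NULL, NULL);
	/* val = wflags (0: relative CLOCK_REALTIME), uaddr = the mutex. */
	_umtx_op(&cv, UMTX_OP_CV_WAIT, 0, &cv_mtx, NULL);
	/* The kernel released cv_mtx; take it again before proceeding. */
	_umtx_op(&cv_mtx, UMTX_OP_MUTEX_LOCK, 0, NULL, NULL);
	_umtx_op(&cv_mtx, UMTX_OP_MUTEX_UNLOCK, 0, NULL, NULL);
}

static void
cv_signal_demo(void)
{
	_umtx_op(&cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL);
}
#endif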
do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag,
    struct _umtx_time *timeout)
	struct abs_timeout timo;
	uint32_t flags, wrflags;
	int32_t state, oldstate;
	int32_t blocked_readers;
	int error, error1, rv;

	error = fueword32(&rwlock->rw_flags, &flags);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	wrflags = URWLOCK_WRITE_OWNER;
	if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
		wrflags |= URWLOCK_WRITE_WAITERS;

		rv = fueword32(&rwlock->rw_state, &state);
			umtx_key_release(&uq->uq_key);

		/* try to lock it */
		while (!(state & wrflags)) {
			if (__predict_false(URWLOCK_READER_COUNT(state) ==
			    URWLOCK_MAX_READERS)) {
				umtx_key_release(&uq->uq_key);
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state + 1);
				umtx_key_release(&uq->uq_key);
				MPASS(oldstate == state);
				umtx_key_release(&uq->uq_key);
			error = thread_check_susp(td, true);

		/* grab monitor lock */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		 * Re-read the state, in case it changed between the try-lock
		 * above and the check below.
		rv = fueword32(&rwlock->rw_state, &state);

		/* set read contention bit */
		while (error == 0 && (state & wrflags) &&
		    !(state & URWLOCK_READ_WAITERS)) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_READ_WAITERS);
				MPASS(oldstate == state);
			error = thread_check_susp(td, false);
			umtxq_unbusy_unlocked(&uq->uq_key);

		/* The state changed while setting the flags; restart. */
		if (!(state & wrflags)) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = thread_check_susp(td, true);

		 * The contention bit is set; before sleeping, increase
		 * the read-waiter count.
		rv = fueword32(&rwlock->rw_blocked_readers,
			umtxq_unbusy_unlocked(&uq->uq_key);
		suword32(&rwlock->rw_blocked_readers, blocked_readers + 1);

		while (state & wrflags) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			error = umtxq_sleep(uq, "urdlck", timeout == NULL ?
			umtxq_busy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			rv = fueword32(&rwlock->rw_state, &state);

		/* Decrease the read-waiter count; maybe clear the read contention bit. */
		rv = fueword32(&rwlock->rw_blocked_readers,
			umtxq_unbusy_unlocked(&uq->uq_key);
		suword32(&rwlock->rw_blocked_readers, blocked_readers - 1);
		if (blocked_readers == 1) {
			rv = fueword32(&rwlock->rw_state, &state);
				umtxq_unbusy_unlocked(&uq->uq_key);
				rv = casueword32(&rwlock->rw_state, state,
				    &oldstate, state & ~URWLOCK_READ_WAITERS);
					MPASS(oldstate == state);
					error1 = thread_check_susp(td, false);

		umtxq_unbusy_unlocked(&uq->uq_key);

	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
do_rw_wrlock(struct thread *td, struct urwlock *rwlock, struct _umtx_time *timeout)
	struct abs_timeout timo;
	int32_t state, oldstate;
	int32_t blocked_writers;
	int32_t blocked_readers;
	int error, error1, rv;

	error = fueword32(&rwlock->rw_flags, &flags);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	blocked_readers = 0;
		rv = fueword32(&rwlock->rw_state, &state);
			umtx_key_release(&uq->uq_key);
		while ((state & URWLOCK_WRITE_OWNER) == 0 &&
		    URWLOCK_READER_COUNT(state) == 0) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_WRITE_OWNER);
				umtx_key_release(&uq->uq_key);
				MPASS(oldstate == state);
				umtx_key_release(&uq->uq_key);
			error = thread_check_susp(td, true);

		if ((state & (URWLOCK_WRITE_OWNER |
		    URWLOCK_WRITE_WAITERS)) == 0 &&
		    blocked_readers != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_busy(&uq->uq_key);
			umtxq_signal_queue(&uq->uq_key, INT_MAX,
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);

		/* grab monitor lock */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		 * Re-read the state, in case it changed between the
		 * try-lock above and the check below.
		rv = fueword32(&rwlock->rw_state, &state);

		while (error == 0 && ((state & URWLOCK_WRITE_OWNER) ||
		    URWLOCK_READER_COUNT(state) != 0) &&
		    (state & URWLOCK_WRITE_WAITERS) == 0) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_WRITE_WAITERS);
				MPASS(oldstate == state);
			error = thread_check_susp(td, false);
			umtxq_unbusy_unlocked(&uq->uq_key);

		if ((state & URWLOCK_WRITE_OWNER) == 0 &&
		    URWLOCK_READER_COUNT(state) == 0) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = thread_check_susp(td, false);

		rv = fueword32(&rwlock->rw_blocked_writers,
			umtxq_unbusy_unlocked(&uq->uq_key);
		suword32(&rwlock->rw_blocked_writers, blocked_writers + 1);

		while ((state & URWLOCK_WRITE_OWNER) ||
		    URWLOCK_READER_COUNT(state) != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
			umtxq_unbusy(&uq->uq_key);
			error = umtxq_sleep(uq, "uwrlck", timeout == NULL ?
			umtxq_busy(&uq->uq_key);
			umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
			umtxq_unlock(&uq->uq_key);
			rv = fueword32(&rwlock->rw_state, &state);

		rv = fueword32(&rwlock->rw_blocked_writers,
			umtxq_unbusy_unlocked(&uq->uq_key);
		suword32(&rwlock->rw_blocked_writers, blocked_writers - 1);
		if (blocked_writers == 1) {
			rv = fueword32(&rwlock->rw_state, &state);
				umtxq_unbusy_unlocked(&uq->uq_key);
				rv = casueword32(&rwlock->rw_state, state,
				    &oldstate, state & ~URWLOCK_WRITE_WAITERS);
					MPASS(oldstate == state);
					error1 = thread_check_susp(td, false);
					 * We are leaving the URWLOCK_WRITE_WAITERS
					 * flag behind, but this should not harm
					 * correctness.
			rv = fueword32(&rwlock->rw_blocked_readers,
				umtxq_unbusy_unlocked(&uq->uq_key);
			blocked_readers = 0;

		umtxq_unbusy_unlocked(&uq->uq_key);

	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
static int
do_rw_unlock(struct thread *td, struct urwlock *rwlock)
{
	struct umtx_q *uq;
	uint32_t flags;
	int32_t state, oldstate;
	int error, rv, q, count;

	uq = td->td_umtxq;
	error = fueword32(&rwlock->rw_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	error = fueword32(&rwlock->rw_state, &state);
	if (error == -1) {
		error = EFAULT;
		goto out;
	}
	if (state & URWLOCK_WRITE_OWNER) {
		for (;;) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state & ~URWLOCK_WRITE_OWNER);
			if (rv == -1) {
				error = EFAULT;
				goto out;
			}
			if (rv == 1) {
				state = oldstate;
				if (!(oldstate & URWLOCK_WRITE_OWNER)) {
					error = EPERM;
					goto out;
				}
				error = thread_check_susp(td, true);
				if (error != 0)
					goto out;
			} else
				break;
		}
	} else if (URWLOCK_READER_COUNT(state) != 0) {
		for (;;) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state - 1);
			if (rv == -1) {
				error = EFAULT;
				goto out;
			}
			if (rv == 1) {
				state = oldstate;
				if (URWLOCK_READER_COUNT(oldstate) == 0) {
					error = EPERM;
					goto out;
				}
				error = thread_check_susp(td, true);
				if (error != 0)
					goto out;
			} else
				break;
		}
	} else {
		error = EPERM;
		goto out;
	}

	count = 0;
	if (!(flags & URWLOCK_PREFER_READER)) {
		if (state & URWLOCK_WRITE_WAITERS) {
			count = 1;
			q = UMTX_EXCLUSIVE_QUEUE;
		} else if (state & URWLOCK_READ_WAITERS) {
			count = INT_MAX;
			q = UMTX_SHARED_QUEUE;
		}
	} else {
		if (state & URWLOCK_READ_WAITERS) {
			count = INT_MAX;
			q = UMTX_SHARED_QUEUE;
		} else if (state & URWLOCK_WRITE_WAITERS) {
			count = 1;
			q = UMTX_EXCLUSIVE_QUEUE;
		}
	}

	if (count) {
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_signal_queue(&uq->uq_key, count, q);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
	}
out:
	umtx_key_release(&uq->uq_key);
	return (error);
}
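/*
 * The two functions below implement the legacy _usem semaphore, which
 * keeps its waiter indication in a word (_has_waiters) separate from
 * the count.  They are compiled in only for FreeBSD 9/10 binary
 * compatibility; newer binaries use the _usem2 operations further down.
 */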
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
static int
do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, count, count1;
	int error, rv, rv1;

	uq = td->td_umtxq;
	error = fueword32(&sem->_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

again:
	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	rv = casueword32(&sem->_has_waiters, 0, &count1, 1);
	if (rv == 0)
		rv1 = fueword32(&sem->_count, &count);
	if (rv == -1 || (rv == 0 && (rv1 == -1 || count != 0)) ||
	    (rv == 1 && count1 == 0)) {
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		if (rv == 1) {
			rv = thread_check_susp(td, true);
			if (rv == 0)
				goto again;
			error = rv;
			goto out;
		}
		if (rv == 0)
			rv = rv1;
		error = rv == -1 ? EFAULT : 0;
		goto out;
	}
	umtxq_lock(&uq->uq_key);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		umtxq_remove(uq);
		/* A relative timeout cannot be restarted. */
		if (error == ERESTART && timeout != NULL &&
		    (timeout->_flags & UMTX_ABSTIME) == 0)
			error = EINTR;
	}
	umtxq_unlock(&uq->uq_key);
out:
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Signal a userland semaphore.
 */
static int
do_sem_wake(struct thread *td, struct _usem *sem)
{
	struct umtx_key key;
	int error, cnt;
	uint32_t flags;

	error = fueword32(&sem->_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	if (cnt > 0) {
		/*
		 * A count greater than zero means that the semaphore memory
		 * is still referenced by user code, so the _has_waiters flag
		 * can be updated safely.
		 */
		if (cnt == 1) {
			umtxq_unlock(&key);
			error = suword32(&sem->_has_waiters, 0);
			umtxq_lock(&key);
			if (error == -1)
				error = EFAULT;
		}
		umtxq_signal(&key, 1);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
#endif
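/*
 * The _usem2 _count word packs the semaphore count and the
 * USEM_HAS_WAITERS bit together, so a waiter can be advertised and a
 * zero count detected with a single CAS.  A rough sketch of the
 * userland fast path this protocol assumes (illustrative only, not the
 * actual libc implementation):
 *
 *	uint32_t c;
 *
 *	for (c = atomic_load_acq_32(&sem->_count);;) {
 *		if (USEM_COUNT(c) == 0) {
 *			if (_umtx_op(sem, UMTX_OP_SEM2_WAIT, 0, NULL,
 *			    NULL) == -1)
 *				return (-1);
 *			c = atomic_load_acq_32(&sem->_count);
 *		} else if (atomic_fcmpset_acq_32(&sem->_count, &c, c - 1))
 *			return (0);
 *	}
 */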
static int
do_sem2_wait(struct thread *td, struct _usem2 *sem, struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t count, flags;
	int error, rv;

	uq = td->td_umtxq;
	flags = fuword32(&sem->_flags);
	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

again:
	error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);
	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	rv = fueword32(&sem->_count, &count);
	if (rv == -1) {
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);
		return (EFAULT);
	}
	for (;;) {
		if (USEM_COUNT(count) != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (0);
		}
		if (count == USEM_HAS_WAITERS)
			break;
		rv = casueword32(&sem->_count, 0, &count, USEM_HAS_WAITERS);
		if (rv == 0)
			break;
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);
		if (rv == -1)
			return (EFAULT);
		rv = thread_check_susp(td, true);
		if (rv != 0)
			return (rv);
		goto again;
	}
	umtxq_lock(&uq->uq_key);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		umtxq_remove(uq);
		if (timeout != NULL && (timeout->_flags & UMTX_ABSTIME) == 0) {
			/* A relative timeout cannot be restarted. */
			if (error == ERESTART)
				error = EINTR;
			if (error == EINTR) {
				abs_timeout_update(&timo);
				timespecsub(&timo.end, &timo.cur,
				    &timeout->_timeout);
			}
		}
	}
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Signal a userland semaphore.
 */
static int
do_sem2_wake(struct thread *td, struct _usem2 *sem)
{
	struct umtx_key key;
	int error, cnt, rv;
	uint32_t count, flags;

	rv = fueword32(&sem->_flags, &flags);
	if (rv == -1)
		return (EFAULT);
	if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	if (cnt > 0) {
		/*
		 * If this was the last sleeping thread, clear the waiters
		 * flag in _count.
		 */
		if (cnt == 1) {
			umtxq_unlock(&key);
			rv = fueword32(&sem->_count, &count);
			while (rv != -1 && count & USEM_HAS_WAITERS) {
				rv = casueword32(&sem->_count, count, &count,
				    count & ~USEM_HAS_WAITERS);
				if (rv == 1) {
					rv = thread_check_susp(td, true);
					if (rv != 0)
						break;
				}
			}
			if (rv == -1)
				error = EFAULT;
			else if (rv > 0)
				error = rv;
			umtxq_lock(&key);
		}

		umtxq_signal(&key, 1);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
static int
umtx_copyin_timeout(const void *uaddr, struct timespec *tsp)
{
	int error;

	error = copyin(uaddr, tsp, sizeof(*tsp));
	if (error == 0) {
		if (tsp->tv_sec < 0 ||
		    tsp->tv_nsec >= 1000000000 ||
		    tsp->tv_nsec < 0)
			error = EINVAL;
	}
	return (error);
}
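/*
 * Copy in a _umtx_time of the given size.  A size no larger than a bare
 * timespec selects the historical layout: only the timeout itself is
 * copied in, the clock defaults to CLOCK_REALTIME, and the cleared
 * flags make the timeout relative.
 */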
static int
umtx_copyin_umtx_time(const void *uaddr, size_t size, struct _umtx_time *tp)
{
	int error;

	if (size <= sizeof(tp->_timeout)) {
		tp->_clockid = CLOCK_REALTIME;
		tp->_flags = 0;
		error = copyin(uaddr, &tp->_timeout, sizeof(tp->_timeout));
	} else
		error = copyin(uaddr, tp, sizeof(*tp));
	if (error != 0)
		return (error);
	if (tp->_timeout.tv_sec < 0 ||
	    tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0)
		return (EINVAL);
	return (0);
}
static int
umtx_copyin_robust_lists(const void *uaddr, size_t size,
    struct umtx_robust_lists_params *rb)
{

	if (size > sizeof(*rb))
		return (EINVAL);
	return (copyin(uaddr, rb, size));
}
static int
umtx_copyout_timeout(void *uaddr, size_t sz, struct timespec *tsp)
{

	/*
	 * Should be guaranteed by the caller, sz == uaddr1 - sizeof(_umtx_time)
	 * and we're only called if sz >= sizeof(timespec) as supplied in the
	 * copyops.
	 */
	KASSERT(sz >= sizeof(*tsp),
	    ("umtx_copyops specifies incorrect sizes"));

	return (copyout(tsp, uaddr, sizeof(*tsp)));
}
static int
__umtx_op_unimpl(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (EOPNOTSUPP);
}
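/*
 * For the timed operations below, uap->uaddr2 points at the optional
 * timeout while uap->uaddr1 carries the size of that structure, which
 * is what lets the copyops decode both the native and the 32-bit
 * layouts.  A sketch of a timed wait as issued from userland
 * (illustrative only):
 *
 *	struct _umtx_time to = {
 *		._timeout = { .tv_sec = 1 },
 *		._flags = 0,
 *		._clockid = CLOCK_MONOTONIC,
 *	};
 *
 *	_umtx_op(&word, UMTX_OP_WAIT_UINT_PRIVATE, expected,
 *	    (void *)sizeof(to), &to);
 *
 * with _flags left zero for a relative timeout, or set to UMTX_ABSTIME
 * for an absolute one.
 */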
static int
__umtx_op_wait(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time timeout, *tm_p;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = ops->copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, ops->compat32, 0));
}

static int
__umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time timeout, *tm_p;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = ops->copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
}

static int
__umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = ops->copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
}

static int
__umtx_op_wake(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (kern_umtx_wake(td, uap->obj, uap->val, 0));
}
#define	BATCH_SIZE	128
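/*
 * Wake the threads sleeping on each of a user-supplied array of
 * addresses.  The pointers are copied in batches of BATCH_SIZE so that
 * an arbitrarily large request has a bounded kernel stack footprint.
 */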
static int
__umtx_op_nwake_private_native(struct thread *td, struct _umtx_op_args *uap)
{
	char *uaddrs[BATCH_SIZE], **upp;
	int count, error, i, pos, tocopy;

	upp = (char **)uap->obj;
	error = 0;
	for (count = uap->val, pos = 0; count > 0; count -= tocopy,
	    pos += tocopy) {
		tocopy = MIN(count, BATCH_SIZE);
		error = copyin(upp + pos, uaddrs, tocopy * sizeof(char *));
		if (error != 0)
			break;
		for (i = 0; i < tocopy; ++i) {
			kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
		}
		maybe_yield();
	}
	return (error);
}

static int
__umtx_op_nwake_private_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	uint32_t uaddrs[BATCH_SIZE], *upp;
	int count, error, i, pos, tocopy;

	upp = (uint32_t *)uap->obj;
	error = 0;
	for (count = uap->val, pos = 0; count > 0; count -= tocopy,
	    pos += tocopy) {
		tocopy = MIN(count, BATCH_SIZE);
		error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t));
		if (error != 0)
			break;
		for (i = 0; i < tocopy; ++i) {
			kern_umtx_wake(td, (void *)(uintptr_t)uaddrs[i],
			    INT_MAX, 1);
		}
		maybe_yield();
	}
	return (error);
}

static int
__umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{

	if (ops->compat32)
		return (__umtx_op_nwake_private_compat32(td, uap));
	return (__umtx_op_nwake_private_native(td, uap));
}

static int
__umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (kern_umtx_wake(td, uap->obj, uap->val, 1));
}
static int
__umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = ops->copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, 0));
}

static int
__umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY));
}

static int
__umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = ops->copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
}

static int
__umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_wake_umutex(td, uap->obj));
}

static int
__umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_unlock_umutex(td, uap->obj, false));
}

static int
__umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1));
}

static int
__umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = ops->copyin_timeout(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
}

static int
__umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_cv_signal(td, uap->obj));
}

static int
__umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_cv_broadcast(td, uap->obj));
}
static int
__umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
	} else {
		error = ops->copyin_umtx_time(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_wrlock(td, uap->obj, 0);
	} else {
		error = ops->copyin_umtx_time(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_wrlock(td, uap->obj, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_rw_unlock(td, uap->obj));
}
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
static int
__umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = ops->copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_sem_wait(td, uap->obj, tm_p));
}

static int
__umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_sem_wake(td, uap->obj));
}
#endif
static int
__umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_wake2_umutex(td, uap->obj, uap->val));
}
static int
__umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time *tm_p, timeout;
	size_t uasize;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		uasize = 0;
		tm_p = NULL;
	} else {
		uasize = (size_t)uap->uaddr1;
		error = ops->copyin_umtx_time(uap->uaddr2, uasize, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	error = do_sem2_wait(td, uap->obj, tm_p);
	if (error == EINTR && uap->uaddr2 != NULL &&
	    (timeout._flags & UMTX_ABSTIME) == 0 &&
	    uasize >= ops->umtx_time_sz + ops->timespec_sz) {
		error = ops->copyout_timeout(
		    (void *)((uintptr_t)uap->uaddr2 + ops->umtx_time_sz),
		    uasize - ops->umtx_time_sz, &timeout._timeout);
		if (error == 0)
			error = EINTR;
	}

	return (error);
}
static int
__umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_sem2_wake(td, uap->obj));
}
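/*
 * umtx shared memory registry.
 *
 * UMTX_OP_SHM associates an anonymous POSIX shared memory object with a
 * user-supplied key address.  Each registration is keyed by the
 * (vm object, offset) pair naming the page that holds the key word,
 * hashed into umtx_shm_registry[], and owns a page-sized shmfd that
 * callers map to obtain memory shared among all processes able to name
 * the same key.
 */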
#define	USHM_OBJ_UMTX(o)						\
    ((struct umtx_shm_obj_list *)(&(o)->umtx_data))

#define	USHMF_REG_LINKED	0x0001
#define	USHMF_OBJ_LINKED	0x0002
struct umtx_shm_reg {
	TAILQ_ENTRY(umtx_shm_reg) ushm_reg_link;
	LIST_ENTRY(umtx_shm_reg) ushm_obj_link;
	struct umtx_key	ushm_key;
	struct ucred	*ushm_cred;
	struct shmfd	*ushm_obj;
	u_int		ushm_refcnt;
	u_int		ushm_flags;
};

LIST_HEAD(umtx_shm_obj_list, umtx_shm_reg);
TAILQ_HEAD(umtx_shm_reg_head, umtx_shm_reg);

static uma_zone_t umtx_shm_reg_zone;
static struct umtx_shm_reg_head umtx_shm_registry[UMTX_CHAINS];
static struct mtx umtx_shm_lock;
static struct umtx_shm_reg_head umtx_shm_reg_delfree =
    TAILQ_HEAD_INITIALIZER(umtx_shm_reg_delfree);
static void umtx_shm_free_reg(struct umtx_shm_reg *reg);
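/*
 * Registrations that umtx_shm_object_terminated() unlinks are queued on
 * umtx_shm_reg_delfree and released from the task below rather than in
 * place, apparently because the final release (crfree(), shm_drop())
 * is not safe to perform from the VM object teardown path.
 */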
static void
umtx_shm_reg_delfree_tq(void *context __unused, int pending __unused)
{
	struct umtx_shm_reg_head d;
	struct umtx_shm_reg *reg, *reg1;

	TAILQ_INIT(&d);
	mtx_lock(&umtx_shm_lock);
	TAILQ_CONCAT(&d, &umtx_shm_reg_delfree, ushm_reg_link);
	mtx_unlock(&umtx_shm_lock);
	TAILQ_FOREACH_SAFE(reg, &d, ushm_reg_link, reg1) {
		TAILQ_REMOVE(&d, reg, ushm_reg_link);
		umtx_shm_free_reg(reg);
	}
}

static struct task umtx_shm_reg_delfree_task =
    TASK_INITIALIZER(0, umtx_shm_reg_delfree_tq, NULL);
static struct umtx_shm_reg *
umtx_shm_find_reg_locked(const struct umtx_key *key)
{
	struct umtx_shm_reg *reg;
	struct umtx_shm_reg_head *reg_head;

	KASSERT(key->shared, ("umtx_p_find_rg: private key"));
	mtx_assert(&umtx_shm_lock, MA_OWNED);
	reg_head = &umtx_shm_registry[key->hash];
	TAILQ_FOREACH(reg, reg_head, ushm_reg_link) {
		KASSERT(reg->ushm_key.shared,
		    ("non-shared key on reg %p %d", reg, reg->ushm_key.shared));
		if (reg->ushm_key.info.shared.object ==
		    key->info.shared.object &&
		    reg->ushm_key.info.shared.offset ==
		    key->info.shared.offset) {
			KASSERT(reg->ushm_key.type == TYPE_SHM, ("TYPE_USHM"));
			KASSERT(reg->ushm_refcnt > 0,
			    ("reg %p refcnt 0 onlist", reg));
			KASSERT((reg->ushm_flags & USHMF_REG_LINKED) != 0,
			    ("reg %p not linked", reg));
			reg->ushm_refcnt++;
			return (reg);
		}
	}
	return (NULL);
}
static struct umtx_shm_reg *
umtx_shm_find_reg(const struct umtx_key *key)
{
	struct umtx_shm_reg *reg;

	mtx_lock(&umtx_shm_lock);
	reg = umtx_shm_find_reg_locked(key);
	mtx_unlock(&umtx_shm_lock);
	return (reg);
}
static void
umtx_shm_free_reg(struct umtx_shm_reg *reg)
{

	chgumtxcnt(reg->ushm_cred->cr_ruidinfo, -1, 0);
	crfree(reg->ushm_cred);
	shm_drop(reg->ushm_obj);
	uma_zfree(umtx_shm_reg_zone, reg);
}
static bool
umtx_shm_unref_reg_locked(struct umtx_shm_reg *reg, bool force)
{
	bool res;

	mtx_assert(&umtx_shm_lock, MA_OWNED);
	KASSERT(reg->ushm_refcnt > 0, ("ushm_reg %p refcnt 0", reg));
	reg->ushm_refcnt--;
	res = reg->ushm_refcnt == 0;
	if (res || force) {
		if ((reg->ushm_flags & USHMF_REG_LINKED) != 0) {
			TAILQ_REMOVE(&umtx_shm_registry[reg->ushm_key.hash],
			    reg, ushm_reg_link);
			reg->ushm_flags &= ~USHMF_REG_LINKED;
		}
		if ((reg->ushm_flags & USHMF_OBJ_LINKED) != 0) {
			LIST_REMOVE(reg, ushm_obj_link);
			reg->ushm_flags &= ~USHMF_OBJ_LINKED;
		}
	}
	return (res);
}
static void
umtx_shm_unref_reg(struct umtx_shm_reg *reg, bool force)
{
	vm_object_t object;
	bool dofree;

	if (force) {
		object = reg->ushm_obj->shm_object;
		VM_OBJECT_WLOCK(object);
		object->flags |= OBJ_UMTXDEAD;
		VM_OBJECT_WUNLOCK(object);
	}
	mtx_lock(&umtx_shm_lock);
	dofree = umtx_shm_unref_reg_locked(reg, force);
	mtx_unlock(&umtx_shm_lock);
	if (dofree)
		umtx_shm_free_reg(reg);
}
void
umtx_shm_object_init(vm_object_t object)
{

	LIST_INIT(USHM_OBJ_UMTX(object));
}

void
umtx_shm_object_terminated(vm_object_t object)
{
	struct umtx_shm_reg *reg, *reg1;
	bool dofree;

	if (LIST_EMPTY(USHM_OBJ_UMTX(object)))
		return;

	dofree = false;
	mtx_lock(&umtx_shm_lock);
	LIST_FOREACH_SAFE(reg, USHM_OBJ_UMTX(object), ushm_obj_link, reg1) {
		if (umtx_shm_unref_reg_locked(reg, true)) {
			TAILQ_INSERT_TAIL(&umtx_shm_reg_delfree, reg,
			    ushm_reg_link);
			dofree = true;
		}
	}
	mtx_unlock(&umtx_shm_lock);
	if (dofree)
		taskqueue_enqueue(taskqueue_thread, &umtx_shm_reg_delfree_task);
}
static int
umtx_shm_create_reg(struct thread *td, const struct umtx_key *key,
    struct umtx_shm_reg **res)
{
	struct umtx_shm_reg *reg, *reg1;
	struct ucred *cred;
	int error;

	reg = umtx_shm_find_reg(key);
	if (reg != NULL) {
		*res = reg;
		return (0);
	}
	cred = td->td_ucred;
	if (!chgumtxcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_UMTXP)))
		return (ENOMEM);
	reg = uma_zalloc(umtx_shm_reg_zone, M_WAITOK | M_ZERO);
	reg->ushm_refcnt = 1;
	bcopy(key, &reg->ushm_key, sizeof(*key));
	reg->ushm_obj = shm_alloc(td->td_ucred, O_RDWR, false);
	reg->ushm_cred = crhold(cred);
	error = shm_dotruncate(reg->ushm_obj, PAGE_SIZE);
	if (error != 0) {
		umtx_shm_free_reg(reg);
		return (error);
	}
	mtx_lock(&umtx_shm_lock);
	reg1 = umtx_shm_find_reg_locked(key);
	if (reg1 != NULL) {
		mtx_unlock(&umtx_shm_lock);
		umtx_shm_free_reg(reg);
		*res = reg1;
		return (0);
	}
	reg->ushm_refcnt++;
	TAILQ_INSERT_TAIL(&umtx_shm_registry[key->hash], reg, ushm_reg_link);
	LIST_INSERT_HEAD(USHM_OBJ_UMTX(key->info.shared.object), reg,
	    ushm_obj_link);
	reg->ushm_flags = USHMF_REG_LINKED | USHMF_OBJ_LINKED;
	mtx_unlock(&umtx_shm_lock);
	*res = reg;
	return (0);
}
static int
umtx_shm_alive(struct thread *td, void *addr)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	int res, ret;
	boolean_t wired;

	map = &td->td_proc->p_vmspace->vm_map;
	res = vm_map_lookup(&map, (uintptr_t)addr, VM_PROT_READ, &entry,
	    &object, &pindex, &prot, &wired);
	if (res != KERN_SUCCESS)
		return (EFAULT);
	if (object == NULL)
		ret = EINVAL;
	else
		ret = (object->flags & OBJ_UMTXDEAD) != 0 ? ENOTTY : 0;
	vm_map_lookup_done(map, entry);
	return (ret);
}
static void
umtx_shm_init(void)
{
	int i;

	umtx_shm_reg_zone = uma_zcreate("umtx_shm", sizeof(struct umtx_shm_reg),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&umtx_shm_lock, "umtxshm", NULL, MTX_DEF);
	for (i = 0; i < nitems(umtx_shm_registry); i++)
		TAILQ_INIT(&umtx_shm_registry[i]);
}
static int
umtx_shm(struct thread *td, void *addr, u_int flags)
{
	struct umtx_key key;
	struct umtx_shm_reg *reg;
	struct file *fp;
	int error, fd;

	if (__bitcount(flags & (UMTX_SHM_CREAT | UMTX_SHM_LOOKUP |
	    UMTX_SHM_DESTROY | UMTX_SHM_ALIVE)) != 1)
		return (EINVAL);
	if ((flags & UMTX_SHM_ALIVE) != 0)
		return (umtx_shm_alive(td, addr));
	error = umtx_key_get(addr, TYPE_SHM, PROCESS_SHARE, &key);
	if (error != 0)
		return (error);
	KASSERT(key.shared == 1, ("non-shared key"));
	if ((flags & UMTX_SHM_CREAT) != 0) {
		error = umtx_shm_create_reg(td, &key, &reg);
	} else {
		reg = umtx_shm_find_reg(&key);
		if (reg == NULL)
			error = ESRCH;
	}
	umtx_key_release(&key);
	if (error != 0)
		return (error);
	KASSERT(reg != NULL, ("no reg"));
	if ((flags & UMTX_SHM_DESTROY) != 0) {
		umtx_shm_unref_reg(reg, true);
	} else {
#ifdef MAC
		error = mac_posixshm_check_open(td->td_ucred,
		    reg->ushm_obj, FFLAGS(O_RDWR));
		if (error == 0)
#endif
			error = shm_access(reg->ushm_obj, td->td_ucred,
			    FFLAGS(O_RDWR));
		if (error == 0)
			error = falloc_caps(td, &fp, &fd, O_CLOEXEC, NULL);
		if (error == 0) {
			shm_hold(reg->ushm_obj);
			finit(fp, FFLAGS(O_RDWR), DTYPE_SHM, reg->ushm_obj,
			    &shm_ops);
			td->td_retval[0] = fd;
			fdrop(fp, td);
		}
	}
	umtx_shm_unref_reg(reg, false);
	return (error);
}
static int
__umtx_op_shm(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (umtx_shm(td, uap->uaddr1, uap->val));
}
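/*
 * A rough sketch of the intended userland usage of UMTX_OP_SHM
 * (illustrative only; libthr's process-shared lock support is the real
 * consumer):
 *
 *	int fd;
 *	void *p;
 *
 *	fd = _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_CREAT, &key_word, NULL);
 *	if (fd >= 0) {
 *		p = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *		close(fd);
 *	}
 */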
static int
__umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct umtx_robust_lists_params rb;
	int error;

	if (ops->compat32) {
		if ((td->td_pflags2 & TDP2_COMPAT32RB) == 0 &&
		    (td->td_rb_list != 0 || td->td_rbp_list != 0 ||
		    td->td_rb_inact != 0))
			return (EBUSY);
	} else if ((td->td_pflags2 & TDP2_COMPAT32RB) != 0) {
		return (EBUSY);
	}

	bzero(&rb, sizeof(rb));
	error = ops->copyin_robust_lists(uap->uaddr1, uap->val, &rb);
	if (error != 0)
		return (error);

	if (ops->compat32)
		td->td_pflags2 |= TDP2_COMPAT32RB;

	td->td_rb_list = rb.robust_list_offset;
	td->td_rbp_list = rb.robust_priv_list_offset;
	td->td_rb_inact = rb.robust_inact_offset;
	return (0);
}
#if defined(__i386__) || defined(__amd64__)
/*
 * Provide the standard 32-bit definitions for x86, since native/compat32 use a
 * 32-bit time_t there.  Other architectures just need the i386 definitions
 * along with their standard compat32.
 */
struct timespecx32 {
	int64_t			tv_sec;
	int32_t			tv_nsec;
};

struct umtx_timex32 {
	struct	timespecx32	_timeout;
	uint32_t		_flags;
	uint32_t		_clockid;
};

#ifndef __i386__
#define	timespeci386	timespec32
#define	umtx_timei386	umtx_time32
#endif
#else /* !__i386__ && !__amd64__ */
/* 32-bit architectures can emulate i386, so define these almost everywhere. */
struct timespeci386 {
	int32_t			tv_sec;
	int32_t			tv_nsec;
};

struct umtx_timei386 {
	struct	timespeci386	_timeout;
	uint32_t		_flags;
	uint32_t		_clockid;
};

#if defined(__LP64__)
#define	timespecx32	timespec32
#define	umtx_timex32	umtx_time32
#endif
#endif
static int
umtx_copyin_robust_lists32(const void *uaddr, size_t size,
    struct umtx_robust_lists_params *rbp)
{
	struct umtx_robust_lists_params_compat32 rb32;
	int error;

	if (size > sizeof(rb32))
		return (EINVAL);
	bzero(&rb32, sizeof(rb32));
	error = copyin(uaddr, &rb32, size);
	if (error != 0)
		return (error);
	CP(rb32, *rbp, robust_list_offset);
	CP(rb32, *rbp, robust_priv_list_offset);
	CP(rb32, *rbp, robust_inact_offset);
	return (0);
}
#ifndef __i386__
static int
umtx_copyin_timeouti386(const void *uaddr, struct timespec *tsp)
{
	struct timespeci386 ts32;
	int error;

	error = copyin(uaddr, &ts32, sizeof(ts32));
	if (error == 0) {
		if (ts32.tv_sec < 0 ||
		    ts32.tv_nsec >= 1000000000 ||
		    ts32.tv_nsec < 0)
			error = EINVAL;
		else {
			CP(ts32, *tsp, tv_sec);
			CP(ts32, *tsp, tv_nsec);
		}
	}
	return (error);
}
static int
umtx_copyin_umtx_timei386(const void *uaddr, size_t size, struct _umtx_time *tp)
{
	struct umtx_timei386 t32;
	int error;

	t32._clockid = CLOCK_REALTIME;
	t32._flags = 0;
	if (size <= sizeof(t32._timeout))
		error = copyin(uaddr, &t32._timeout, sizeof(t32._timeout));
	else
		error = copyin(uaddr, &t32, sizeof(t32));
	if (error != 0)
		return (error);
	if (t32._timeout.tv_sec < 0 ||
	    t32._timeout.tv_nsec >= 1000000000 || t32._timeout.tv_nsec < 0)
		return (EINVAL);
	TS_CP(t32, *tp, _timeout);
	CP(t32, *tp, _flags);
	CP(t32, *tp, _clockid);
	return (0);
}
static int
umtx_copyout_timeouti386(void *uaddr, size_t sz, struct timespec *tsp)
{
	struct timespeci386 remain32 = {
		.tv_sec = tsp->tv_sec,
		.tv_nsec = tsp->tv_nsec,
	};

	/*
	 * Should be guaranteed by the caller, sz == uaddr1 - sizeof(_umtx_time)
	 * and we're only called if sz >= sizeof(timespec) as supplied in the
	 * copyops.
	 */
	KASSERT(sz >= sizeof(remain32),
	    ("umtx_copyops specifies incorrect sizes"));

	return (copyout(&remain32, uaddr, sizeof(remain32)));
}
#endif /* !__i386__ */
#if defined(__i386__) || defined(__LP64__)
static int
umtx_copyin_timeoutx32(const void *uaddr, struct timespec *tsp)
{
	struct timespecx32 ts32;
	int error;

	error = copyin(uaddr, &ts32, sizeof(ts32));
	if (error == 0) {
		if (ts32.tv_sec < 0 ||
		    ts32.tv_nsec >= 1000000000 ||
		    ts32.tv_nsec < 0)
			error = EINVAL;
		else {
			CP(ts32, *tsp, tv_sec);
			CP(ts32, *tsp, tv_nsec);
		}
	}
	return (error);
}

static int
umtx_copyin_umtx_timex32(const void *uaddr, size_t size, struct _umtx_time *tp)
{
	struct umtx_timex32 t32;
	int error;

	t32._clockid = CLOCK_REALTIME;
	t32._flags = 0;
	if (size <= sizeof(t32._timeout))
		error = copyin(uaddr, &t32._timeout, sizeof(t32._timeout));
	else
		error = copyin(uaddr, &t32, sizeof(t32));
	if (error != 0)
		return (error);
	if (t32._timeout.tv_sec < 0 ||
	    t32._timeout.tv_nsec >= 1000000000 || t32._timeout.tv_nsec < 0)
		return (EINVAL);
	TS_CP(t32, *tp, _timeout);
	CP(t32, *tp, _flags);
	CP(t32, *tp, _clockid);
	return (0);
}

static int
umtx_copyout_timeoutx32(void *uaddr, size_t sz, struct timespec *tsp)
{
	struct timespecx32 remain32 = {
		.tv_sec = tsp->tv_sec,
		.tv_nsec = tsp->tv_nsec,
	};

	/*
	 * Should be guaranteed by the caller, sz == uaddr1 - sizeof(_umtx_time)
	 * and we're only called if sz >= sizeof(timespec) as supplied in the
	 * copyops.
	 */
	KASSERT(sz >= sizeof(remain32),
	    ("umtx_copyops specifies incorrect sizes"));

	return (copyout(&remain32, uaddr, sizeof(remain32)));
}
#endif /* __i386__ || __LP64__ */
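/*
 * Dispatch table for the _umtx_op(2) operations, indexed by the op
 * number.  Slots whose implementation is compiled out fall back to
 * __umtx_op_unimpl(), which returns EOPNOTSUPP.
 */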
typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *umtx_ops);

static const _umtx_op_func op_table[] = {
	[UMTX_OP_RESERVED0]	= __umtx_op_unimpl,
	[UMTX_OP_RESERVED1]	= __umtx_op_unimpl,
	[UMTX_OP_WAIT]		= __umtx_op_wait,
	[UMTX_OP_WAKE]		= __umtx_op_wake,
	[UMTX_OP_MUTEX_TRYLOCK]	= __umtx_op_trylock_umutex,
	[UMTX_OP_MUTEX_LOCK]	= __umtx_op_lock_umutex,
	[UMTX_OP_MUTEX_UNLOCK]	= __umtx_op_unlock_umutex,
	[UMTX_OP_SET_CEILING]	= __umtx_op_set_ceiling,
	[UMTX_OP_CV_WAIT]	= __umtx_op_cv_wait,
	[UMTX_OP_CV_SIGNAL]	= __umtx_op_cv_signal,
	[UMTX_OP_CV_BROADCAST]	= __umtx_op_cv_broadcast,
	[UMTX_OP_WAIT_UINT]	= __umtx_op_wait_uint,
	[UMTX_OP_RW_RDLOCK]	= __umtx_op_rw_rdlock,
	[UMTX_OP_RW_WRLOCK]	= __umtx_op_rw_wrlock,
	[UMTX_OP_RW_UNLOCK]	= __umtx_op_rw_unlock,
	[UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private,
	[UMTX_OP_WAKE_PRIVATE]	= __umtx_op_wake_private,
	[UMTX_OP_MUTEX_WAIT]	= __umtx_op_wait_umutex,
	[UMTX_OP_MUTEX_WAKE]	= __umtx_op_wake_umutex,
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
	[UMTX_OP_SEM_WAIT]	= __umtx_op_sem_wait,
	[UMTX_OP_SEM_WAKE]	= __umtx_op_sem_wake,
#else
	[UMTX_OP_SEM_WAIT]	= __umtx_op_unimpl,
	[UMTX_OP_SEM_WAKE]	= __umtx_op_unimpl,
#endif
	[UMTX_OP_NWAKE_PRIVATE]	= __umtx_op_nwake_private,
	[UMTX_OP_MUTEX_WAKE2]	= __umtx_op_wake2_umutex,
	[UMTX_OP_SEM2_WAIT]	= __umtx_op_sem2_wait,
	[UMTX_OP_SEM2_WAKE]	= __umtx_op_sem2_wake,
	[UMTX_OP_SHM]		= __umtx_op_shm,
	[UMTX_OP_ROBUST_LISTS]	= __umtx_op_robust_lists,
};
static const struct umtx_copyops umtx_native_ops = {
	.copyin_timeout = umtx_copyin_timeout,
	.copyin_umtx_time = umtx_copyin_umtx_time,
	.copyin_robust_lists = umtx_copyin_robust_lists,
	.copyout_timeout = umtx_copyout_timeout,
	.timespec_sz = sizeof(struct timespec),
	.umtx_time_sz = sizeof(struct _umtx_time),
};

#ifndef __i386__
static const struct umtx_copyops umtx_native_opsi386 = {
	.copyin_timeout = umtx_copyin_timeouti386,
	.copyin_umtx_time = umtx_copyin_umtx_timei386,
	.copyin_robust_lists = umtx_copyin_robust_lists32,
	.copyout_timeout = umtx_copyout_timeouti386,
	.timespec_sz = sizeof(struct timespeci386),
	.umtx_time_sz = sizeof(struct umtx_timei386),
	.compat32 = true,
};
#endif
#if defined(__i386__) || defined(__LP64__)
/* i386 can emulate other 32-bit archs, too! */
static const struct umtx_copyops umtx_native_opsx32 = {
	.copyin_timeout = umtx_copyin_timeoutx32,
	.copyin_umtx_time = umtx_copyin_umtx_timex32,
	.copyin_robust_lists = umtx_copyin_robust_lists32,
	.copyout_timeout = umtx_copyout_timeoutx32,
	.timespec_sz = sizeof(struct timespecx32),
	.umtx_time_sz = sizeof(struct umtx_timex32),
	.compat32 = true,
};

#ifdef COMPAT_FREEBSD32
#ifdef __amd64__
#define	umtx_native_ops32	umtx_native_opsi386
#else
#define	umtx_native_ops32	umtx_native_opsx32
#endif
#endif /* COMPAT_FREEBSD32 */
#endif /* __i386__ || __LP64__ */
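/*
 * UMTX_OP__32BIT and UMTX_OP__I386 are or'ed into the op number by
 * 64-bit processes operating on locks shared with 32-bit processes.
 * They only select the copyin/copyout layout and are masked off before
 * the dispatch through op_table.
 */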
#define	UMTX_OP__FLAGS	(UMTX_OP__32BIT | UMTX_OP__I386)

static int
kern__umtx_op(struct thread *td, void *obj, int op, unsigned long val,
    void *uaddr1, void *uaddr2, const struct umtx_copyops *ops)
{
	struct _umtx_op_args uap = {
		.obj = obj,
		.op = op & ~UMTX_OP__FLAGS,
		.val = val,
		.uaddr1 = uaddr1,
		.uaddr2 = uaddr2
	};

	if (uap.op >= nitems(op_table))
		return (EINVAL);
	return ((*op_table[uap.op])(td, &uap, ops));
}
int
sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
{
	const struct umtx_copyops *umtx_ops;

	umtx_ops = &umtx_native_ops;
#ifdef __LP64__
	if ((uap->op & (UMTX_OP__32BIT | UMTX_OP__I386)) != 0) {
		if ((uap->op & UMTX_OP__I386) != 0)
			umtx_ops = &umtx_native_opsi386;
		else
			umtx_ops = &umtx_native_opsx32;
	}
#elif !defined(__i386__)
	/* We consider UMTX_OP__32BIT a nop on !i386 ILP32. */
	if ((uap->op & UMTX_OP__I386) != 0)
		umtx_ops = &umtx_native_opsi386;
#else
	/* Likewise, UMTX_OP__I386 is a nop on i386. */
	if ((uap->op & UMTX_OP__32BIT) != 0)
		umtx_ops = &umtx_native_opsx32;
#endif
	return (kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr1,
	    uap->uaddr2, umtx_ops));
}
#ifdef COMPAT_FREEBSD32
int
freebsd32__umtx_op(struct thread *td, struct freebsd32__umtx_op_args *uap)
{

	return (kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr,
	    uap->uaddr2, &umtx_native_ops32));
}
#endif
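/*
 * umtx_thread_init() and umtx_thread_fini() appear to run from the
 * thread zone's init/fini path, so each struct thread carries a umtx_q
 * for its whole lifetime.
 */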
void
umtx_thread_init(struct thread *td)
{

	td->td_umtxq = umtxq_alloc();
	td->td_umtxq->uq_thread = td;
}

void
umtx_thread_fini(struct thread *td)
{

	umtxq_free(td->td_umtxq);
}
/*
 * Called when a new thread is created, e.g. by fork().
 */
void
umtx_thread_alloc(struct thread *td)
{
	struct umtx_q *uq;

	uq = td->td_umtxq;
	uq->uq_inherited_pri = PRI_MAX;

	KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
	KASSERT(uq->uq_thread == td, ("uq_thread != td"));
	KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
	KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
}
/*
 * Clear robust lists for all of the process's threads, not delaying the
 * cleanup to thread exit, since the relevant address space is
 * destroyed right now.
 */
void
umtx_exec(struct proc *p)
{
	struct thread *td;

	KASSERT(p == curproc, ("need curproc"));
	KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
	    (p->p_flag & P_STOPPED_SINGLE) != 0,
	    ("curproc must be single-threaded"));
	/*
	 * There is no need to lock the list as only this thread can be
	 * running.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(td == curthread ||
		    ((td->td_flags & TDF_BOUNDARY) != 0 && TD_IS_SUSPENDED(td)),
		    ("running thread %p %p", p, td));
		umtx_thread_cleanup(td);
		td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
	}
}
void
umtx_thread_exit(struct thread *td)
{

	umtx_thread_cleanup(td);
}
static int
umtx_read_uptr(struct thread *td, uintptr_t ptr, uintptr_t *res, bool compat32)
{
	u_long res1;
	uint32_t res32;
	int error;

	if (compat32) {
		error = fueword32((void *)ptr, &res32);
		if (error == 0)
			res1 = res32;
	} else {
		error = fueword((void *)ptr, &res1);
	}
	if (error == 0)
		*res = res1;
	else
		error = EFAULT;
	return (error);
}
static void
umtx_read_rb_list(struct thread *td, struct umutex *m, uintptr_t *rb_list,
    bool compat32)
{
	struct umutex32 m32;

	if (compat32) {
		memcpy(&m32, m, sizeof(m32));
		*rb_list = m32.m_rb_lnk;
	} else
		*rb_list = m->m_rb_lnk;
}
static int
umtx_handle_rb(struct thread *td, uintptr_t rbp, uintptr_t *rb_list, bool inact,
    bool compat32)
{
	struct umutex m;
	int error;

	KASSERT(td->td_proc == curproc, ("need current vmspace"));
	error = copyin((void *)rbp, &m, sizeof(m));
	if (error != 0)
		return (error);
	if (rb_list != NULL)
		umtx_read_rb_list(td, &m, rb_list, compat32);
	if ((m.m_flags & UMUTEX_ROBUST) == 0)
		return (EINVAL);
	if ((m.m_owner & ~UMUTEX_CONTESTED) != td->td_tid)
		/* inact is cleared after unlock, allow the inconsistency */
		return (inact ? 0 : EINVAL);
	return (do_unlock_umutex(td, (struct umutex *)rbp, true));
}
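/*
 * Walk one robust mutex list.  Each robust umutex names its successor
 * through m_rb_lnk, and the traversal is capped at umtx_max_rb entries
 * so that a corrupted or cyclic user list cannot wedge an exiting
 * thread in the kernel.
 */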
static void
umtx_cleanup_rb_list(struct thread *td, uintptr_t rb_list, uintptr_t *rb_inact,
    const char *name, bool compat32)
{
	int error, i;
	uintptr_t rbp;
	bool inact;

	if (rb_list == 0)
		return;
	error = umtx_read_uptr(td, rb_list, &rbp, compat32);
	for (i = 0; error == 0 && rbp != 0 && i < umtx_max_rb; i++) {
		if (rbp == *rb_inact) {
			inact = true;
			*rb_inact = 0;
		} else
			inact = false;
		error = umtx_handle_rb(td, rbp, &rbp, inact, compat32);
	}
	if (i == umtx_max_rb && umtx_verbose_rb) {
		uprintf("comm %s pid %d: reached umtx %smax rb %d\n",
		    td->td_proc->p_comm, td->td_proc->p_pid, name, umtx_max_rb);
	}
	if (error != 0 && umtx_verbose_rb) {
		uprintf("comm %s pid %d: handling %srb error %d\n",
		    td->td_proc->p_comm, td->td_proc->p_pid, name, error);
	}
}
/*
 * Clean up umtx data.
 */
static void
umtx_thread_cleanup(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;
	uintptr_t rb_inact;
	bool compat32;

	/*
	 * Disown pi mutexes.
	 */
	uq = td->td_umtxq;
	if (uq != NULL) {
		if (uq->uq_inherited_pri != PRI_MAX ||
		    !TAILQ_EMPTY(&uq->uq_pi_contested)) {
			mtx_lock(&umtx_lock);
			uq->uq_inherited_pri = PRI_MAX;
			while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
				pi->pi_owner = NULL;
				TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
			}
			mtx_unlock(&umtx_lock);
		}
		sched_lend_user_prio_cond(td, PRI_MAX);
	}

	compat32 = (td->td_pflags2 & TDP2_COMPAT32RB) != 0;
	td->td_pflags2 &= ~TDP2_COMPAT32RB;

	if (td->td_rb_inact == 0 && td->td_rb_list == 0 && td->td_rbp_list == 0)
		return;

	/*
	 * Handle terminated robust mutexes.  Must be done after
	 * robust pi disown, otherwise unlock could see unowned
	 * pi mutexes.
	 */
	rb_inact = td->td_rb_inact;
	if (rb_inact != 0)
		(void)umtx_read_uptr(td, rb_inact, &rb_inact, compat32);
	umtx_cleanup_rb_list(td, td->td_rb_list, &rb_inact, "", compat32);
	umtx_cleanup_rb_list(td, td->td_rbp_list, &rb_inact, "priv ", compat32);
	if (rb_inact != 0)
		(void)umtx_handle_rb(td, rb_inact, NULL, true, compat32);
}