 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015, 2016 The FreeBSD Foundation
 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_umtx_profiling.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>

#include <security/mac/mac_framework.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <machine/atomic.h>
#include <machine/cpu.h>

#ifdef COMPAT_FREEBSD32
#include <compat/freebsd32/freebsd32_proto.h>

#define _UMUTEX_WAIT		2

#define UPROF_PERC_BIGGER(w, f, sw, sf)				\
    (((w) > (sw)) || ((w) == (sw) && (f) > (sf)))
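/*
 * Illustrative note on the fixed-point scheme, assuming the usage in
 * sysctl_debug_umtx_chains_peaks() below: a percentage is carried as a
 * (whole, fract) pair, where whole = max_length * 100 and
 * fract = (whole % tot) * 100 holds two more decimal digits of the
 * quotient.  E.g., with tot = 7 and max_length = 3: whole = 300,
 * whole / tot = 42 and fract / tot = (6 * 100) / 7 = 85, i.e. 42.85%.
 * UPROF_PERC_BIGGER() orders two such pairs lexicographically.
 */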
/* Priority inheritance mutex info. */
    struct thread		*pi_owner;

    /* List entry to link umtx locks held by a thread */
    TAILQ_ENTRY(umtx_pi)	pi_link;

    /* List entry in hash */
    TAILQ_ENTRY(umtx_pi)	pi_hashlink;

    /* List for waiters */
    TAILQ_HEAD(,umtx_q)		pi_blocked;

    /* Identify a userland lock object */
    struct umtx_key		pi_key;

/* A userland synchronization object user. */
    /* Linked list for the hash. */
    TAILQ_ENTRY(umtx_q)		uq_link;

    struct umtx_key		uq_key;

#define UQF_UMTXQ	0x0001

    /* The thread that is waiting. */
    struct thread		*uq_thread;

    /*
     * Blocked on a PI mutex.  Reads can use the chain lock
     * or umtx_lock; writes must hold both the chain lock
     * and umtx_lock.
     */
    struct umtx_pi		*uq_pi_blocked;

    /* On blocked list */
    TAILQ_ENTRY(umtx_q)		uq_lockq;

    /* PI mutexes owned by this thread that others contend for */
    TAILQ_HEAD(,umtx_pi)	uq_pi_contested;

    /* Inherited priority from PP mutex */
    u_char			uq_inherited_pri;

    /* Spare queue ready to be reused */
    struct umtxq_queue		*uq_spare_queue;

    /* The queue we are on */
    struct umtxq_queue		*uq_cur_queue;
TAILQ_HEAD(umtxq_head, umtx_q);

/* Per-key wait-queue */
    struct umtxq_head	head;
    LIST_ENTRY(umtxq_queue)	link;

LIST_HEAD(umtxq_list, umtxq_queue);

/* Userland lock object's wait-queue chain */
    /* Lock for this chain. */

    /* List of sleep queues. */
    struct umtxq_list	uc_queue[2];
#define UMTX_SHARED_QUEUE	0
#define UMTX_EXCLUSIVE_QUEUE	1

    LIST_HEAD(, umtxq_queue) uc_spare_queue;

    /* Chain lock waiters */

    /* All PI in the list */
    TAILQ_HEAD(,umtx_pi)	uc_pi_list;

#ifdef UMTX_PROFILING

#define UMTXQ_LOCKED_ASSERT(uc)		mtx_assert(&(uc)->uc_lock, MA_OWNED)
/*
 * Don't propagate time-sharing priority; there is a security reason:
 * a user can simply create a PI mutex, let thread A lock the mutex,
 * and let another thread B block on the mutex.  Because B is
 * sleeping, its priority will be boosted; this boosts A's priority via
 * priority propagation too, and A's priority would never be lowered
 * even if it were using 100% CPU, which is unfair to other processes.
 */
#define UPRI(td)	(((td)->td_user_pri >= PRI_MIN_TIMESHARE &&	\
			  (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?	\
			 PRI_MAX_TIMESHARE : (td)->td_user_pri)
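/*
 * A short sketch of the clamping, assuming the stock priority ranges
 * from <sys/priority.h>:
 *
 *	td->td_user_pri = PRI_MIN_TIMESHARE + 4;   (time-sharing thread)
 *	UPRI(td);	== PRI_MAX_TIMESHARE, no effective boost
 *
 *	td->td_user_pri = PRI_MIN_REALTIME + 4;    (real-time thread)
 *	UPRI(td);	passed through unchanged
 */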
#define GOLDEN_RATIO_PRIME	2654404609U
#define UMTX_CHAINS		512
#define UMTX_SHIFTS		(__WORD_BIT - 9)

#define GET_SHARE(flags)	\
    (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)

#define BUSY_SPINS		200

    bool	is_abs_real;	/* TIMER_ABSTIME && CLOCK_REALTIME* */
#ifdef COMPAT_FREEBSD32
    volatile __lwpid_t	m_owner;	/* Owner of the mutex */
    __uint32_t		m_flags;	/* Flags of the mutex */
    __uint32_t		m_ceilings[2];	/* Priority protect ceiling */
    __uint32_t		m_rb_lnk;	/* Robust linkage */
    __uint32_t		m_spare[2];

_Static_assert(sizeof(struct umutex) == sizeof(struct umutex32), "umutex32");
_Static_assert(__offsetof(struct umutex, m_spare[0]) ==
    __offsetof(struct umutex32, m_spare[0]), "m_spare32");

int umtx_shm_vnobj_persistent = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_vnode_persistent, CTLFLAG_RWTUN,
    &umtx_shm_vnobj_persistent, 0,
    "False forces destruction of umtx attached to file, on last close");
static int umtx_max_rb = 1000;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_max_robust, CTLFLAG_RWTUN,

static uma_zone_t		umtx_pi_zone;
static struct umtxq_chain	umtxq_chains[2][UMTX_CHAINS];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
static int			umtx_pi_allocated;

static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx debug");
SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
    &umtx_pi_allocated, 0, "Allocated umtx_pi");
static int umtx_verbose_rb = 1;
SYSCTL_INT(_debug_umtx, OID_AUTO, robust_faults_verbose, CTLFLAG_RWTUN,

#ifdef UMTX_PROFILING
static long max_length;
SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length");
static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD, 0, "umtx chain stats");
static void abs_timeout_update(struct abs_timeout *timo);

static void umtx_shm_init(void);
static void umtxq_sysinit(void *);
static void umtxq_hash(struct umtx_key *key);
static struct umtxq_chain *umtxq_getchain(struct umtx_key *key);
static void umtxq_lock(struct umtx_key *key);
static void umtxq_unlock(struct umtx_key *key);
static void umtxq_busy(struct umtx_key *key);
static void umtxq_unbusy(struct umtx_key *key);
static void umtxq_insert_queue(struct umtx_q *uq, int q);
static void umtxq_remove_queue(struct umtx_q *uq, int q);
static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *);
static int umtxq_count(struct umtx_key *key);
static struct umtx_pi *umtx_pi_alloc(int);
static void umtx_pi_free(struct umtx_pi *pi);
static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags,
static void umtx_thread_cleanup(struct thread *td);
static void umtx_exec_hook(void *arg __unused, struct proc *p __unused,
    struct image_params *imgp __unused);
SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);

#define umtxq_signal(key, nwake)	umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
#define umtxq_insert(uq)	umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
#define umtxq_remove(uq)	umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)

static struct mtx umtx_lock;
#ifdef UMTX_PROFILING
umtx_init_profiling(void)
    struct sysctl_oid *chain_oid;

    for (i = 0; i < UMTX_CHAINS; ++i) {
        snprintf(chain_name, sizeof(chain_name), "%d", i);
        chain_oid = SYSCTL_ADD_NODE(NULL,
            SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
            chain_name, CTLFLAG_RD, NULL, "umtx hash stats");
        SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
            "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL);
        SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
            "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL);

sysctl_debug_umtx_chains_peaks(SYSCTL_HANDLER_ARGS)
    struct umtxq_chain *uc;
    u_int fract, i, j, tot, whole;
    u_int sf0, sf1, sf2, sf3, sf4;
    u_int si0, si1, si2, si3, si4;
    u_int sw0, sw1, sw2, sw3, sw4;

    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < 2; i++) {
        for (j = 0; j < UMTX_CHAINS; ++j) {
            uc = &umtxq_chains[i][j];
            mtx_lock(&uc->uc_lock);
            tot += uc->max_length;
            mtx_unlock(&uc->uc_lock);
            sbuf_printf(&sb, "%u) Empty ", i);
        sf0 = sf1 = sf2 = sf3 = sf4 = 0;
        si0 = si1 = si2 = si3 = si4 = 0;
        sw0 = sw1 = sw2 = sw3 = sw4 = 0;
        for (j = 0; j < UMTX_CHAINS; j++) {
            uc = &umtxq_chains[i][j];
            mtx_lock(&uc->uc_lock);
            whole = uc->max_length * 100;
            mtx_unlock(&uc->uc_lock);
            fract = (whole % tot) * 100;
            if (UPROF_PERC_BIGGER(whole, fract, sw0, sf0)) {
            } else if (UPROF_PERC_BIGGER(whole, fract, sw1,
            } else if (UPROF_PERC_BIGGER(whole, fract, sw2,
            } else if (UPROF_PERC_BIGGER(whole, fract, sw3,
            } else if (UPROF_PERC_BIGGER(whole, fract, sw4,
        sbuf_printf(&sb, "queue %u:\n", i);
        sbuf_printf(&sb, "1st: %u.%u%% idx: %u\n", sw0 / tot,
        sbuf_printf(&sb, "2nd: %u.%u%% idx: %u\n", sw1 / tot,
        sbuf_printf(&sb, "3rd: %u.%u%% idx: %u\n", sw2 / tot,
        sbuf_printf(&sb, "4th: %u.%u%% idx: %u\n", sw3 / tot,
        sbuf_printf(&sb, "5th: %u.%u%% idx: %u\n", sw4 / tot,
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

sysctl_debug_umtx_chains_clear(SYSCTL_HANDLER_ARGS)
    struct umtxq_chain *uc;

    error = sysctl_handle_int(oidp, &clear, 0, req);
    if (error != 0 || req->newptr == NULL)

    for (i = 0; i < 2; ++i) {
        for (j = 0; j < UMTX_CHAINS; ++j) {
            uc = &umtxq_chains[i][j];
            mtx_lock(&uc->uc_lock);
            mtx_unlock(&uc->uc_lock);

SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, clear,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_clear, "I", "Clear umtx chains statistics");
SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, peaks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_peaks, "A", "Highest peaks in chains max length");
umtxq_sysinit(void *arg __unused)
    umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
    for (i = 0; i < 2; ++i) {
        for (j = 0; j < UMTX_CHAINS; ++j) {
            mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
                MTX_DEF | MTX_DUPOK);
            LIST_INIT(&umtxq_chains[i][j].uc_queue[0]);
            LIST_INIT(&umtxq_chains[i][j].uc_queue[1]);
            LIST_INIT(&umtxq_chains[i][j].uc_spare_queue);
            TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
            umtxq_chains[i][j].uc_busy = 0;
            umtxq_chains[i][j].uc_waiters = 0;
#ifdef UMTX_PROFILING
            umtxq_chains[i][j].length = 0;
            umtxq_chains[i][j].max_length = 0;

#ifdef UMTX_PROFILING
    umtx_init_profiling();

    mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);
    EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
        EVENTHANDLER_PRI_ANY);
    uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
    uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX,
    TAILQ_INIT(&uq->uq_spare_queue->head);
    TAILQ_INIT(&uq->uq_pi_contested);
    uq->uq_inherited_pri = PRI_MAX;

umtxq_free(struct umtx_q *uq)
    MPASS(uq->uq_spare_queue != NULL);
    free(uq->uq_spare_queue, M_UMTX);

umtxq_hash(struct umtx_key *key)
    n = (uintptr_t)key->info.both.a + key->info.both.b;
    key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
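/*
 * Worked example, assuming __WORD_BIT == 32 so that UMTX_SHIFTS == 23:
 * this is a multiplicative (Fibonacci-style) hash.  On a 32-bit arch
 * the product wraps modulo 2^32 and the shift keeps its top 9 bits,
 * already in [0, UMTX_CHAINS); on LP64 the wider product makes the
 * final "% UMTX_CHAINS" do the real truncation.  For instance:
 *
 *	n = 0x1000;
 *	(uint32_t)(n * GOLDEN_RATIO_PRIME) == 0x70001000, and
 *	0x70001000 >> 23 == 224, so the key hashes to chain 224.
 */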
static inline struct umtxq_chain *
umtxq_getchain(struct umtx_key *key)
    if (key->type <= TYPE_SEM)
        return (&umtxq_chains[1][key->hash]);
    return (&umtxq_chains[0][key->hash]);

umtxq_lock(struct umtx_key *key)
    struct umtxq_chain *uc;

    uc = umtxq_getchain(key);
    mtx_lock(&uc->uc_lock);

umtxq_unlock(struct umtx_key *key)
    struct umtxq_chain *uc;

    uc = umtxq_getchain(key);
    mtx_unlock(&uc->uc_lock);
/*
 * Set the chain to the busy state when a following operation
 * may block (a kernel mutex cannot be used).
 */
umtxq_busy(struct umtx_key *key)
    struct umtxq_chain *uc;

    uc = umtxq_getchain(key);
    mtx_assert(&uc->uc_lock, MA_OWNED);
        int count = BUSY_SPINS;

            while (uc->uc_busy && --count > 0)

        while (uc->uc_busy) {
            msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);

umtxq_unbusy(struct umtx_key *key)
    struct umtxq_chain *uc;

    uc = umtxq_getchain(key);
    mtx_assert(&uc->uc_lock, MA_OWNED);
    KASSERT(uc->uc_busy != 0, ("not busy"));

umtxq_unbusy_unlocked(struct umtx_key *key)

static struct umtxq_queue *
umtxq_queue_lookup(struct umtx_key *key, int q)
    struct umtxq_queue *uh;
    struct umtxq_chain *uc;

    uc = umtxq_getchain(key);
    UMTXQ_LOCKED_ASSERT(uc);
    LIST_FOREACH(uh, &uc->uc_queue[q], link) {
        if (umtx_key_match(&uh->key, key))
umtxq_insert_queue(struct umtx_q *uq, int q)
    struct umtxq_queue *uh;
    struct umtxq_chain *uc;

    uc = umtxq_getchain(&uq->uq_key);
    UMTXQ_LOCKED_ASSERT(uc);
    KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue"));
    uh = umtxq_queue_lookup(&uq->uq_key, q);
        LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
        uh = uq->uq_spare_queue;
        uh->key = uq->uq_key;
        LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
#ifdef UMTX_PROFILING
    if (uc->length > uc->max_length) {
        uc->max_length = uc->length;
        if (uc->max_length > max_length)
            max_length = uc->max_length;
    uq->uq_spare_queue = NULL;

    TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
    uq->uq_flags |= UQF_UMTXQ;
    uq->uq_cur_queue = uh;

umtxq_remove_queue(struct umtx_q *uq, int q)
    struct umtxq_chain *uc;
    struct umtxq_queue *uh;

    uc = umtxq_getchain(&uq->uq_key);
    UMTXQ_LOCKED_ASSERT(uc);
    if (uq->uq_flags & UQF_UMTXQ) {
        uh = uq->uq_cur_queue;
        TAILQ_REMOVE(&uh->head, uq, uq_link);
        uq->uq_flags &= ~UQF_UMTXQ;
        if (TAILQ_EMPTY(&uh->head)) {
            KASSERT(uh->length == 0,
                ("inconsistent umtxq_queue length"));
#ifdef UMTX_PROFILING
            LIST_REMOVE(uh, link);
            uh = LIST_FIRST(&uc->uc_spare_queue);
            KASSERT(uh != NULL, ("uc_spare_queue is empty"));
            LIST_REMOVE(uh, link);
        uq->uq_spare_queue = uh;
        uq->uq_cur_queue = NULL;
/*
 * Check if there are multiple waiters.
 */
umtxq_count(struct umtx_key *key)
    struct umtxq_queue *uh;

    UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
    uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);

/*
 * Check if there are multiple PI waiters and return the first one.
 */
umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
    struct umtxq_queue *uh;

    UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
    uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
        *first = TAILQ_FIRST(&uh->head);
/*
 * Check for possible stops and suspensions while executing a umtx
 * locking operation.
 *
 * The sleep argument controls whether the function can handle a stop
 * request itself or whether it should return ERESTART, with the
 * request then processed at the kernel/user boundary in ast.
 *
 * Typically, when retrying due to a casueword(9) failure (rv == 1), we
 * should handle the stop request there, with the exception of cases when
 * the thread busied the umtx key, or when functions return
 * immediately if umtxq_check_susp() returned non-zero.  On the other
 * hand, when retrying the whole lock operation, we had better not stop
 * there but delegate the handling to ast.
 *
 * If the request is for thread termination P_SINGLE_EXIT, we cannot
 * handle it at all, and simply return EINTR.
 */
umtxq_check_susp(struct thread *td, bool sleep)

    /*
     * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
     * eventually break the lockstep loop.
     */
    if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
    if (p->p_flag & P_SINGLE_EXIT)
    else if (P_SHOULDSTOP(p) ||
        ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND)))
        error = sleep ? thread_suspend_check(0) : ERESTART;
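/*
 * A minimal sketch of the retry convention described above, assuming
 * the usual casueword(9) return values (-1 fault, 0 stored, 1 not
 * stored): a store failure handles the stop in place with sleep ==
 * false, while a restart of the whole operation defers to ast:
 *
 *	for (;;) {
 *		rv = casueword32(&m->m_owner, owner, &old, owner | bit);
 *		if (rv == -1)
 *			return (EFAULT);
 *		if (rv == 0)
 *			break;		(value stored, leave the loop)
 *		error = umtxq_check_susp(td, false);
 *		if (error != 0)
 *			return (error);
 *	}
 */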
/*
 * Wake up threads waiting on a userland object.
 */
umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
    struct umtxq_queue *uh;

    UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
    uh = umtxq_queue_lookup(key, q);
        while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
            umtxq_remove_queue(uq, q);

/*
 * Wake up the specified thread.
 */
umtxq_signal_thread(struct umtx_q *uq)

    UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));

tstohz(const struct timespec *tsp)

    TIMESPEC_TO_TIMEVAL(&tv, tsp);

abs_timeout_init(struct abs_timeout *timo, int clockid, int absolute,
    const struct timespec *timeout)

    timo->clockid = clockid;
        timo->is_abs_real = false;
        abs_timeout_update(timo);
        timespecadd(&timo->cur, timeout, &timo->end);
        timo->end = *timeout;
        timo->is_abs_real = clockid == CLOCK_REALTIME ||
            clockid == CLOCK_REALTIME_FAST ||
            clockid == CLOCK_REALTIME_PRECISE;
        /*
         * If is_abs_real, umtxq_sleep will read the clock
         * after setting td_rtcgen; otherwise, read it here.
         */
        if (!timo->is_abs_real) {
            abs_timeout_update(timo);
abs_timeout_init2(struct abs_timeout *timo, const struct _umtx_time *umtxtime)

    abs_timeout_init(timo, umtxtime->_clockid,
        (umtxtime->_flags & UMTX_ABSTIME) != 0, &umtxtime->_timeout);

abs_timeout_update(struct abs_timeout *timo)

    kern_clock_gettime(curthread, timo->clockid, &timo->cur);

abs_timeout_gethz(struct abs_timeout *timo)

    if (timespeccmp(&timo->end, &timo->cur, <=))
    timespecsub(&timo->end, &timo->cur, &tts);
    return (tstohz(&tts));
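/*
 * Usage sketch: a caller converts the user-supplied _umtx_time once and
 * then hands the struct to umtxq_sleep(), which re-reads the clock via
 * abs_timeout_update() after every wakeup and re-computes the remaining
 * ticks with abs_timeout_gethz(), so a spurious wakeup never extends
 * the total sleep:
 *
 *	struct abs_timeout timo;
 *
 *	abs_timeout_init2(&timo, timeout);
 *	error = umtxq_sleep(uq, "uwait", &timo);
 */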
umtx_unlock_val(uint32_t flags, bool rb)
        return (UMUTEX_RB_OWNERDEAD);
    else if ((flags & UMUTEX_NONCONSISTENT) != 0)
        return (UMUTEX_RB_NOTRECOV);
        return (UMUTEX_UNOWNED);

/*
 * Put the thread into a sleep state; before sleeping, check whether
 * the thread was removed from the umtx queue.
 */
umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *abstime)
    struct umtxq_chain *uc;

    if (abstime != NULL && abstime->is_abs_real) {
        curthread->td_rtcgen = atomic_load_acq_int(&rtc_generation);
        abs_timeout_update(abstime);

    uc = umtxq_getchain(&uq->uq_key);
    UMTXQ_LOCKED_ASSERT(uc);
        if (!(uq->uq_flags & UQF_UMTXQ)) {
        if (abstime != NULL) {
            timo = abs_timeout_gethz(abstime);
        error = msleep(uq, &uc->uc_lock, PCATCH | PDROP, wmesg, timo);
        if (error == EINTR || error == ERESTART) {
            umtxq_lock(&uq->uq_key);
        if (abstime != NULL) {
            if (abstime->is_abs_real)
                curthread->td_rtcgen =
                    atomic_load_acq_int(&rtc_generation);
            abs_timeout_update(abstime);
        umtxq_lock(&uq->uq_key);

    curthread->td_rtcgen = 0;
/*
 * Convert a userspace address into a unique logical address.
 */
umtx_key_get(const void *addr, int type, int share, struct umtx_key *key)
    struct thread *td = curthread;
    vm_map_entry_t entry;

    if (share == THREAD_SHARE) {
        key->info.private.vs = td->td_proc->p_vmspace;
        key->info.private.addr = (uintptr_t)addr;
        MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
        map = &td->td_proc->p_vmspace->vm_map;
        if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
            &entry, &key->info.shared.object, &pindex, &prot,
            &wired) != KERN_SUCCESS) {
        if ((share == PROCESS_SHARE) ||
            (share == AUTO_SHARE &&
             VM_INHERIT_SHARE == entry->inheritance)) {
            key->info.shared.offset = (vm_offset_t)addr -
                entry->start + entry->offset;
            vm_object_reference(key->info.shared.object);
            key->info.private.vs = td->td_proc->p_vmspace;
            key->info.private.addr = (uintptr_t)addr;
        vm_map_lookup_done(map, entry);

umtx_key_release(struct umtx_key *key)
        vm_object_deallocate(key->info.shared.object);
/*
 * Fetch and compare value; sleep on the address if the value has not changed.
 */
do_wait(struct thread *td, void *addr, u_long id,
    struct _umtx_time *timeout, int compat32, int is_private)
    struct abs_timeout timo;

    if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
        is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
        abs_timeout_init2(&timo, timeout);

    umtxq_lock(&uq->uq_key);
    umtxq_unlock(&uq->uq_key);
        error = fueword(addr, &tmp);
        error = fueword32(addr, &tmp32);
        umtxq_lock(&uq->uq_key);
            error = umtxq_sleep(uq, "uwait", timeout == NULL ?
        if ((uq->uq_flags & UQF_UMTXQ) == 0)
    } else if ((uq->uq_flags & UQF_UMTXQ) != 0) {
    umtxq_unlock(&uq->uq_key);
    umtx_key_release(&uq->uq_key);
    if (error == ERESTART)

/*
 * Wake up threads sleeping on the specified address.
 */
kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
    struct umtx_key key;

    if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
        is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
    umtxq_signal(&key, n_wake);
    umtx_key_release(&key);
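/*
 * Protocol sketch: do_wait() and kern_umtx_wake() form a futex-style
 * pair.  A hypothetical userland use via _umtx_op(2), with BUSY and
 * FREE being application-defined word values, might look like:
 *
 *	while (atomic_load_acq_int(&word) == BUSY)
 *		_umtx_op(&word, UMTX_OP_WAIT_UINT, BUSY, NULL, NULL);
 *	...
 *	atomic_store_rel_int(&word, FREE);
 *	_umtx_op(&word, UMTX_OP_WAKE, 1, NULL, NULL);
 *
 * do_wait() queues the thread before re-reading the word, so a wakeup
 * that races the userland check and the sleep is not lost.
 */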
/*
 * Lock a PTHREAD_PRIO_NONE protocol POSIX mutex.
 */
do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int mode)
    struct abs_timeout timo;
    uint32_t owner, old, id;

    if (timeout != NULL)
        abs_timeout_init2(&timo, timeout);

    /*
     * Care must be exercised when dealing with the umtx structure.
     * It can fault on any access.
     */
        rv = fueword32(&m->m_owner, &owner);
        if (mode == _UMUTEX_WAIT) {
            if (owner == UMUTEX_UNOWNED ||
                owner == UMUTEX_CONTESTED ||
                owner == UMUTEX_RB_OWNERDEAD ||
                owner == UMUTEX_RB_NOTRECOV)

            /*
             * Robust mutex terminated.  Kernel duty is to
             * return EOWNERDEAD to the userspace.  The
             * umutex.m_flags UMUTEX_NONCONSISTENT is set
             * by the common userspace code.
             */
            if (owner == UMUTEX_RB_OWNERDEAD) {
                rv = casueword32(&m->m_owner,
                    UMUTEX_RB_OWNERDEAD, &owner,
                    id | UMUTEX_CONTESTED);
                MPASS(owner == UMUTEX_RB_OWNERDEAD);
                return (EOWNERDEAD); /* success */
            rv = umtxq_check_susp(td, false);

        if (owner == UMUTEX_RB_NOTRECOV)
            return (ENOTRECOVERABLE);

        /*
         * Try the uncontested case.  This should be
         */
        rv = casueword32(&m->m_owner, UMUTEX_UNOWNED,
        /* The address was invalid. */

        /* The acquire succeeded. */
            MPASS(owner == UMUTEX_UNOWNED);

        /*
         * If no one owns it but it is contested try
         */
        if (owner == UMUTEX_CONTESTED) {
            rv = casueword32(&m->m_owner,
                UMUTEX_CONTESTED, &owner,
                id | UMUTEX_CONTESTED);
            /* The address was invalid. */
            MPASS(owner == UMUTEX_CONTESTED);
            rv = umtxq_check_susp(td, false);

        /*
         * If this failed the lock has
         */
        /* rv == 1 but not contested, likely store failure */
        rv = umtxq_check_susp(td, false);

        if (mode == _UMUTEX_TRY)

        /*
         * If we caught a signal, we have retried and now
         */
        if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
            GET_SHARE(flags), &uq->uq_key)) != 0)

        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        /*
         * Set the contested bit so that a release in user space
         * knows to use the system call for unlock.  If this fails
         * either someone else has acquired the lock or it has been
         */
        rv = casueword32(&m->m_owner, owner, &old,
            owner | UMUTEX_CONTESTED);

        /* The address was invalid or casueword failed to store. */
        if (rv == -1 || rv == 1) {
            umtxq_lock(&uq->uq_key);
            umtxq_unbusy(&uq->uq_key);
            umtxq_unlock(&uq->uq_key);
            umtx_key_release(&uq->uq_key);
            rv = umtxq_check_susp(td, false);

        /*
         * We set the contested bit, sleep.  Otherwise the lock changed
         * and we need to retry or we lost a race to the thread
         * unlocking the umtx.
         */
        umtxq_lock(&uq->uq_key);
        umtxq_unbusy(&uq->uq_key);
        MPASS(old == owner);
        error = umtxq_sleep(uq, "umtxn", timeout == NULL ?
        umtxq_unlock(&uq->uq_key);
        umtx_key_release(&uq->uq_key);

        error = umtxq_check_susp(td, false);
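/*
 * Fast-path sketch: the comments above assume the uncontested
 * transitions normally happen in userland, with the kernel entered
 * only to set UMUTEX_CONTESTED and sleep.  A minimal userland lock
 * under that assumption:
 *
 *	if (atomic_cmpset_acq_32(&m->m_owner, UMUTEX_UNOWNED, id))
 *		return (0);		(uncontested, no syscall)
 *	return (_umtx_op(m, UMTX_OP_MUTEX_LOCK, 0, NULL, NULL));
 */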
/*
 * Unlock a PTHREAD_PRIO_NONE protocol POSIX mutex.
 */
do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
    struct umtx_key key;
    uint32_t owner, old, id, newlock;

    /*
     * Make sure we own this mtx.
     */
    error = fueword32(&m->m_owner, &owner);
    if ((owner & ~UMUTEX_CONTESTED) != id)

    newlock = umtx_unlock_val(flags, rb);
    if ((owner & UMUTEX_CONTESTED) == 0) {
        error = casueword32(&m->m_owner, owner, &old, newlock);
        error = umtxq_check_susp(td, false);
        MPASS(old == owner);

    /* We should only ever be in here for contested locks */
    if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),

    count = umtxq_count(&key);

    /*
     * When unlocking the umtx, it must be marked as unowned if
     * zero or only one thread is waiting for it.
     * Otherwise, it must be marked as contested.
     */
        newlock |= UMUTEX_CONTESTED;
    error = casueword32(&m->m_owner, owner, &old, newlock);
        umtxq_signal(&key, 1);
    umtx_key_release(&key);
    error = umtxq_check_susp(td, false);
/*
 * Check if the mutex is available and wake up a waiter;
 * this applies only to a simple mutex.
 */
do_wake_umutex(struct thread *td, struct umutex *m)
    struct umtx_key key;

    error = fueword32(&m->m_owner, &owner);
    if ((owner & ~UMUTEX_CONTESTED) != 0 && owner != UMUTEX_RB_OWNERDEAD &&
        owner != UMUTEX_RB_NOTRECOV)

    error = fueword32(&m->m_flags, &flags);

    /* We should only ever be in here for contested locks */
    if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),

    count = umtxq_count(&key);

    if (count <= 1 && owner != UMUTEX_RB_OWNERDEAD &&
        owner != UMUTEX_RB_NOTRECOV) {
        error = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
        } else if (error == 1) {
            umtx_key_release(&key);
            error = umtxq_check_susp(td, false);

    if (error == 0 && count != 0) {
        MPASS((owner & ~UMUTEX_CONTESTED) == 0 ||
            owner == UMUTEX_RB_OWNERDEAD ||
            owner == UMUTEX_RB_NOTRECOV);
        umtxq_signal(&key, 1);

    umtx_key_release(&key);
/*
 * Check if the mutex has waiters and try to fix the contention bit.
 */
do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
    struct umtx_key key;
    uint32_t owner, old;

    switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT |
        type = TYPE_NORMAL_UMUTEX;
    case UMUTEX_PRIO_INHERIT:
        type = TYPE_PI_UMUTEX;
    case (UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST):
        type = TYPE_PI_ROBUST_UMUTEX;
    case UMUTEX_PRIO_PROTECT:
        type = TYPE_PP_UMUTEX;
    case (UMUTEX_PRIO_PROTECT | UMUTEX_ROBUST):
        type = TYPE_PP_ROBUST_UMUTEX;
    if ((error = umtx_key_get(m, type, GET_SHARE(flags), &key)) != 0)

    count = umtxq_count(&key);

    error = fueword32(&m->m_owner, &owner);

    /*
     * Only repair the contention bit if there is a waiter; this means
     * the mutex is still being referenced by userland code.
     * Otherwise, don't update any memory.
     */
    while (error == 0 && (owner & UMUTEX_CONTESTED) == 0 &&
        (count > 1 || (count == 1 && (owner & ~UMUTEX_CONTESTED) != 0))) {
        error = casueword32(&m->m_owner, owner, &old,
            owner | UMUTEX_CONTESTED);
            MPASS(old == owner);
        error = umtxq_check_susp(td, false);

    if (error == EFAULT) {
        umtxq_signal(&key, INT_MAX);
    } else if (count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 ||
        owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV))
        umtxq_signal(&key, 1);

    umtx_key_release(&key);
static inline struct umtx_pi *
umtx_pi_alloc(int flags)
    pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
    TAILQ_INIT(&pi->pi_blocked);
    atomic_add_int(&umtx_pi_allocated, 1);

umtx_pi_free(struct umtx_pi *pi)
    uma_zfree(umtx_pi_zone, pi);
    atomic_add_int(&umtx_pi_allocated, -1);
/*
 * Adjust the thread's position on a pi_state after its priority has been
 */
umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
    struct umtx_q *uq, *uq1, *uq2;

    mtx_assert(&umtx_lock, MA_OWNED);

    /*
     * Check if the thread needs to be moved on the blocked chain.
     * It needs to be moved if either its priority is lower than
     * the previous thread's or higher than the next thread's.
     */
    uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
    uq2 = TAILQ_NEXT(uq, uq_lockq);
    if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
        (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
        /*
         * Remove the thread from the blocked chain and determine
         * where it should be moved to.
         */
        TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
        TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
            td1 = uq1->uq_thread;
            MPASS(td1->td_proc->p_magic == P_MAGIC);
            if (UPRI(td1) > UPRI(td))

        TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
        TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
static struct umtx_pi *
umtx_pi_next(struct umtx_pi *pi)
    struct umtx_q *uq_owner;

    if (pi->pi_owner == NULL)
    uq_owner = pi->pi_owner->td_umtxq;
    if (uq_owner == NULL)
    return (uq_owner->uq_pi_blocked);

/*
 * Floyd's Cycle-Finding Algorithm.
 */
umtx_pi_check_loop(struct umtx_pi *pi)
    struct umtx_pi *pi1;	/* fast iterator */

    mtx_assert(&umtx_lock, MA_OWNED);
        pi = umtx_pi_next(pi);
        pi1 = umtx_pi_next(pi1);
        pi1 = umtx_pi_next(pi1);
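/*
 * Sketch of the elided loop body, roughly the standard tortoise-and-
 * hare scan over the chain of owners blocked on further PI mutexes:
 *
 *	pi1 = pi;
 *	for (;;) {
 *		pi = umtx_pi_next(pi);		(slow pointer, one step)
 *		pi1 = umtx_pi_next(pi1);	(fast pointer, two steps)
 *		if (pi1 != NULL)
 *			pi1 = umtx_pi_next(pi1);
 *		if (pi == NULL || pi1 == NULL)
 *			return (false);		(chain ends: no cycle)
 *		if (pi == pi1)
 *			return (true);		(pointers meet: cycle)
 *	}
 */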
/*
 * Propagate priority when a thread is blocked on POSIX
 */
umtx_propagate_priority(struct thread *td)

    mtx_assert(&umtx_lock, MA_OWNED);
        pi = uq->uq_pi_blocked;
    if (umtx_pi_check_loop(pi))
        if (td == NULL || td == curthread)

        MPASS(td->td_proc != NULL);
        MPASS(td->td_proc->p_magic == P_MAGIC);

        if (td->td_lend_user_pri > pri)
            sched_lend_user_prio(td, pri);

        /*
         * Pick up the lock that td is blocked on.
         */
        pi = uq->uq_pi_blocked;

        /* Re-sort td on the list if needed. */
        umtx_pi_adjust_thread(pi, td);
/*
 * Unpropagate priority for a PI mutex when a thread blocked on
 * it is interrupted by a signal or resumed by others.
 */
umtx_repropagate_priority(struct umtx_pi *pi)
    struct umtx_q *uq, *uq_owner;
    struct umtx_pi *pi2;

    mtx_assert(&umtx_lock, MA_OWNED);

    if (umtx_pi_check_loop(pi))
    while (pi != NULL && pi->pi_owner != NULL) {
        uq_owner = pi->pi_owner->td_umtxq;
        TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
            uq = TAILQ_FIRST(&pi2->pi_blocked);
                if (pri > UPRI(uq->uq_thread))
                    pri = UPRI(uq->uq_thread);
        if (pri > uq_owner->uq_inherited_pri)
            pri = uq_owner->uq_inherited_pri;
        thread_lock(pi->pi_owner);
        sched_lend_user_prio(pi->pi_owner, pri);
        thread_unlock(pi->pi_owner);
        if ((pi = uq_owner->uq_pi_blocked) != NULL)
            umtx_pi_adjust_thread(pi, uq_owner->uq_thread);
/*
 * Insert a PI mutex into the owned list.
 */
umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
    struct umtx_q *uq_owner;

    uq_owner = owner->td_umtxq;
    mtx_assert(&umtx_lock, MA_OWNED);
    MPASS(pi->pi_owner == NULL);
    pi->pi_owner = owner;
    TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);

/*
 * Disown a PI mutex, and remove it from the owned list.
 */
umtx_pi_disown(struct umtx_pi *pi)

    mtx_assert(&umtx_lock, MA_OWNED);
    TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested, pi, pi_link);
    pi->pi_owner = NULL;

/*
 * Claim ownership of a PI mutex.
 */
umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)

    mtx_lock(&umtx_lock);
    if (pi->pi_owner == owner) {
        mtx_unlock(&umtx_lock);

    if (pi->pi_owner != NULL) {
        /*
         * Userland may have already messed the mutex, sigh.
         */
        mtx_unlock(&umtx_lock);

    umtx_pi_setowner(pi, owner);
    uq = TAILQ_FIRST(&pi->pi_blocked);
        pri = UPRI(uq->uq_thread);
        if (pri < UPRI(owner))
            sched_lend_user_prio(owner, pri);
        thread_unlock(owner);
    mtx_unlock(&umtx_lock);
/*
 * Adjust a thread's position in the blocked list of its PI mutex;
 * this may start a new round of priority propagation.
 */
umtx_pi_adjust(struct thread *td, u_char oldpri)

    mtx_lock(&umtx_lock);
    /*
     * Pick up the lock that td is blocked on.
     */
    pi = uq->uq_pi_blocked;
        umtx_pi_adjust_thread(pi, td);
        umtx_repropagate_priority(pi);
    mtx_unlock(&umtx_lock);
/*
 * Sleep on a PI mutex.
 */
umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner,
    const char *wmesg, struct abs_timeout *timo, bool shared)
    struct thread *td, *td1;
    struct umtxq_chain *uc;

    uc = umtxq_getchain(&pi->pi_key);
    KASSERT(td == curthread, ("inconsistent uq_thread"));
    UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));
    KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));

    mtx_lock(&umtx_lock);
    if (pi->pi_owner == NULL) {
        mtx_unlock(&umtx_lock);
        td1 = tdfind(owner, shared ? -1 : td->td_proc->p_pid);
        mtx_lock(&umtx_lock);
            if (pi->pi_owner == NULL)
                umtx_pi_setowner(pi, td1);
            PROC_UNLOCK(td1->td_proc);

    TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
        pri = UPRI(uq1->uq_thread);
        TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
        TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);

    uq->uq_pi_blocked = pi;
    td->td_flags |= TDF_UPIBLOCKED;
    umtx_propagate_priority(td);
    mtx_unlock(&umtx_lock);
    umtxq_unbusy(&uq->uq_key);

    error = umtxq_sleep(uq, wmesg, timo);

    mtx_lock(&umtx_lock);
    uq->uq_pi_blocked = NULL;
    td->td_flags &= ~TDF_UPIBLOCKED;
    TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
    umtx_repropagate_priority(pi);
    mtx_unlock(&umtx_lock);
    umtxq_unlock(&uq->uq_key);
/*
 * Increase the reference count for a PI mutex.
 */
umtx_pi_ref(struct umtx_pi *pi)

    UMTXQ_LOCKED_ASSERT(umtxq_getchain(&pi->pi_key));

/*
 * Decrease the reference count for a PI mutex; if the count
 * drops to zero, its memory is freed.
 */
umtx_pi_unref(struct umtx_pi *pi)
    struct umtxq_chain *uc;

    uc = umtxq_getchain(&pi->pi_key);
    UMTXQ_LOCKED_ASSERT(uc);
    KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
    if (--pi->pi_refcount == 0) {
        mtx_lock(&umtx_lock);
        if (pi->pi_owner != NULL)
        KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
            ("blocked queue not empty"));
        mtx_unlock(&umtx_lock);
        TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);

/*
 * Find a PI mutex in the hash table.
 */
static struct umtx_pi *
umtx_pi_lookup(struct umtx_key *key)
    struct umtxq_chain *uc;

    uc = umtxq_getchain(key);
    UMTXQ_LOCKED_ASSERT(uc);
    TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
        if (umtx_key_match(&pi->pi_key, key)) {

/*
 * Insert a PI mutex into the hash table.
 */
umtx_pi_insert(struct umtx_pi *pi)
    struct umtxq_chain *uc;

    uc = umtxq_getchain(&pi->pi_key);
    UMTXQ_LOCKED_ASSERT(uc);
    TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
    struct abs_timeout timo;
    struct umtx_pi *pi, *new_pi;
    uint32_t id, old_owner, owner, old;

    if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
        TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),

    if (timeout != NULL)
        abs_timeout_init2(&timo, timeout);

    umtxq_lock(&uq->uq_key);
    pi = umtx_pi_lookup(&uq->uq_key);
        new_pi = umtx_pi_alloc(M_NOWAIT);
        if (new_pi == NULL) {
            umtxq_unlock(&uq->uq_key);
            new_pi = umtx_pi_alloc(M_WAITOK);
            umtxq_lock(&uq->uq_key);
            pi = umtx_pi_lookup(&uq->uq_key);
                umtx_pi_free(new_pi);
        if (new_pi != NULL) {
            new_pi->pi_key = uq->uq_key;
            umtx_pi_insert(new_pi);
    umtxq_unlock(&uq->uq_key);

    /*
     * Care must be exercised when dealing with the umtx structure.
     * It can fault on any access.
     */
        /*
         * Try the uncontested case.  This should be done in userland.
         */
        rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, &owner, id);
        /* The address was invalid. */

        /* The acquire succeeded. */
            MPASS(owner == UMUTEX_UNOWNED);

        if (owner == UMUTEX_RB_NOTRECOV) {
            error = ENOTRECOVERABLE;

            /*
             * Avoid overwriting a possible error from sleep due
             * to the pending signal with the suspension check
             * result.
             */
                error = umtxq_check_susp(td, true);

        /* If no one owns it but it is contested, try to acquire it. */
        if (owner == UMUTEX_CONTESTED || owner == UMUTEX_RB_OWNERDEAD) {
            rv = casueword32(&m->m_owner, owner, &owner,
                id | UMUTEX_CONTESTED);
            /* The address was invalid. */
                error = umtxq_check_susp(td, true);

                /*
                 * If this failed the lock could
                 */
            MPASS(owner == old_owner);
            umtxq_lock(&uq->uq_key);
            umtxq_busy(&uq->uq_key);
            error = umtx_pi_claim(pi, td);
            umtxq_unbusy(&uq->uq_key);
            umtxq_unlock(&uq->uq_key);
                /*
                 * Since we're going to return an
                 * error, restore the m_owner to its
                 * previous, unowned state to avoid
                 * compounding the problem.
                 */
                (void)casuword32(&m->m_owner,
                    id | UMUTEX_CONTESTED, old_owner);
            if (error == 0 && old_owner == UMUTEX_RB_OWNERDEAD)

        if ((owner & ~UMUTEX_CONTESTED) == id) {

        /*
         * If we caught a signal, we have retried and now
         */

        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        /*
         * Set the contested bit so that a release in user space
         * knows to use the system call for unlock.  If this fails
         * either someone else has acquired the lock or it has been
         */
        rv = casueword32(&m->m_owner, owner, &old, owner |

        /* The address was invalid. */
            umtxq_unbusy_unlocked(&uq->uq_key);

            umtxq_unbusy_unlocked(&uq->uq_key);
            error = umtxq_check_susp(td, true);

            /*
             * The lock changed and we need to retry or we
             * lost a race to the thread unlocking the
             * umtx.  Note that the UMUTEX_RB_OWNERDEAD
             * value for owner is impossible here.
             */

        umtxq_lock(&uq->uq_key);

        /* We set the contested bit, sleep. */
        MPASS(old == owner);
        error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED,
            "umtxpi", timeout == NULL ? NULL : &timo,
            (flags & USYNC_PROCESS_SHARED) != 0);

        error = umtxq_check_susp(td, false);

    umtxq_lock(&uq->uq_key);
    umtxq_unlock(&uq->uq_key);

    umtx_key_release(&uq->uq_key);
/*
 * Unlock a PI mutex.
 */
do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
    struct umtx_key key;
    struct umtx_q *uq_first, *uq_first2, *uq_me;
    struct umtx_pi *pi, *pi2;
    uint32_t id, new_owner, old, owner;
    int count, error, pri;

    /*
     * Make sure we own this mtx.
     */
    error = fueword32(&m->m_owner, &owner);
    if ((owner & ~UMUTEX_CONTESTED) != id)

    new_owner = umtx_unlock_val(flags, rb);

    /* This should be done in userland */
    if ((owner & UMUTEX_CONTESTED) == 0) {
        error = casueword32(&m->m_owner, owner, &old, new_owner);
        error = umtxq_check_susp(td, true);

    /* We should only ever be in here for contested locks */
    if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
        TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),

    count = umtxq_count_pi(&key, &uq_first);
    if (uq_first != NULL) {
        mtx_lock(&umtx_lock);
        pi = uq_first->uq_pi_blocked;
        KASSERT(pi != NULL, ("pi == NULL?"));
        if (pi->pi_owner != td && !(rb && pi->pi_owner == NULL)) {
            mtx_unlock(&umtx_lock);
            umtx_key_release(&key);
            /* userland messed the mutex */
        uq_me = td->td_umtxq;
        if (pi->pi_owner == td)
        /* Get the highest-priority thread that is still sleeping. */
        uq_first = TAILQ_FIRST(&pi->pi_blocked);
        while (uq_first != NULL &&
            (uq_first->uq_flags & UQF_UMTXQ) == 0) {
            uq_first = TAILQ_NEXT(uq_first, uq_lockq);

        TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
            uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
            if (uq_first2 != NULL) {
                if (pri > UPRI(uq_first2->uq_thread))
                    pri = UPRI(uq_first2->uq_thread);
        sched_lend_user_prio(td, pri);
        mtx_unlock(&umtx_lock);
            umtxq_signal_thread(uq_first);
        pi = umtx_pi_lookup(&key);
        /*
         * A umtx_pi can exist if a signal or timeout removed the
         * last waiter from the umtxq, but there is still
         * a thread in do_lock_pi() holding the umtx_pi.
         */
            /*
             * The umtx_pi can be unowned, such as when a thread
             * has just entered do_lock_pi(), allocated the
             * umtx_pi, and unlocked the umtxq.
             * If the current thread owns it, it must disown it.
             */
            mtx_lock(&umtx_lock);
            if (pi->pi_owner == td)
            mtx_unlock(&umtx_lock);

    /*
     * When unlocking the umtx, it must be marked as unowned if
     * zero or only one thread is waiting for it.
     * Otherwise, it must be marked as contested.
     */
        new_owner |= UMUTEX_CONTESTED;
    error = casueword32(&m->m_owner, owner, &old, new_owner);
        error = umtxq_check_susp(td, false);

    umtxq_unbusy_unlocked(&key);
    umtx_key_release(&key);

    if (error == 0 && old != owner)
do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
    struct abs_timeout timo;
    struct umtx_q *uq, *uq2;
    int error, pri, old_inherited_pri, su, rv;

    if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
        TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),

    if (timeout != NULL)
        abs_timeout_init2(&timo, timeout);

    su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
        old_inherited_pri = uq->uq_inherited_pri;
        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        rv = fueword32(&m->m_ceilings[0], &ceiling);
        ceiling = RTP_PRIO_MAX - ceiling;
        if (ceiling > RTP_PRIO_MAX) {

        mtx_lock(&umtx_lock);
        if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
            mtx_unlock(&umtx_lock);
        if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
            uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
            if (uq->uq_inherited_pri < UPRI(td))
                sched_lend_user_prio(td, uq->uq_inherited_pri);
        mtx_unlock(&umtx_lock);

        rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
            id | UMUTEX_CONTESTED);
        /* The address was invalid. */

            MPASS(owner == UMUTEX_CONTESTED);

        if (owner == UMUTEX_RB_OWNERDEAD) {
            rv = casueword32(&m->m_owner, UMUTEX_RB_OWNERDEAD,
                &owner, id | UMUTEX_CONTESTED);
                MPASS(owner == UMUTEX_RB_OWNERDEAD);
                error = EOWNERDEAD; /* success */

            /*
             * rv == 1; only check for suspension if we
             * did not already catch a signal.  If we
             * get an error from the check, the same
             * condition is checked by the umtxq_sleep()
             * call below, so we should obliterate the
             * error to not skip the last loop iteration.
             */
                error = umtxq_check_susp(td, false);
        } else if (owner == UMUTEX_RB_NOTRECOV) {
            error = ENOTRECOVERABLE;

        /*
         * If we caught a signal, we have retried and now
         */

        umtxq_lock(&uq->uq_key);
        umtxq_unbusy(&uq->uq_key);
        error = umtxq_sleep(uq, "umtxpp", timeout == NULL ?
        umtxq_unlock(&uq->uq_key);

        mtx_lock(&umtx_lock);
        uq->uq_inherited_pri = old_inherited_pri;
        TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
            uq2 = TAILQ_FIRST(&pi->pi_blocked);
                if (pri > UPRI(uq2->uq_thread))
                    pri = UPRI(uq2->uq_thread);
        if (pri > uq->uq_inherited_pri)
            pri = uq->uq_inherited_pri;
        sched_lend_user_prio(td, pri);
        mtx_unlock(&umtx_lock);

    if (error != 0 && error != EOWNERDEAD) {
        mtx_lock(&umtx_lock);
        uq->uq_inherited_pri = old_inherited_pri;
        TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
            uq2 = TAILQ_FIRST(&pi->pi_blocked);
                if (pri > UPRI(uq2->uq_thread))
                    pri = UPRI(uq2->uq_thread);
        if (pri > uq->uq_inherited_pri)
            pri = uq->uq_inherited_pri;
        sched_lend_user_prio(td, pri);
        mtx_unlock(&umtx_lock);

    umtxq_unbusy_unlocked(&uq->uq_key);
    umtx_key_release(&uq->uq_key);
/*
 * Unlock a PP mutex.
 */
do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
    struct umtx_key key;
    struct umtx_q *uq, *uq2;
    uint32_t id, owner, rceiling;
    int error, pri, new_inherited_pri, su;

    su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);

    /*
     * Make sure we own this mtx.
     */
    error = fueword32(&m->m_owner, &owner);
    if ((owner & ~UMUTEX_CONTESTED) != id)

    error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
        new_inherited_pri = PRI_MAX;
        rceiling = RTP_PRIO_MAX - rceiling;
        if (rceiling > RTP_PRIO_MAX)
        new_inherited_pri = PRI_MIN_REALTIME + rceiling;

    if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
        TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),

    /*
     * For a priority-protected mutex, always set the unlocked state
     * to UMUTEX_CONTESTED, so that userland always enters the kernel
     * to lock the mutex.  This is necessary because the thread
     * priority has to be adjusted for such mutexes.
     */
    error = suword32(&m->m_owner, umtx_unlock_val(flags, rb) |
        umtxq_signal(&key, 1);

    mtx_lock(&umtx_lock);
        uq->uq_inherited_pri = new_inherited_pri;
    TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
        uq2 = TAILQ_FIRST(&pi->pi_blocked);
            if (pri > UPRI(uq2->uq_thread))
                pri = UPRI(uq2->uq_thread);
    if (pri > uq->uq_inherited_pri)
        pri = uq->uq_inherited_pri;
    sched_lend_user_prio(td, pri);
    mtx_unlock(&umtx_lock);

    umtx_key_release(&key);
do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
    uint32_t *old_ceiling)
    uint32_t flags, id, owner, save_ceiling;

    error = fueword32(&m->m_flags, &flags);
    if ((flags & UMUTEX_PRIO_PROTECT) == 0)
    if (ceiling > RTP_PRIO_MAX)

    if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
        TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),

    umtxq_lock(&uq->uq_key);
    umtxq_busy(&uq->uq_key);
    umtxq_unlock(&uq->uq_key);
        rv = fueword32(&m->m_ceilings[0], &save_ceiling);
        rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
            id | UMUTEX_CONTESTED);
            MPASS(owner == UMUTEX_CONTESTED);
            rv = suword32(&m->m_ceilings[0], ceiling);
            rv1 = suword32(&m->m_owner, UMUTEX_CONTESTED);
            error = (rv == 0 && rv1 == 0) ? 0 : EFAULT;

        if ((owner & ~UMUTEX_CONTESTED) == id) {
            rv = suword32(&m->m_ceilings[0], ceiling);
            error = rv == 0 ? 0 : EFAULT;

        if (owner == UMUTEX_RB_OWNERDEAD) {
        } else if (owner == UMUTEX_RB_NOTRECOV) {
            error = ENOTRECOVERABLE;

        /*
         * If we caught a signal, we have retried and now
         */

        /*
         * We set the contested bit, sleep.  Otherwise the lock changed
         * and we need to retry or we lost a race to the thread
         * unlocking the umtx.
         */
        umtxq_lock(&uq->uq_key);
        umtxq_unbusy(&uq->uq_key);
        error = umtxq_sleep(uq, "umtxpp", NULL);
        umtxq_unlock(&uq->uq_key);

    umtxq_lock(&uq->uq_key);
        umtxq_signal(&uq->uq_key, INT_MAX);
    umtxq_unbusy(&uq->uq_key);
    umtxq_unlock(&uq->uq_key);
    umtx_key_release(&uq->uq_key);
    if (error == 0 && old_ceiling != NULL) {
        rv = suword32(old_ceiling, save_ceiling);
        error = rv == 0 ? 0 : EFAULT;
/*
 * Lock a userland POSIX mutex.
 */
do_lock_umutex(struct thread *td, struct umutex *m,
    struct _umtx_time *timeout, int mode)

    error = fueword32(&m->m_flags, &flags);

    switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
        error = do_lock_normal(td, m, flags, timeout, mode);
    case UMUTEX_PRIO_INHERIT:
        error = do_lock_pi(td, m, flags, timeout, mode);
    case UMUTEX_PRIO_PROTECT:
        error = do_lock_pp(td, m, flags, timeout, mode);

    if (timeout == NULL) {
        if (error == EINTR && mode != _UMUTEX_WAIT)
        /* Timed-locking is not restarted. */
        if (error == ERESTART)

/*
 * Unlock a userland POSIX mutex.
 */
do_unlock_umutex(struct thread *td, struct umutex *m, bool rb)

    error = fueword32(&m->m_flags, &flags);

    switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
        return (do_unlock_normal(td, m, flags, rb));
    case UMUTEX_PRIO_INHERIT:
        return (do_unlock_pi(td, m, flags, rb));
    case UMUTEX_PRIO_PROTECT:
        return (do_unlock_pp(td, m, flags, rb));
do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
    struct timespec *timeout, u_long wflags)
    struct abs_timeout timo;
    uint32_t flags, clockid, hasw;

    error = fueword32(&cv->c_flags, &flags);
    error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);

    if ((wflags & CVWAIT_CLOCKID) != 0) {
        error = fueword32(&cv->c_clockid, &clockid);
            umtx_key_release(&uq->uq_key);
        if (clockid < CLOCK_REALTIME ||
            clockid >= CLOCK_THREAD_CPUTIME_ID) {
            /* hmm, only HW clock id will work. */
            umtx_key_release(&uq->uq_key);
        clockid = CLOCK_REALTIME;

    umtxq_lock(&uq->uq_key);
    umtxq_busy(&uq->uq_key);
    umtxq_unlock(&uq->uq_key);

    /*
     * Set c_has_waiters to 1 before releasing the user mutex; also,
     * don't modify the cache line when unnecessary.
     */
    error = fueword32(&cv->c_has_waiters, &hasw);
    if (error == 0 && hasw == 0)
        suword32(&cv->c_has_waiters, 1);

    umtxq_unbusy_unlocked(&uq->uq_key);

    error = do_unlock_umutex(td, m, false);

    if (timeout != NULL)
        abs_timeout_init(&timo, clockid, (wflags & CVWAIT_ABSTIME) != 0,

    umtxq_lock(&uq->uq_key);
        error = umtxq_sleep(uq, "ucond", timeout == NULL ?
        if ((uq->uq_flags & UQF_UMTXQ) == 0)
            /*
             * This must be a timeout, an interruption by a
             * signal, or a spurious wakeup; clear the
             * c_has_waiters flag when necessary.
             */
            umtxq_busy(&uq->uq_key);
            if ((uq->uq_flags & UQF_UMTXQ) != 0) {
                int oldlen = uq->uq_cur_queue->length;
                    umtxq_unlock(&uq->uq_key);
                    suword32(&cv->c_has_waiters, 0);
                    umtxq_lock(&uq->uq_key);
            umtxq_unbusy(&uq->uq_key);
            if (error == ERESTART)

    umtxq_unlock(&uq->uq_key);
    umtx_key_release(&uq->uq_key);
/*
 * Signal a userland condition variable.
 */
do_cv_signal(struct thread *td, struct ucond *cv)
    struct umtx_key key;
    int error, cnt, nwake;

    error = fueword32(&cv->c_flags, &flags);
    if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
    cnt = umtxq_count(&key);
    nwake = umtxq_signal(&key, 1);
        error = suword32(&cv->c_has_waiters, 0);
    umtx_key_release(&key);

do_cv_broadcast(struct thread *td, struct ucond *cv)
    struct umtx_key key;

    error = fueword32(&cv->c_flags, &flags);
    if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
    umtxq_signal(&key, INT_MAX);
    error = suword32(&cv->c_has_waiters, 0);
    umtxq_unbusy_unlocked(&key);
    umtx_key_release(&key);
do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag,
    struct _umtx_time *timeout)
    struct abs_timeout timo;
    uint32_t flags, wrflags;
    int32_t state, oldstate;
    int32_t blocked_readers;
    int error, error1, rv;

    error = fueword32(&rwlock->rw_flags, &flags);
    error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);

    if (timeout != NULL)
        abs_timeout_init2(&timo, timeout);

    wrflags = URWLOCK_WRITE_OWNER;
    if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
        wrflags |= URWLOCK_WRITE_WAITERS;

        rv = fueword32(&rwlock->rw_state, &state);
            umtx_key_release(&uq->uq_key);

        /* try to lock it */
        while (!(state & wrflags)) {
            if (__predict_false(URWLOCK_READER_COUNT(state) ==
                URWLOCK_MAX_READERS)) {
                umtx_key_release(&uq->uq_key);
            rv = casueword32(&rwlock->rw_state, state,
                &oldstate, state + 1);
                umtx_key_release(&uq->uq_key);
                MPASS(oldstate == state);
                umtx_key_release(&uq->uq_key);
            error = umtxq_check_susp(td, true);

        /* grab monitor lock */
        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        /*
         * Re-read the state, in case it changed between the try-lock
         * above and the check below.
         */
        rv = fueword32(&rwlock->rw_state, &state);

        /* set read contention bit */
        while (error == 0 && (state & wrflags) &&
            !(state & URWLOCK_READ_WAITERS)) {
            rv = casueword32(&rwlock->rw_state, state,
                &oldstate, state | URWLOCK_READ_WAITERS);
                MPASS(oldstate == state);
            error = umtxq_check_susp(td, false);
            umtxq_unbusy_unlocked(&uq->uq_key);

        /* The state changed while setting the flags; restart. */
        if (!(state & wrflags)) {
            umtxq_unbusy_unlocked(&uq->uq_key);
            error = umtxq_check_susp(td, true);

        /*
         * The contention bit is set; before sleeping, increase
         * the read-waiter count.
         */
        rv = fueword32(&rwlock->rw_blocked_readers,
            umtxq_unbusy_unlocked(&uq->uq_key);
        suword32(&rwlock->rw_blocked_readers, blocked_readers+1);

        while (state & wrflags) {
            umtxq_lock(&uq->uq_key);
            umtxq_unbusy(&uq->uq_key);
            error = umtxq_sleep(uq, "urdlck", timeout == NULL ?
            umtxq_busy(&uq->uq_key);
            umtxq_unlock(&uq->uq_key);
            rv = fueword32(&rwlock->rw_state, &state);

        /*
         * Decrease the read-waiter count, and maybe clear the read
         * contention bit.
         */
        rv = fueword32(&rwlock->rw_blocked_readers,
            umtxq_unbusy_unlocked(&uq->uq_key);
        suword32(&rwlock->rw_blocked_readers, blocked_readers-1);
        if (blocked_readers == 1) {
            rv = fueword32(&rwlock->rw_state, &state);
                umtxq_unbusy_unlocked(&uq->uq_key);
                rv = casueword32(&rwlock->rw_state, state,
                    &oldstate, state & ~URWLOCK_READ_WAITERS);
                    MPASS(oldstate == state);
                error1 = umtxq_check_susp(td, false);

        umtxq_unbusy_unlocked(&uq->uq_key);

    umtx_key_release(&uq->uq_key);
    if (error == ERESTART)
2909 do_rw_wrlock(struct thread *td, struct urwlock *rwlock, struct _umtx_time *timeout)
2911 struct abs_timeout timo;
2914 int32_t state, oldstate;
2915 int32_t blocked_writers;
2916 int32_t blocked_readers;
2917 int error, error1, rv;
2920 error = fueword32(&rwlock->rw_flags, &flags);
2923 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2927 if (timeout != NULL)
2928 abs_timeout_init2(&timo, timeout);
2930 blocked_readers = 0;
2932 rv = fueword32(&rwlock->rw_state, &state);
2934 umtx_key_release(&uq->uq_key);
2937 while ((state & URWLOCK_WRITE_OWNER) == 0 &&
2938 URWLOCK_READER_COUNT(state) == 0) {
2939 rv = casueword32(&rwlock->rw_state, state,
2940 &oldstate, state | URWLOCK_WRITE_OWNER);
2942 umtx_key_release(&uq->uq_key);
2946 MPASS(oldstate == state);
2947 umtx_key_release(&uq->uq_key);
2951 error = umtxq_check_susp(td, true);
2957 if ((state & (URWLOCK_WRITE_OWNER |
2958 URWLOCK_WRITE_WAITERS)) == 0 &&
2959 blocked_readers != 0) {
2960 umtxq_lock(&uq->uq_key);
2961 umtxq_busy(&uq->uq_key);
2962 umtxq_signal_queue(&uq->uq_key, INT_MAX,
2964 umtxq_unbusy(&uq->uq_key);
2965 umtxq_unlock(&uq->uq_key);
2971 /* grab monitor lock */
2972 umtxq_lock(&uq->uq_key);
2973 umtxq_busy(&uq->uq_key);
2974 umtxq_unlock(&uq->uq_key);
2977 * Re-read the state, in case it changed between the
2978 * try-lock above and the check below.
2980 rv = fueword32(&rwlock->rw_state, &state);
2984 while (error == 0 && ((state & URWLOCK_WRITE_OWNER) ||
2985 URWLOCK_READER_COUNT(state) != 0) &&
2986 (state & URWLOCK_WRITE_WAITERS) == 0) {
2987 rv = casueword32(&rwlock->rw_state, state,
2988 &oldstate, state | URWLOCK_WRITE_WAITERS);
2994 MPASS(oldstate == state);
2998 error = umtxq_check_susp(td, false);
3003 umtxq_unbusy_unlocked(&uq->uq_key);
3007 if ((state & URWLOCK_WRITE_OWNER) == 0 &&
3008 URWLOCK_READER_COUNT(state) == 0) {
3009 umtxq_unbusy_unlocked(&uq->uq_key);
3010 error = umtxq_check_susp(td, false);
3016 rv = fueword32(&rwlock->rw_blocked_writers,
3019 umtxq_unbusy_unlocked(&uq->uq_key);
3023 suword32(&rwlock->rw_blocked_writers, blocked_writers + 1);
3025 while ((state & URWLOCK_WRITE_OWNER) ||
3026 URWLOCK_READER_COUNT(state) != 0) {
3027 umtxq_lock(&uq->uq_key);
3028 umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
3029 umtxq_unbusy(&uq->uq_key);
3031 error = umtxq_sleep(uq, "uwrlck", timeout == NULL ?
3034 umtxq_busy(&uq->uq_key);
3035 umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
3036 umtxq_unlock(&uq->uq_key);
3039 rv = fueword32(&rwlock->rw_state, &state);
		rv = fueword32(&rwlock->rw_blocked_writers,
		    &blocked_writers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_writers, blocked_writers - 1);
		if (blocked_writers == 1) {
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				umtxq_unbusy_unlocked(&uq->uq_key);
				error = EFAULT;
				break;
			}
			for (;;) {
				rv = casueword32(&rwlock->rw_state, state,
				    &oldstate, state & ~URWLOCK_WRITE_WAITERS);
				if (rv == -1) {
					error = EFAULT;
					break;
				}
				if (rv == 0) {
					MPASS(oldstate == state);
					break;
				}
				state = oldstate;
				error1 = umtxq_check_susp(td, false);
				/*
				 * We are leaving the URWLOCK_WRITE_WAITERS
				 * behind, but this should not harm
				 * correctness.
				 */
				if (error1 != 0) {
					if (error == 0)
						error = error1;
					break;
				}
			}

			rv = fueword32(&rwlock->rw_blocked_readers,
			    &blocked_readers);
			if (rv == -1) {
				umtxq_unbusy_unlocked(&uq->uq_key);
				error = EFAULT;
				break;
			}
		} else
			blocked_readers = 0;

		umtxq_unbusy_unlocked(&uq->uq_key);
	}

	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
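/*
 * Unlock a userland rwlock: release write ownership or drop one reader,
 * then wake waiters from the preferred queue.
 */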
static int
do_rw_unlock(struct thread *td, struct urwlock *rwlock)
{
	struct umtx_q *uq;
	uint32_t flags;
	int32_t state, oldstate;
	int error, rv, q, count;

	uq = td->td_umtxq;
	error = fueword32(&rwlock->rw_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags),
	    &uq->uq_key);
	if (error != 0)
		return (error);

	error = fueword32(&rwlock->rw_state, &state);
	if (error == -1) {
		error = EFAULT;
		goto out;
	}
	if (state & URWLOCK_WRITE_OWNER) {
		for (;;) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state & ~URWLOCK_WRITE_OWNER);
			if (rv == -1) {
				error = EFAULT;
				goto out;
			}
			if (rv == 1) {
				state = oldstate;
				if (!(oldstate & URWLOCK_WRITE_OWNER)) {
					error = EPERM;
					goto out;
				}
				error = umtxq_check_susp(td, true);
				if (error != 0)
					goto out;
			} else
				break;
		}
	} else if (URWLOCK_READER_COUNT(state) != 0) {
		for (;;) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state - 1);
			if (rv == -1) {
				error = EFAULT;
				goto out;
			}
			if (rv == 1) {
				state = oldstate;
				if (URWLOCK_READER_COUNT(oldstate) == 0) {
					error = EPERM;
					goto out;
				}
				error = umtxq_check_susp(td, true);
				if (error != 0)
					goto out;
			} else
				break;
		}
	} else {
		error = EPERM;
		goto out;
	}

	count = 0;
	if (!(flags & URWLOCK_PREFER_READER)) {
		if (state & URWLOCK_WRITE_WAITERS) {
			count = 1;
			q = UMTX_EXCLUSIVE_QUEUE;
		} else if (state & URWLOCK_READ_WAITERS) {
			count = INT_MAX;
			q = UMTX_SHARED_QUEUE;
		}
	} else {
		if (state & URWLOCK_READ_WAITERS) {
			count = INT_MAX;
			q = UMTX_SHARED_QUEUE;
		} else if (state & URWLOCK_WRITE_WAITERS) {
			count = 1;
			q = UMTX_EXCLUSIVE_QUEUE;
		}
	}

	if (count) {
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_signal_queue(&uq->uq_key, count, q);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
	}
out:
	umtx_key_release(&uq->uq_key);
	return (error);
}
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
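/*
 * Wait on an old-style (_usem) semaphore: mark _has_waiters and sleep
 * until awoken by do_sem_wake or until the timeout expires.
 */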
static int
do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, count, count1;
	int error, rv, rv1;

	uq = td->td_umtxq;
	error = fueword32(&sem->_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);
again:
	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	rv = casueword32(&sem->_has_waiters, 0, &count1, 1);
	if (rv == 0)
		rv1 = fueword32(&sem->_count, &count);
	if (rv == -1 || (rv == 0 && (rv1 == -1 || count != 0)) ||
	    (rv == 1 && count1 == 0)) {
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		if (rv == 1) {
			rv = umtxq_check_susp(td, true);
			if (rv == 0)
				goto again;
			error = rv;
			goto out;
		}
		if (rv == 0)
			rv = rv1;
		error = rv == -1 ? EFAULT : 0;
		goto out;
	}
	umtxq_lock(&uq->uq_key);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		umtxq_remove(uq);
		/* A relative timeout cannot be restarted. */
		if (error == ERESTART && timeout != NULL &&
		    (timeout->_flags & UMTX_ABSTIME) == 0)
			error = ETIMEDOUT;
	}
	umtxq_unlock(&uq->uq_key);
out:
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Signal a userland semaphore.
 */
static int
do_sem_wake(struct thread *td, struct _usem *sem)
{
	struct umtx_key key;
	int error, cnt;
	uint32_t flags;

	error = fueword32(&sem->_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	if (cnt > 0) {
		/*
		 * If the count is greater than zero, the memory is still
		 * referenced by user code, so the _has_waiters flag can be
		 * updated safely.
		 */
		if (cnt == 1) {
			umtxq_unlock(&key);
			error = suword32(&sem->_has_waiters, 0);
			umtxq_lock(&key);
			if (error == -1)
				error = EFAULT;
		}
		umtxq_signal(&key, 1);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
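/*
 * Wait on a _usem2 semaphore: if the count is zero, atomically set
 * USEM_HAS_WAITERS and sleep; on EINTR with a relative timeout, the
 * remaining time is written back for the caller to resume the wait.
 */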
static int
do_sem2_wait(struct thread *td, struct _usem2 *sem, struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t count, flags;
	int error, rv;

	uq = td->td_umtxq;
	flags = fuword32(&sem->_flags);
	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);
	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	rv = fueword32(&sem->_count, &count);
	if (rv == -1) {
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);
		return (EFAULT);
	}
	for (;;) {
		if (USEM_COUNT(count) != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (0);
		}
		if (count == USEM_HAS_WAITERS)
			break;
		rv = casueword32(&sem->_count, 0, &count, USEM_HAS_WAITERS);
		if (rv == -1) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}
		if (count == 0)
			break;
		rv = umtxq_check_susp(td, true);
		if (rv != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (rv);
		}
	}
	umtxq_lock(&uq->uq_key);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		umtxq_remove(uq);
		if (timeout != NULL && (timeout->_flags & UMTX_ABSTIME) == 0) {
			/* A relative timeout cannot be restarted. */
			if (error == ERESTART)
				error = ETIMEDOUT;
			if (error == EINTR) {
				abs_timeout_update(&timo);
				timespecsub(&timo.end, &timo.cur,
				    &timeout->_timeout);
			}
		}
	}
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Signal a userland semaphore.
 */
static int
do_sem2_wake(struct thread *td, struct _usem2 *sem)
{
	struct umtx_key key;
	int error, cnt, rv;
	uint32_t count, flags;

	rv = fueword32(&sem->_flags, &flags);
	if (rv == -1)
		return (EFAULT);
	if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	if (cnt > 0) {
		/*
		 * If this was the last sleeping thread, clear the waiters
		 * flag in _count.
		 */
		if (cnt == 1) {
			umtxq_unlock(&key);
			rv = fueword32(&sem->_count, &count);
			while (rv != -1 && count & USEM_HAS_WAITERS) {
				rv = casueword32(&sem->_count, count, &count,
				    count & ~USEM_HAS_WAITERS);
				if (rv == 1) {
					rv = umtxq_check_susp(td, true);
					if (rv != 0)
						break;
				}
			}
			if (rv == -1)
				error = EFAULT;
			else if (rv > 0)
				error = rv;
			umtxq_lock(&key);
		}

		umtxq_signal(&key, 1);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
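/*
 * Copy in a user timespec and reject nonsensical values.
 */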
static int
umtx_copyin_timeout(const void *addr, struct timespec *tsp)
{
	int error;

	error = copyin(addr, tsp, sizeof(struct timespec));
	if (error == 0) {
		if (tsp->tv_sec < 0 ||
		    tsp->tv_nsec >= 1000000000 ||
		    tsp->tv_nsec < 0)
			error = EINVAL;
	}
	return (error);
}
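/*
 * Copy in a struct _umtx_time; a bare timespec is accepted for
 * backward compatibility and is treated as a relative CLOCK_REALTIME
 * timeout.
 */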
static int
umtx_copyin_umtx_time(const void *addr, size_t size, struct _umtx_time *tp)
{
	int error;

	if (size <= sizeof(struct timespec)) {
		tp->_clockid = CLOCK_REALTIME;
		tp->_flags = 0;
		error = copyin(addr, &tp->_timeout, sizeof(struct timespec));
	} else
		error = copyin(addr, tp, sizeof(struct _umtx_time));
	if (error != 0)
		return (error);
	if (tp->_timeout.tv_sec < 0 ||
	    tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0)
		return (EINVAL);
	return (0);
}
static int
__umtx_op_unimpl(struct thread *td, struct _umtx_op_args *uap)
{

	return (EOPNOTSUPP);
}
static int
__umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout, *tm_p;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 0, 0));
}
static int
__umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout, *tm_p;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
}
static int
__umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
}
static int
__umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
{

	return (kern_umtx_wake(td, uap->obj, uap->val, 0));
}

#define	BATCH_SIZE	128
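/*
 * The wake addresses are copied in from userspace in BATCH_SIZE chunks
 * so the on-stack buffer stays small regardless of uap->val.
 */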
static int
__umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap)
{
	char *uaddrs[BATCH_SIZE], **upp;
	int count, error, i, pos, tocopy;

	upp = (char **)uap->obj;
	error = 0;
	for (count = uap->val, pos = 0; count > 0; count -= tocopy,
	    pos += tocopy) {
		tocopy = MIN(count, BATCH_SIZE);
		error = copyin(upp + pos, uaddrs, tocopy * sizeof(char *));
		if (error != 0)
			break;
		for (i = 0; i < tocopy; ++i)
			kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
		maybe_yield();
	}
	return (error);
}
static int
__umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
{

	return (kern_umtx_wake(td, uap->obj, uap->val, 1));
}
static int
__umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, 0));
}
static int
__umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY));
}
static int
__umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
}
static int
__umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_wake_umutex(td, uap->obj));
}

static int
__umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_unlock_umutex(td, uap->obj, false));
}

static int
__umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1));
}
static int
__umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
}
static int
__umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_cv_signal(td, uap->obj));
}

static int
__umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_cv_broadcast(td, uap->obj));
}
static int
__umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
	} else {
		error = umtx_copyin_umtx_time(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
	}
	return (error);
}
static int
__umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_wrlock(td, uap->obj, 0);
	} else {
		error = umtx_copyin_umtx_time(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_wrlock(td, uap->obj, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_rw_unlock(td, uap->obj));
}
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
static int
__umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_sem_wait(td, uap->obj, tm_p));
}

static int
__umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_sem_wake(td, uap->obj));
}
#endif
static int
__umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_wake2_umutex(td, uap->obj, uap->val));
}
static int
__umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	size_t uasize;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		uasize = 0;
		tm_p = NULL;
	} else {
		uasize = (size_t)uap->uaddr1;
		error = umtx_copyin_umtx_time(uap->uaddr2, uasize, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	error = do_sem2_wait(td, uap->obj, tm_p);
	if (error == EINTR && uap->uaddr2 != NULL &&
	    (timeout._flags & UMTX_ABSTIME) == 0 &&
	    uasize >= sizeof(struct _umtx_time) + sizeof(struct timespec)) {
		error = copyout(&timeout._timeout,
		    (struct _umtx_time *)uap->uaddr2 + 1,
		    sizeof(struct timespec));
		if (error == 0)
			error = EINTR;
	}

	return (error);
}
static int
__umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_sem2_wake(td, uap->obj));
}
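/*
 * Registry of anonymous shared memory objects, keyed by the VM object
 * and offset behind a userspace address; this backs process-shared
 * (pshared) thread synchronization objects.
 */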
#define	USHM_OBJ_UMTX(o)						\
    ((struct umtx_shm_obj_list *)(&(o)->umtx_data))

#define	USHMF_REG_LINKED	0x0001
#define	USHMF_OBJ_LINKED	0x0002
struct umtx_shm_reg {
	TAILQ_ENTRY(umtx_shm_reg) ushm_reg_link;
	LIST_ENTRY(umtx_shm_reg) ushm_obj_link;
	struct umtx_key		ushm_key;
	struct ucred		*ushm_cred;
	struct shmfd		*ushm_obj;
	u_int			ushm_refcnt;
	u_int			ushm_flags;
};

LIST_HEAD(umtx_shm_obj_list, umtx_shm_reg);
TAILQ_HEAD(umtx_shm_reg_head, umtx_shm_reg);
static uma_zone_t umtx_shm_reg_zone;
static struct umtx_shm_reg_head umtx_shm_registry[UMTX_CHAINS];
static struct mtx umtx_shm_lock;
static struct umtx_shm_reg_head umtx_shm_reg_delfree =
    TAILQ_HEAD_INITIALIZER(umtx_shm_reg_delfree);

static void umtx_shm_free_reg(struct umtx_shm_reg *reg);

static void
umtx_shm_reg_delfree_tq(void *context __unused, int pending __unused)
{
	struct umtx_shm_reg_head d;
	struct umtx_shm_reg *reg, *reg1;

	TAILQ_INIT(&d);
	mtx_lock(&umtx_shm_lock);
	TAILQ_CONCAT(&d, &umtx_shm_reg_delfree, ushm_reg_link);
	mtx_unlock(&umtx_shm_lock);
	TAILQ_FOREACH_SAFE(reg, &d, ushm_reg_link, reg1) {
		TAILQ_REMOVE(&d, reg, ushm_reg_link);
		umtx_shm_free_reg(reg);
	}
}
static struct task umtx_shm_reg_delfree_task =
    TASK_INITIALIZER(0, umtx_shm_reg_delfree_tq, NULL);
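/*
 * Look up a registration by key in the shm registry; the umtx_shm_lock
 * must be held.
 */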
static struct umtx_shm_reg *
umtx_shm_find_reg_locked(const struct umtx_key *key)
{
	struct umtx_shm_reg *reg;
	struct umtx_shm_reg_head *reg_head;

	KASSERT(key->shared, ("umtx_shm_find_reg_locked: private key"));
	mtx_assert(&umtx_shm_lock, MA_OWNED);
	reg_head = &umtx_shm_registry[key->hash];
	TAILQ_FOREACH(reg, reg_head, ushm_reg_link) {
		KASSERT(reg->ushm_key.shared,
		    ("non-shared key on reg %p %d", reg, reg->ushm_key.shared));
		if (reg->ushm_key.info.shared.object ==
		    key->info.shared.object &&
		    reg->ushm_key.info.shared.offset ==
		    key->info.shared.offset) {
			KASSERT(reg->ushm_key.type == TYPE_SHM, ("TYPE_USHM"));
			KASSERT(reg->ushm_refcnt > 0,
			    ("reg %p refcnt 0 onlist", reg));
			KASSERT((reg->ushm_flags & USHMF_REG_LINKED) != 0,
			    ("reg %p not linked", reg));
			reg->ushm_refcnt++;
			return (reg);
		}
	}
	return (NULL);
}
static struct umtx_shm_reg *
umtx_shm_find_reg(const struct umtx_key *key)
{
	struct umtx_shm_reg *reg;

	mtx_lock(&umtx_shm_lock);
	reg = umtx_shm_find_reg_locked(key);
	mtx_unlock(&umtx_shm_lock);
	return (reg);
}
static void
umtx_shm_free_reg(struct umtx_shm_reg *reg)
{

	chgumtxcnt(reg->ushm_cred->cr_ruidinfo, -1, 0);
	crfree(reg->ushm_cred);
	shm_drop(reg->ushm_obj);
	uma_zfree(umtx_shm_reg_zone, reg);
}
static bool
umtx_shm_unref_reg_locked(struct umtx_shm_reg *reg, bool force)
{
	bool res;

	mtx_assert(&umtx_shm_lock, MA_OWNED);
	KASSERT(reg->ushm_refcnt > 0, ("ushm_reg %p refcnt 0", reg));
	reg->ushm_refcnt--;
	res = reg->ushm_refcnt == 0;
	if (res || force) {
		if ((reg->ushm_flags & USHMF_REG_LINKED) != 0) {
			TAILQ_REMOVE(&umtx_shm_registry[reg->ushm_key.hash],
			    reg, ushm_reg_link);
			reg->ushm_flags &= ~USHMF_REG_LINKED;
		}
		if ((reg->ushm_flags & USHMF_OBJ_LINKED) != 0) {
			LIST_REMOVE(reg, ushm_obj_link);
			reg->ushm_flags &= ~USHMF_OBJ_LINKED;
		}
	}
	return (res);
}
static void
umtx_shm_unref_reg(struct umtx_shm_reg *reg, bool force)
{
	vm_object_t object;
	bool dofree;

	if (force) {
		object = reg->ushm_obj->shm_object;
		VM_OBJECT_WLOCK(object);
		object->flags |= OBJ_UMTXDEAD;
		VM_OBJECT_WUNLOCK(object);
	}
	mtx_lock(&umtx_shm_lock);
	dofree = umtx_shm_unref_reg_locked(reg, force);
	mtx_unlock(&umtx_shm_lock);
	if (dofree)
		umtx_shm_free_reg(reg);
}
void
umtx_shm_object_init(vm_object_t object)
{

	LIST_INIT(USHM_OBJ_UMTX(object));
}
void
umtx_shm_object_terminated(vm_object_t object)
{
	struct umtx_shm_reg *reg, *reg1;
	bool dofree;

	if (LIST_EMPTY(USHM_OBJ_UMTX(object)))
		return;

	dofree = false;
	mtx_lock(&umtx_shm_lock);
	LIST_FOREACH_SAFE(reg, USHM_OBJ_UMTX(object), ushm_obj_link, reg1) {
		if (umtx_shm_unref_reg_locked(reg, true)) {
			TAILQ_INSERT_TAIL(&umtx_shm_reg_delfree, reg,
			    ushm_reg_link);
			dofree = true;
		}
	}
	mtx_unlock(&umtx_shm_lock);
	if (dofree)
		taskqueue_enqueue(taskqueue_thread, &umtx_shm_reg_delfree_task);
}
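/*
 * Find an existing registration for the key or create a new one,
 * charging it against the caller's RLIMIT_UMTXP resource limit.
 */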
static int
umtx_shm_create_reg(struct thread *td, const struct umtx_key *key,
    struct umtx_shm_reg **res)
{
	struct umtx_shm_reg *reg, *reg1;
	struct ucred *cred;
	int error;

	reg = umtx_shm_find_reg(key);
	if (reg != NULL) {
		*res = reg;
		return (0);
	}
	cred = td->td_ucred;
	if (!chgumtxcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_UMTXP)))
		return (ENOMEM);
	reg = uma_zalloc(umtx_shm_reg_zone, M_WAITOK | M_ZERO);
	reg->ushm_refcnt = 1;
	bcopy(key, &reg->ushm_key, sizeof(*key));
	reg->ushm_obj = shm_alloc(td->td_ucred, O_RDWR);
	reg->ushm_cred = crhold(cred);
	error = shm_dotruncate(reg->ushm_obj, PAGE_SIZE);
	if (error != 0) {
		umtx_shm_free_reg(reg);
		return (error);
	}
	mtx_lock(&umtx_shm_lock);
	reg1 = umtx_shm_find_reg_locked(key);
	if (reg1 != NULL) {
		mtx_unlock(&umtx_shm_lock);
		umtx_shm_free_reg(reg);
		*res = reg1;
		return (0);
	}
	reg->ushm_refcnt++;
	TAILQ_INSERT_TAIL(&umtx_shm_registry[key->hash], reg, ushm_reg_link);
	LIST_INSERT_HEAD(USHM_OBJ_UMTX(key->info.shared.object), reg,
	    ushm_obj_link);
	reg->ushm_flags = USHMF_REG_LINKED | USHMF_OBJ_LINKED;
	mtx_unlock(&umtx_shm_lock);
	*res = reg;
	return (0);
}
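/*
 * Check whether the mapping backing addr is still a live umtx_shm
 * region; returns ENOTTY once the backing object has been marked
 * OBJ_UMTXDEAD by a destroy.
 */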
static int
umtx_shm_alive(struct thread *td, void *addr)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	int res, ret;
	boolean_t wired;

	map = &td->td_proc->p_vmspace->vm_map;
	res = vm_map_lookup(&map, (uintptr_t)addr, VM_PROT_READ, &entry,
	    &object, &pindex, &prot, &wired);
	if (res != KERN_SUCCESS)
		return (EFAULT);
	if (object == NULL)
		ret = EINVAL;
	else
		ret = (object->flags & OBJ_UMTXDEAD) != 0 ? ENOTTY : 0;
	vm_map_lookup_done(map, entry);
	return (ret);
}
static void
umtx_shm_init(void)
{
	int i;

	umtx_shm_reg_zone = uma_zcreate("umtx_shm", sizeof(struct umtx_shm_reg),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&umtx_shm_lock, "umtxshm", NULL, MTX_DEF);
	for (i = 0; i < nitems(umtx_shm_registry); i++)
		TAILQ_INIT(&umtx_shm_registry[i]);
}
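/*
 * Implement UMTX_OP_SHM: create, look up, destroy, or probe the
 * registration for addr; create and lookup return a file descriptor
 * referencing the backing shared memory object.
 */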
static int
umtx_shm(struct thread *td, void *addr, u_int flags)
{
	struct umtx_key key;
	struct umtx_shm_reg *reg;
	struct file *fp;
	int error, fd;

	if (__bitcount(flags & (UMTX_SHM_CREAT | UMTX_SHM_LOOKUP |
	    UMTX_SHM_DESTROY | UMTX_SHM_ALIVE)) != 1)
		return (EINVAL);
	if ((flags & UMTX_SHM_ALIVE) != 0)
		return (umtx_shm_alive(td, addr));
	error = umtx_key_get(addr, TYPE_SHM, PROCESS_SHARE, &key);
	if (error != 0)
		return (error);
	KASSERT(key.shared == 1, ("non-shared key"));
	if ((flags & UMTX_SHM_CREAT) != 0) {
		error = umtx_shm_create_reg(td, &key, &reg);
	} else {
		reg = umtx_shm_find_reg(&key);
		if (reg == NULL)
			error = ESRCH;
	}
	umtx_key_release(&key);
	if (error != 0)
		return (error);
	KASSERT(reg != NULL, ("no reg"));
	if ((flags & UMTX_SHM_DESTROY) != 0) {
		umtx_shm_unref_reg(reg, true);
	} else {
#if 0
#ifdef MAC
		error = mac_posixshm_check_open(td->td_ucred,
		    reg->ushm_obj, FFLAGS(O_RDWR));
		if (error == 0)
#endif
			error = shm_access(reg->ushm_obj, td->td_ucred,
			    FFLAGS(O_RDWR));
#endif
		if (error == 0)
			error = falloc_caps(td, &fp, &fd, O_CLOEXEC, NULL);
		if (error == 0) {
			shm_hold(reg->ushm_obj);
			finit(fp, FFLAGS(O_RDWR), DTYPE_SHM, reg->ushm_obj,
			    &shm_ops);
			td->td_retval[0] = fd;
			fdrop(fp, td);
		}
	}
	umtx_shm_unref_reg(reg, false);
	return (error);
}
static int
__umtx_op_shm(struct thread *td, struct _umtx_op_args *uap)
{

	return (umtx_shm(td, uap->uaddr1, uap->val));
}
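/*
 * Record the userspace robust mutex list heads for the current thread;
 * they are walked at thread exit to unlock abandoned robust mutexes.
 */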
static int
umtx_robust_lists(struct thread *td, struct umtx_robust_lists_params *rbp)
{

	td->td_rb_list = rbp->robust_list_offset;
	td->td_rbp_list = rbp->robust_priv_list_offset;
	td->td_rb_inact = rbp->robust_inact_offset;
	return (0);
}
static int
__umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap)
{
	struct umtx_robust_lists_params rb;
	int error;

	if (uap->val > sizeof(rb))
		return (EINVAL);
	bzero(&rb, sizeof(rb));
	error = copyin(uap->uaddr1, &rb, uap->val);
	if (error != 0)
		return (error);
	return (umtx_robust_lists(td, &rb));
}
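/*
 * Dispatch table for _umtx_op(2), indexed by the op argument.
 */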
typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap);

static const _umtx_op_func op_table[] = {
	[UMTX_OP_RESERVED0]	= __umtx_op_unimpl,
	[UMTX_OP_RESERVED1]	= __umtx_op_unimpl,
	[UMTX_OP_WAIT]		= __umtx_op_wait,
	[UMTX_OP_WAKE]		= __umtx_op_wake,
	[UMTX_OP_MUTEX_TRYLOCK]	= __umtx_op_trylock_umutex,
	[UMTX_OP_MUTEX_LOCK]	= __umtx_op_lock_umutex,
	[UMTX_OP_MUTEX_UNLOCK]	= __umtx_op_unlock_umutex,
	[UMTX_OP_SET_CEILING]	= __umtx_op_set_ceiling,
	[UMTX_OP_CV_WAIT]	= __umtx_op_cv_wait,
	[UMTX_OP_CV_SIGNAL]	= __umtx_op_cv_signal,
	[UMTX_OP_CV_BROADCAST]	= __umtx_op_cv_broadcast,
	[UMTX_OP_WAIT_UINT]	= __umtx_op_wait_uint,
	[UMTX_OP_RW_RDLOCK]	= __umtx_op_rw_rdlock,
	[UMTX_OP_RW_WRLOCK]	= __umtx_op_rw_wrlock,
	[UMTX_OP_RW_UNLOCK]	= __umtx_op_rw_unlock,
	[UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private,
	[UMTX_OP_WAKE_PRIVATE]	= __umtx_op_wake_private,
	[UMTX_OP_MUTEX_WAIT]	= __umtx_op_wait_umutex,
	[UMTX_OP_MUTEX_WAKE]	= __umtx_op_wake_umutex,
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
	[UMTX_OP_SEM_WAIT]	= __umtx_op_sem_wait,
	[UMTX_OP_SEM_WAKE]	= __umtx_op_sem_wake,
#else
	[UMTX_OP_SEM_WAIT]	= __umtx_op_unimpl,
	[UMTX_OP_SEM_WAKE]	= __umtx_op_unimpl,
#endif
	[UMTX_OP_NWAKE_PRIVATE]	= __umtx_op_nwake_private,
	[UMTX_OP_MUTEX_WAKE2]	= __umtx_op_wake2_umutex,
	[UMTX_OP_SEM2_WAIT]	= __umtx_op_sem2_wait,
	[UMTX_OP_SEM2_WAKE]	= __umtx_op_sem2_wake,
	[UMTX_OP_SHM]		= __umtx_op_shm,
	[UMTX_OP_ROBUST_LISTS]	= __umtx_op_robust_lists,
};
int
sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
{

	if ((unsigned)uap->op < nitems(op_table))
		return (*op_table[uap->op])(td, uap);
	return (EINVAL);
}
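/*
 * 32-bit compatibility: translate the ILP32 timespec and _umtx_time
 * layouts before dispatching to the common implementations.
 */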
#ifdef COMPAT_FREEBSD32

struct umtx_time32 {
	struct timespec32	timeout;
	uint32_t		flags;
	uint32_t		clockid;
};
static int
umtx_copyin_timeout32(void *addr, struct timespec *tsp)
{
	struct timespec32 ts32;
	int error;

	error = copyin(addr, &ts32, sizeof(struct timespec32));
	if (error == 0) {
		if (ts32.tv_sec < 0 ||
		    ts32.tv_nsec >= 1000000000 ||
		    ts32.tv_nsec < 0)
			error = EINVAL;
		else {
			tsp->tv_sec = ts32.tv_sec;
			tsp->tv_nsec = ts32.tv_nsec;
		}
	}
	return (error);
}
static int
umtx_copyin_umtx_time32(const void *addr, size_t size, struct _umtx_time *tp)
{
	struct umtx_time32 t32;
	int error;

	t32.clockid = CLOCK_REALTIME;
	t32.flags = 0;
	if (size <= sizeof(struct timespec32))
		error = copyin(addr, &t32.timeout, sizeof(struct timespec32));
	else
		error = copyin(addr, &t32, sizeof(struct umtx_time32));
	if (error != 0)
		return (error);
	if (t32.timeout.tv_sec < 0 ||
	    t32.timeout.tv_nsec >= 1000000000 || t32.timeout.tv_nsec < 0)
		return (EINVAL);
	tp->_timeout.tv_sec = t32.timeout.tv_sec;
	tp->_timeout.tv_nsec = t32.timeout.tv_nsec;
	tp->_flags = t32.flags;
	tp->_clockid = t32.clockid;
	return (0);
}
static int
__umtx_op_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
}
static int
__umtx_op_lock_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, 0));
}
static int
__umtx_op_wait_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
}
static int
__umtx_op_cv_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
}
static int
__umtx_op_rw_rdlock_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
	} else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
	}
	return (error);
}
static int
__umtx_op_rw_wrlock_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_wrlock(td, uap->obj, 0);
	} else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_wrlock(td, uap->obj, &timeout);
	}
	return (error);
}
static int
__umtx_op_wait_uint_private_compat32(struct thread *td,
    struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
}
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
static int
__umtx_op_sem_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_sem_wait(td, uap->obj, tm_p));
}
#endif

static int
__umtx_op_sem2_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	size_t uasize;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		uasize = 0;
		tm_p = NULL;
	} else {
		uasize = (size_t)uap->uaddr1;
		error = umtx_copyin_umtx_time32(uap->uaddr2, uasize, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	error = do_sem2_wait(td, uap->obj, tm_p);
	if (error == EINTR && uap->uaddr2 != NULL &&
	    (timeout._flags & UMTX_ABSTIME) == 0 &&
	    uasize >= sizeof(struct umtx_time32) + sizeof(struct timespec32)) {
		struct timespec32 remain32 = {
			.tv_sec = timeout._timeout.tv_sec,
			.tv_nsec = timeout._timeout.tv_nsec,
		};
		error = copyout(&remain32,
		    (struct umtx_time32 *)uap->uaddr2 + 1,
		    sizeof(struct timespec32));
		if (error == 0)
			error = EINTR;
	}

	return (error);
}

static int
__umtx_op_nwake_private32(struct thread *td, struct _umtx_op_args *uap)
{
	uint32_t uaddrs[BATCH_SIZE], **upp;
	int count, error, i, pos, tocopy;

	upp = (uint32_t **)uap->obj;
	error = 0;
	for (count = uap->val, pos = 0; count > 0; count -= tocopy,
	    pos += tocopy) {
		tocopy = MIN(count, BATCH_SIZE);
		error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t));
		if (error != 0)
			break;
		for (i = 0; i < tocopy; ++i)
			kern_umtx_wake(td, (void *)(intptr_t)uaddrs[i],
			    INT_MAX, 1);
		maybe_yield();
	}
	return (error);
}
struct umtx_robust_lists_params_compat32 {
	uint32_t	robust_list_offset;
	uint32_t	robust_priv_list_offset;
	uint32_t	robust_inact_offset;
};
static int
__umtx_op_robust_lists_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct umtx_robust_lists_params rb;
	struct umtx_robust_lists_params_compat32 rb32;
	int error;

	if (uap->val > sizeof(rb32))
		return (EINVAL);
	bzero(&rb, sizeof(rb));
	bzero(&rb32, sizeof(rb32));
	error = copyin(uap->uaddr1, &rb32, uap->val);
	if (error != 0)
		return (error);
	rb.robust_list_offset = rb32.robust_list_offset;
	rb.robust_priv_list_offset = rb32.robust_priv_list_offset;
	rb.robust_inact_offset = rb32.robust_inact_offset;
	return (umtx_robust_lists(td, &rb));
}
static const _umtx_op_func op_table_compat32[] = {
	[UMTX_OP_RESERVED0]	= __umtx_op_unimpl,
	[UMTX_OP_RESERVED1]	= __umtx_op_unimpl,
	[UMTX_OP_WAIT]		= __umtx_op_wait_compat32,
	[UMTX_OP_WAKE]		= __umtx_op_wake,
	[UMTX_OP_MUTEX_TRYLOCK]	= __umtx_op_trylock_umutex,
	[UMTX_OP_MUTEX_LOCK]	= __umtx_op_lock_umutex_compat32,
	[UMTX_OP_MUTEX_UNLOCK]	= __umtx_op_unlock_umutex,
	[UMTX_OP_SET_CEILING]	= __umtx_op_set_ceiling,
	[UMTX_OP_CV_WAIT]	= __umtx_op_cv_wait_compat32,
	[UMTX_OP_CV_SIGNAL]	= __umtx_op_cv_signal,
	[UMTX_OP_CV_BROADCAST]	= __umtx_op_cv_broadcast,
	[UMTX_OP_WAIT_UINT]	= __umtx_op_wait_compat32,
	[UMTX_OP_RW_RDLOCK]	= __umtx_op_rw_rdlock_compat32,
	[UMTX_OP_RW_WRLOCK]	= __umtx_op_rw_wrlock_compat32,
	[UMTX_OP_RW_UNLOCK]	= __umtx_op_rw_unlock,
	[UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private_compat32,
	[UMTX_OP_WAKE_PRIVATE]	= __umtx_op_wake_private,
	[UMTX_OP_MUTEX_WAIT]	= __umtx_op_wait_umutex_compat32,
	[UMTX_OP_MUTEX_WAKE]	= __umtx_op_wake_umutex,
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
	[UMTX_OP_SEM_WAIT]	= __umtx_op_sem_wait_compat32,
	[UMTX_OP_SEM_WAKE]	= __umtx_op_sem_wake,
#else
	[UMTX_OP_SEM_WAIT]	= __umtx_op_unimpl,
	[UMTX_OP_SEM_WAKE]	= __umtx_op_unimpl,
#endif
	[UMTX_OP_NWAKE_PRIVATE]	= __umtx_op_nwake_private32,
	[UMTX_OP_MUTEX_WAKE2]	= __umtx_op_wake2_umutex,
	[UMTX_OP_SEM2_WAIT]	= __umtx_op_sem2_wait_compat32,
	[UMTX_OP_SEM2_WAKE]	= __umtx_op_sem2_wake,
	[UMTX_OP_SHM]		= __umtx_op_shm,
	[UMTX_OP_ROBUST_LISTS]	= __umtx_op_robust_lists_compat32,
};
int
freebsd32__umtx_op(struct thread *td, struct freebsd32__umtx_op_args *uap)
{

	if ((unsigned)uap->op < nitems(op_table_compat32)) {
		return (*op_table_compat32[uap->op])(td,
		    (struct _umtx_op_args *)uap);
	}
	return (EINVAL);
}
#endif
void
umtx_thread_init(struct thread *td)
{

	td->td_umtxq = umtxq_alloc();
	td->td_umtxq->uq_thread = td;
}

void
umtx_thread_fini(struct thread *td)
{

	umtxq_free(td->td_umtxq);
}
/*
 * Called when a new thread is created, e.g. by fork().
 */
void
umtx_thread_alloc(struct thread *td)
{
	struct umtx_q *uq;

	uq = td->td_umtxq;
	uq->uq_inherited_pri = PRI_MAX;

	KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
	KASSERT(uq->uq_thread == td, ("uq_thread != td"));
	KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
	KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
}
/*
 * exec() hook.
 *
 * Clear the robust lists for all of the process's threads, without
 * delaying the cleanup to the thread_exit hook, since the relevant
 * address space is destroyed right now.
 */
static void
umtx_exec_hook(void *arg __unused, struct proc *p,
    struct image_params *imgp __unused)
{
	struct thread *td;

	KASSERT(p == curproc, ("need curproc"));
	KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
	    (p->p_flag & P_STOPPED_SINGLE) != 0,
	    ("curproc must be single-threaded"));
	/*
	 * There is no need to lock the list, as only the current thread
	 * can be running at this point.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(td == curthread ||
		    ((td->td_flags & TDF_BOUNDARY) != 0 && TD_IS_SUSPENDED(td)),
		    ("running thread %p %p", p, td));
		umtx_thread_cleanup(td);
		td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
	}
}
/*
 * thread_exit() hook.
 */
void
umtx_thread_exit(struct thread *td)
{

	umtx_thread_cleanup(td);
}
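/*
 * Read a userspace pointer, using the 32-bit layout for ILP32
 * processes.
 */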
static int
umtx_read_uptr(struct thread *td, uintptr_t ptr, uintptr_t *res)
{
	u_long res1;
#ifdef COMPAT_FREEBSD32
	uint32_t res32;
#endif
	int error;

#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		error = fueword32((void *)ptr, &res32);
		if (error == 0)
			res1 = res32;
	} else
#endif
	{
		error = fueword((void *)ptr, &res1);
	}
	if (error == 0)
		*res = res1;
	else
		error = EFAULT;
	return (error);
}
static void
umtx_read_rb_list(struct thread *td, struct umutex *m, uintptr_t *rb_list)
{
#ifdef COMPAT_FREEBSD32
	struct umutex32 m32;

	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		memcpy(&m32, m, sizeof(m32));
		*rb_list = m32.m_rb_lnk;
	} else
#endif
		*rb_list = m->m_rb_lnk;
}
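/*
 * Handle one robust mutex from a robust list: fetch the next list link,
 * and unlock the mutex if it is robust and owned by the exiting thread.
 */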
static int
umtx_handle_rb(struct thread *td, uintptr_t rbp, uintptr_t *rb_list, bool inact)
{
	struct umutex m;
	int error;

	KASSERT(td->td_proc == curproc, ("need current vmspace"));
	error = copyin((void *)rbp, &m, sizeof(m));
	if (error != 0)
		return (error);
	if (rb_list != NULL)
		umtx_read_rb_list(td, &m, rb_list);
	if ((m.m_flags & UMUTEX_ROBUST) == 0)
		return (EINVAL);
	if ((m.m_owner & ~UMUTEX_CONTESTED) != td->td_tid)
		/* inact is cleared after unlock, allow the inconsistency. */
		return (inact ? 0 : EINVAL);
	return (do_unlock_umutex(td, (struct umutex *)rbp, true));
}
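/*
 * Walk one robust list, unlocking owned robust mutexes; the walk is
 * bounded by umtx_max_rb to defend against corrupted or cyclic lists.
 */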
static void
umtx_cleanup_rb_list(struct thread *td, uintptr_t rb_list, uintptr_t *rb_inact,
    const char *name)
{
	int error, i;
	uintptr_t rbp;
	bool inact;

	if (rb_list == 0)
		return;
	error = umtx_read_uptr(td, rb_list, &rbp);
	for (i = 0; error == 0 && rbp != 0 && i < umtx_max_rb; i++) {
		if (rbp == *rb_inact) {
			inact = true;
			*rb_inact = 0;
		} else
			inact = false;
		error = umtx_handle_rb(td, rbp, &rbp, inact);
	}
	if (i == umtx_max_rb && umtx_verbose_rb) {
		uprintf("comm %s pid %d: reached umtx %smax rb %d\n",
		    td->td_proc->p_comm, td->td_proc->p_pid, name, umtx_max_rb);
	}
	if (error != 0 && umtx_verbose_rb) {
		uprintf("comm %s pid %d: handling %srb error %d\n",
		    td->td_proc->p_comm, td->td_proc->p_pid, name, error);
	}
}
/*
 * Clean up umtx data.
 */
void
umtx_thread_cleanup(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;
	uintptr_t rb_inact;

	/*
	 * Disown pi mutexes.
	 */
	uq = td->td_umtxq;
	if (uq != NULL) {
		if (uq->uq_inherited_pri != PRI_MAX ||
		    !TAILQ_EMPTY(&uq->uq_pi_contested)) {
			mtx_lock(&umtx_lock);
			uq->uq_inherited_pri = PRI_MAX;
			while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
				pi->pi_owner = NULL;
				TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
			}
			mtx_unlock(&umtx_lock);
		}
		sched_lend_user_prio_cond(td, PRI_MAX);
	}

	if (td->td_rb_inact == 0 && td->td_rb_list == 0 && td->td_rbp_list == 0)
		return;

	/*
	 * Handle terminated robust mutexes. Must be done after
	 * robust pi disown, otherwise unlock could see unowned
	 * pi mutexes.
	 */
	rb_inact = td->td_rb_inact;
	if (rb_inact != 0)
		(void)umtx_read_uptr(td, rb_inact, &rb_inact);
	umtx_cleanup_rb_list(td, td->td_rb_list, &rb_inact, "");
	umtx_cleanup_rb_list(td, td->td_rbp_list, &rb_inact, "priv ");
	if (rb_inact != 0)
		(void)umtx_handle_rb(td, rb_inact, NULL, true);
}