/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015, 2016 The FreeBSD Foundation
 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_umtx_profiling.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/eventhandler.h>
#include <sys/umtx.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <machine/atomic.h>
#include <machine/cpu.h>
#ifdef COMPAT_FREEBSD32
#include <compat/freebsd32/freebsd32_proto.h>
#endif

#define	_UMUTEX_TRY		1
#define	_UMUTEX_WAIT		2

#ifdef UMTX_PROFILING
#define	UPROF_PERC_BIGGER(w, f, sw, sf)					\
	(((w) > (sw)) || ((w) == (sw) && (f) > (sf)))
#endif
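/*
 * Illustrative note (added commentary, not in the original source):
 * the profiling code below represents a percentage as a (whole,
 * fraction) pair to avoid floating point in the kernel, and
 * UPROF_PERC_BIGGER() compares two such pairs lexicographically.
 * For example, (w = 500, f = 2500) versus (sw = 500, sf = 1200):
 * the wholes tie, so the comparison falls through to the fractions
 * and the macro evaluates to true.
 */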
/* Priority inheritance mutex info. */
struct umtx_pi {
	/* Owner thread */
	struct thread		*pi_owner;

	/* Reference count */
	int			pi_refcount;

	/* List entry to link PI mutexes held by a thread */
	TAILQ_ENTRY(umtx_pi)	pi_link;

	/* List entry in hash */
	TAILQ_ENTRY(umtx_pi)	pi_hashlink;

	/* List for waiters */
	TAILQ_HEAD(,umtx_q)	pi_blocked;

	/* Identify a userland lock object */
	struct umtx_key		pi_key;
};
/* A userland synchronous object user. */
struct umtx_q {
	/* Linked list for the hash. */
	TAILQ_ENTRY(umtx_q)	uq_link;

	/* Umtx key. */
	struct umtx_key		uq_key;

	/* Umtx flags. */
	int			uq_flags;
#define UQF_UMTXQ	0x0001

	/* The thread waiting on this umtx. */
	struct thread		*uq_thread;

	/*
	 * Blocked on a PI mutex.  Reads may use either the chain lock
	 * or umtx_lock; writes must hold both the chain lock and
	 * umtx_lock.
	 */
	struct umtx_pi		*uq_pi_blocked;

	/* On blocked list */
	TAILQ_ENTRY(umtx_q)	uq_lockq;

	/* PI mutexes on which other threads contend with us */
	TAILQ_HEAD(,umtx_pi)	uq_pi_contested;

	/* Inherited priority from PP mutex */
	u_char			uq_inherited_pri;

	/* Spare queue ready to be reused */
	struct umtxq_queue	*uq_spare_queue;

	/* The queue we are on */
	struct umtxq_queue	*uq_cur_queue;
};
TAILQ_HEAD(umtxq_head, umtx_q);

/* Per-key wait-queue */
struct umtxq_queue {
	struct umtxq_head	head;
	struct umtx_key		key;
	LIST_ENTRY(umtxq_queue)	link;
	int			length;
};

LIST_HEAD(umtxq_list, umtxq_queue);

/* Userland lock object's wait-queue chain */
struct umtxq_chain {
	/* Lock for this chain. */
	struct mtx		uc_lock;

	/* List of sleep queues. */
	struct umtxq_list	uc_queue[2];
#define UMTX_SHARED_QUEUE	0
#define UMTX_EXCLUSIVE_QUEUE	1

	LIST_HEAD(, umtxq_queue) uc_spare_queue;

	/* Busy flag */
	char			uc_busy;

	/* Chain lock waiters */
	int			uc_waiters;

	/* All PI mutexes in the hash list */
	TAILQ_HEAD(,umtx_pi)	uc_pi_list;

#ifdef UMTX_PROFILING
	u_int			length;
	u_int			max_length;
#endif
};

#define	UMTXQ_LOCKED_ASSERT(uc)		mtx_assert(&(uc)->uc_lock, MA_OWNED)
/*
 * Don't propagate time-sharing priority; there is a security reason.
 * A user could simply create a PI mutex, have thread A lock it, and
 * have another thread B block on it.  Because B is sleeping, its
 * priority would be boosted, and A's priority would be boosted as
 * well through priority propagation.  A's priority would then never
 * be lowered even while it consumes 100% CPU, which is unfair to
 * other processes.
 */

#define UPRI(td)	(((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\
			  (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\
			 PRI_MAX_TIMESHARE : (td)->td_user_pri)
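/*
 * Illustrative example (added commentary, not in the original source):
 * a time-sharing thread whose td_user_pri falls anywhere in
 * [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE] is clamped by UPRI() to
 * PRI_MAX_TIMESHARE, the weakest possible value, so blocking on a PI
 * mutex lends the owner no time-sharing boost.  A real-time thread's
 * td_user_pri passes through unchanged and is propagated in full.
 */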
#define	GOLDEN_RATIO_PRIME	2654404609U
#define	UMTX_CHAINS		512
#define	UMTX_SHIFTS		(__WORD_BIT - 9)

#define	GET_SHARE(flags)	\
    (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)

#define BUSY_SPINS		200

struct abs_timeout {
	int clockid;
	bool is_abs_real;	/* TIMER_ABSTIME && CLOCK_REALTIME* */
	struct timespec cur;
	struct timespec end;
};
#ifdef COMPAT_FREEBSD32
struct umutex32 {
	volatile __lwpid_t	m_owner;	/* Owner of the mutex */
	__uint32_t		m_flags;	/* Flags of the mutex */
	__uint32_t		m_ceilings[2];	/* Priority protect ceiling */
	__uint32_t		m_rb_lnk;	/* Robust linkage */
	__uint32_t		m_pad;
	__uint32_t		m_spare[2];
};

_Static_assert(sizeof(struct umutex) == sizeof(struct umutex32), "umutex32");
_Static_assert(__offsetof(struct umutex, m_spare[0]) ==
    __offsetof(struct umutex32, m_spare[0]), "m_spare32");
#endif
int umtx_shm_vnobj_persistent = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_vnode_persistent, CTLFLAG_RWTUN,
    &umtx_shm_vnobj_persistent, 0,
    "False forces destruction of umtx attached to file, on last close");
static int umtx_max_rb = 1000;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_max_robust, CTLFLAG_RWTUN,
    &umtx_max_rb, 0,
    "Maximum number of robust mutexes allowed for each thread");

static uma_zone_t		umtx_pi_zone;
static struct umtxq_chain	umtxq_chains[2][UMTX_CHAINS];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
static int			umtx_pi_allocated;

static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx debug");
SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
    &umtx_pi_allocated, 0, "Allocated umtx_pi");
static int umtx_verbose_rb = 1;
SYSCTL_INT(_debug_umtx, OID_AUTO, robust_faults_verbose, CTLFLAG_RWTUN,
    &umtx_verbose_rb, 0,
    "");
#ifdef UMTX_PROFILING
static long max_length;
SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length");
static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD, 0, "umtx chain stats");
#endif
static void abs_timeout_update(struct abs_timeout *timo);

static void umtx_shm_init(void);
static void umtxq_sysinit(void *);
static void umtxq_hash(struct umtx_key *key);
static struct umtxq_chain *umtxq_getchain(struct umtx_key *key);
static void umtxq_lock(struct umtx_key *key);
static void umtxq_unlock(struct umtx_key *key);
static void umtxq_busy(struct umtx_key *key);
static void umtxq_unbusy(struct umtx_key *key);
static void umtxq_insert_queue(struct umtx_q *uq, int q);
static void umtxq_remove_queue(struct umtx_q *uq, int q);
static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *);
static int umtxq_count(struct umtx_key *key);
static struct umtx_pi *umtx_pi_alloc(int);
static void umtx_pi_free(struct umtx_pi *pi);
static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    bool rb);
static void umtx_thread_cleanup(struct thread *td);
static void umtx_exec_hook(void *arg __unused, struct proc *p __unused,
    struct image_params *imgp __unused);
SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);
#define umtxq_signal(key, nwake)	umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
#define umtxq_insert(uq)	umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
#define umtxq_remove(uq)	umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)

static struct mtx umtx_lock;
#ifdef UMTX_PROFILING
static void
umtx_init_profiling(void)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < UMTX_CHAINS; ++i) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "umtx hash stats");
		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL);
		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL);
	}
}
static int
sysctl_debug_umtx_chains_peaks(SYSCTL_HANDLER_ARGS)
{
	char buf[512];
	struct sbuf sb;
	struct umtxq_chain *uc;
	u_int fract, i, j, tot, whole;
	u_int sf0, sf1, sf2, sf3, sf4;
	u_int si0, si1, si2, si3, si4;
	u_int sw0, sw1, sw2, sw3, sw4;

	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
	for (i = 0; i < 2; i++) {
		tot = 0;
		for (j = 0; j < UMTX_CHAINS; ++j) {
			uc = &umtxq_chains[i][j];
			mtx_lock(&uc->uc_lock);
			tot += uc->max_length;
			mtx_unlock(&uc->uc_lock);
		}
		if (tot == 0)
			sbuf_printf(&sb, "%u) Empty ", i);
		else {
			sf0 = sf1 = sf2 = sf3 = sf4 = 0;
			si0 = si1 = si2 = si3 = si4 = 0;
			sw0 = sw1 = sw2 = sw3 = sw4 = 0;
			for (j = 0; j < UMTX_CHAINS; j++) {
				uc = &umtxq_chains[i][j];
				mtx_lock(&uc->uc_lock);
				whole = uc->max_length * 100;
				mtx_unlock(&uc->uc_lock);
				fract = (whole % tot) * 100;
				if (UPROF_PERC_BIGGER(whole, fract, sw0, sf0)) {
					sf0 = fract;
					si0 = j;
					sw0 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw1,
				    sf1)) {
					sf1 = fract;
					si1 = j;
					sw1 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw2,
				    sf2)) {
					sf2 = fract;
					si2 = j;
					sw2 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw3,
				    sf3)) {
					sf3 = fract;
					si3 = j;
					sw3 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw4,
				    sf4)) {
					sf4 = fract;
					si4 = j;
					sw4 = whole;
				}
			}
			sbuf_printf(&sb, "queue %u:\n", i);
			sbuf_printf(&sb, "1st: %u.%u%% idx: %u\n", sw0 / tot,
			    sf0 / tot, si0);
			sbuf_printf(&sb, "2nd: %u.%u%% idx: %u\n", sw1 / tot,
			    sf1 / tot, si1);
			sbuf_printf(&sb, "3rd: %u.%u%% idx: %u\n", sw2 / tot,
			    sf2 / tot, si2);
			sbuf_printf(&sb, "4th: %u.%u%% idx: %u\n", sw3 / tot,
			    sf3 / tot, si3);
			sbuf_printf(&sb, "5th: %u.%u%% idx: %u\n", sw4 / tot,
			    sf4 / tot, si4);
		}
	}
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);
	return (0);
}
static int
sysctl_debug_umtx_chains_clear(SYSCTL_HANDLER_ARGS)
{
	struct umtxq_chain *uc;
	u_int i, j;
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (clear != 0) {
		for (i = 0; i < 2; ++i) {
			for (j = 0; j < UMTX_CHAINS; ++j) {
				uc = &umtxq_chains[i][j];
				mtx_lock(&uc->uc_lock);
				uc->length = 0;
				uc->max_length = 0;
				mtx_unlock(&uc->uc_lock);
			}
		}
	}
	return (0);
}

SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, clear,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_clear, "I", "Clear umtx chains statistics");
SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, peaks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_peaks, "A", "Highest peaks in chains max length");
#endif
static void
umtxq_sysinit(void *arg __unused)
{
	int i, j;

	umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	for (i = 0; i < 2; ++i) {
		for (j = 0; j < UMTX_CHAINS; ++j) {
			mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
			    MTX_DEF | MTX_DUPOK);
			LIST_INIT(&umtxq_chains[i][j].uc_queue[0]);
			LIST_INIT(&umtxq_chains[i][j].uc_queue[1]);
			LIST_INIT(&umtxq_chains[i][j].uc_spare_queue);
			TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
			umtxq_chains[i][j].uc_busy = 0;
			umtxq_chains[i][j].uc_waiters = 0;
#ifdef UMTX_PROFILING
			umtxq_chains[i][j].length = 0;
			umtxq_chains[i][j].max_length = 0;
#endif
		}
	}
#ifdef UMTX_PROFILING
	umtx_init_profiling();
#endif
	mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);
	EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
	    EVENTHANDLER_PRI_ANY);
	umtx_shm_init();
}
struct umtx_q *
umtxq_alloc(void)
{
	struct umtx_q *uq;

	uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
	uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX,
	    M_WAITOK | M_ZERO);
	TAILQ_INIT(&uq->uq_spare_queue->head);
	TAILQ_INIT(&uq->uq_pi_contested);
	uq->uq_inherited_pri = PRI_MAX;
	return (uq);
}

void
umtxq_free(struct umtx_q *uq)
{

	MPASS(uq->uq_spare_queue != NULL);
	free(uq->uq_spare_queue, M_UMTX);
	free(uq, M_UMTX);
}
static inline void
umtxq_hash(struct umtx_key *key)
{
	unsigned n;

	n = (uintptr_t)key->info.both.a + key->info.both.b;
	key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
}
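/*
 * Added commentary (not in the original source): this is Fibonacci
 * (multiplicative) hashing; GOLDEN_RATIO_PRIME is close to 2^32/phi,
 * so nearby addresses scatter across chains.  With __WORD_BIT == 32,
 * UMTX_SHIFTS is 23 and the top nine bits of the 32-bit product pick
 * one of the UMTX_CHAINS (512) chains.  A minimal userland model of
 * the computation, for illustration only:
 */
#if 0
static unsigned
model_umtxq_hash(uintptr_t a, uintptr_t b)	/* hypothetical helper */
{
	unsigned n = (unsigned)(a + b);

	return (((n * 2654404609U) >> 23) % 512);	/* always in [0, 511] */
}
#endif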
static inline struct umtxq_chain *
umtxq_getchain(struct umtx_key *key)
{

	if (key->type <= TYPE_SEM)
		return (&umtxq_chains[1][key->hash]);
	return (&umtxq_chains[0][key->hash]);
}

/*
 * Lock a chain.
 */
static inline void
umtxq_lock(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_lock(&uc->uc_lock);
}

/*
 * Unlock a chain.
 */
static inline void
umtxq_unlock(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_unlock(&uc->uc_lock);
}
/*
 * Set the chain to the busy state when a following operation
 * may block (a kernel mutex cannot be used).
 */
static inline void
umtxq_busy(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_assert(&uc->uc_lock, MA_OWNED);
	if (uc->uc_busy) {
#ifdef SMP
		if (smp_cpus > 1) {
			int count = BUSY_SPINS;
			if (count > 0) {
				umtxq_unlock(key);
				while (uc->uc_busy && --count > 0)
					cpu_spinwait();
				umtxq_lock(key);
			}
		}
#endif
		while (uc->uc_busy) {
			uc->uc_waiters++;
			msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
			uc->uc_waiters--;
		}
	}
	uc->uc_busy = 1;
}

/*
 * Unbusy a chain.
 */
static inline void
umtxq_unbusy(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_assert(&uc->uc_lock, MA_OWNED);
	KASSERT(uc->uc_busy != 0, ("not busy"));
	uc->uc_busy = 0;
	if (uc->uc_waiters)
		wakeup_one(uc);
}

static inline void
umtxq_unbusy_unlocked(struct umtx_key *key)
{

	umtxq_lock(key);
	umtxq_unbusy(key);
	umtxq_unlock(key);
}
static struct umtxq_queue *
umtxq_queue_lookup(struct umtx_key *key, int q)
{
	struct umtxq_queue *uh;
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);
	LIST_FOREACH(uh, &uc->uc_queue[q], link) {
		if (umtx_key_match(&uh->key, key))
			return (uh);
	}

	return (NULL);
}

static inline void
umtxq_insert_queue(struct umtx_q *uq, int q)
{
	struct umtxq_queue *uh;
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue"));
	uh = umtxq_queue_lookup(&uq->uq_key, q);
	if (uh != NULL) {
		LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
	} else {
		uh = uq->uq_spare_queue;
		uh->key = uq->uq_key;
		LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
#ifdef UMTX_PROFILING
		uc->length++;
		if (uc->length > uc->max_length) {
			uc->max_length = uc->length;
			if (uc->max_length > max_length)
				max_length = uc->max_length;
		}
#endif
	}
	uq->uq_spare_queue = NULL;

	TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
	uh->length++;
	uq->uq_flags |= UQF_UMTXQ;
	uq->uq_cur_queue = uh;
}
static inline void
umtxq_remove_queue(struct umtx_q *uq, int q)
{
	struct umtxq_chain *uc;
	struct umtxq_queue *uh;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	if (uq->uq_flags & UQF_UMTXQ) {
		uh = uq->uq_cur_queue;
		TAILQ_REMOVE(&uh->head, uq, uq_link);
		uh->length--;
		uq->uq_flags &= ~UQF_UMTXQ;
		if (TAILQ_EMPTY(&uh->head)) {
			KASSERT(uh->length == 0,
			    ("inconsistent umtxq_queue length"));
#ifdef UMTX_PROFILING
			uc->length--;
#endif
			LIST_REMOVE(uh, link);
		} else {
			uh = LIST_FIRST(&uc->uc_spare_queue);
			KASSERT(uh != NULL, ("uc_spare_queue is empty"));
			LIST_REMOVE(uh, link);
		}
		uq->uq_spare_queue = uh;
		uq->uq_cur_queue = NULL;
	}
}
/*
 * Return the number of waiters on the shared queue for the key.
 */
static int
umtxq_count(struct umtx_key *key)
{
	struct umtxq_queue *uh;

	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
	if (uh != NULL)
		return (uh->length);
	return (0);
}
/*
 * Return the number of PI waiters and, via *first, the first
 * waiter.
 */
static int
umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
{
	struct umtxq_queue *uh;

	*first = NULL;
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
	if (uh != NULL) {
		*first = TAILQ_FIRST(&uh->head);
		return (uh->length);
	}
	return (0);
}
static int
umtxq_check_susp(struct thread *td)
{
	struct proc *p;
	int error;

	/*
	 * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
	 * eventually break the lockstep loop.
	 */
	if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
		return (0);
	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	if (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) {
		if (p->p_flag & P_SINGLE_EXIT)
			error = EINTR;
		else
			error = ERESTART;
	}
	PROC_UNLOCK(p);
	return (error);
}
/*
 * Wake up threads waiting on a userland object.
 */
static int
umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
{
	struct umtxq_queue *uh;
	struct umtx_q *uq;
	int ret;

	ret = 0;
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	uh = umtxq_queue_lookup(key, q);
	if (uh != NULL) {
		while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
			umtxq_remove_queue(uq, q);
			wakeup(uq);
			if (++ret >= n_wake)
				return (ret);
		}
	}
	return (ret);
}

/*
 * Wake up the specified thread.
 */
static inline void
umtxq_signal_thread(struct umtx_q *uq)
{

	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));
	umtxq_remove(uq);
	wakeup(uq);
}
static inline int
tstohz(const struct timespec *tsp)
{
	struct timeval tv;

	TIMESPEC_TO_TIMEVAL(&tv, tsp);
	return (tvtohz(&tv));
}
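/*
 * Illustrative note (added commentary, not in the original source):
 * with the common hz = 1000 configuration, a remaining timeout of
 * { tv_sec = 1, tv_nsec = 500000000 } converts to roughly 1501 ticks,
 * since tvtohz() rounds up and adds a tick to guarantee a minimum
 * sleep length.
 */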
static void
abs_timeout_init(struct abs_timeout *timo, int clockid, int absolute,
    const struct timespec *timeout)
{

	timo->clockid = clockid;
	if (!absolute) {
		timo->is_abs_real = false;
		abs_timeout_update(timo);
		timespecadd(&timo->cur, timeout, &timo->end);
	} else {
		timo->end = *timeout;
		timo->is_abs_real = clockid == CLOCK_REALTIME ||
		    clockid == CLOCK_REALTIME_FAST ||
		    clockid == CLOCK_REALTIME_PRECISE;
		/*
		 * If is_abs_real, umtxq_sleep will read the clock
		 * after setting td_rtcgen; otherwise, read it here.
		 */
		if (!timo->is_abs_real) {
			abs_timeout_update(timo);
		}
	}
}

static void
abs_timeout_init2(struct abs_timeout *timo, const struct _umtx_time *umtxtime)
{

	abs_timeout_init(timo, umtxtime->_clockid,
	    (umtxtime->_flags & UMTX_ABSTIME) != 0, &umtxtime->_timeout);
}

static void
abs_timeout_update(struct abs_timeout *timo)
{

	kern_clock_gettime(curthread, timo->clockid, &timo->cur);
}
static int
abs_timeout_gethz(struct abs_timeout *timo)
{
	struct timespec tts;

	if (timespeccmp(&timo->end, &timo->cur, <=))
		return (-1);
	timespecsub(&timo->end, &timo->cur, &tts);
	return (tstohz(&tts));
}
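/*
 * Added usage sketch (not in the original source): the abs_timeout
 * helpers normalize relative and absolute user timeouts into one
 * absolute deadline in "end".  A sleep loop then alternates
 * abs_timeout_update() and abs_timeout_gethz(); a negative result
 * means the deadline has passed.  A minimal sketch of the pattern,
 * where user_relative_ts is a placeholder for a caller-supplied
 * timespec:
 */
#if 0
	struct abs_timeout timo;
	int ticks;

	abs_timeout_init(&timo, CLOCK_MONOTONIC, 0, &user_relative_ts);
	for (;;) {
		ticks = abs_timeout_gethz(&timo);
		if (ticks < 0)
			return (ETIMEDOUT);
		/* sleep for at most "ticks", then re-check the deadline */
		abs_timeout_update(&timo);
	}
#endif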
static uint32_t
umtx_unlock_val(uint32_t flags, bool rb)
{

	if (rb)
		return (UMUTEX_RB_OWNERDEAD);
	else if ((flags & UMUTEX_NONCONSISTENT) != 0)
		return (UMUTEX_RB_NOTRECOV);
	else
		return (UMUTEX_UNOWNED);
}
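/*
 * Added summary (not in the original source) of the unlock values:
 *
 *	rb	UMUTEX_NONCONSISTENT	resulting m_owner
 *	true	any			UMUTEX_RB_OWNERDEAD (robust death)
 *	false	set			UMUTEX_RB_NOTRECOV (unrecoverable)
 *	false	clear			UMUTEX_UNOWNED (normal unlock)
 */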
/*
 * Put the thread into a sleep state; before sleeping, check whether
 * the thread was removed from the umtx queue.
 */
static inline int
umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *abstime)
{
	struct umtxq_chain *uc;
	int error, timo;

	if (abstime != NULL && abstime->is_abs_real) {
		curthread->td_rtcgen = atomic_load_acq_int(&rtc_generation);
		abs_timeout_update(abstime);
	}

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	for (;;) {
		if (!(uq->uq_flags & UQF_UMTXQ)) {
			error = 0;
			break;
		}
		if (abstime != NULL) {
			timo = abs_timeout_gethz(abstime);
			if (timo < 0) {
				error = ETIMEDOUT;
				break;
			}
		} else
			timo = 0;
		error = msleep(uq, &uc->uc_lock, PCATCH | PDROP, wmesg, timo);
		if (error == EINTR || error == ERESTART) {
			umtxq_lock(&uq->uq_key);
			break;
		}
		if (abstime != NULL) {
			if (abstime->is_abs_real)
				curthread->td_rtcgen =
				    atomic_load_acq_int(&rtc_generation);
			abs_timeout_update(abstime);
		}
		umtxq_lock(&uq->uq_key);
	}

	curthread->td_rtcgen = 0;
	return (error);
}
/*
 * Convert a userspace address into a unique logical address.
 */
int
umtx_key_get(const void *addr, int type, int share, struct umtx_key *key)
{
	struct thread *td = curthread;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;

	key->type = type;
	if (share == THREAD_SHARE) {
		key->shared = 0;
		key->info.private.vs = td->td_proc->p_vmspace;
		key->info.private.addr = (uintptr_t)addr;
	} else {
		MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
		map = &td->td_proc->p_vmspace->vm_map;
		if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
		    &entry, &key->info.shared.object, &pindex, &prot,
		    &wired) != KERN_SUCCESS) {
			return (EFAULT);
		}

		if ((share == PROCESS_SHARE) ||
		    (share == AUTO_SHARE &&
		     VM_INHERIT_SHARE == entry->inheritance)) {
			key->shared = 1;
			key->info.shared.offset = (vm_offset_t)addr -
			    entry->start + entry->offset;
			vm_object_reference(key->info.shared.object);
		} else {
			key->shared = 0;
			key->info.private.vs = td->td_proc->p_vmspace;
			key->info.private.addr = (uintptr_t)addr;
		}
		vm_map_lookup_done(map, entry);
	}

	umtxq_hash(key);
	return (0);
}

/*
 * Release key.
 */
void
umtx_key_release(struct umtx_key *key)
{

	if (key->shared)
		vm_object_deallocate(key->info.shared.object);
}
/*
 * Fetch and compare value; sleep on the address if the value is unchanged.
 */
static int
do_wait(struct thread *td, void *addr, u_long id,
    struct _umtx_time *timeout, int compat32, int is_private)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	u_long tmp;
	uint32_t tmp32;
	int error = 0;

	uq = td->td_umtxq;
	if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
	    is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	if (compat32 == 0) {
		error = fueword(addr, &tmp);
		if (error != 0)
			error = EFAULT;
	} else {
		error = fueword32(addr, &tmp32);
		if (error == 0)
			tmp = tmp32;
		else
			error = EFAULT;
	}
	umtxq_lock(&uq->uq_key);
	if (error == 0) {
		if (tmp == id)
			error = umtxq_sleep(uq, "uwait", timeout == NULL ?
			    NULL : &timo);
		if ((uq->uq_flags & UQF_UMTXQ) == 0)
			error = 0;
		else
			umtxq_remove(uq);
	} else if ((uq->uq_flags & UQF_UMTXQ) != 0) {
		umtxq_remove(uq);
	}
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
/*
 * Wake up threads sleeping on the specified address.
 */
int
kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
{
	struct umtx_key key;
	int ret;

	if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
	    is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
		return (ret);
	umtxq_lock(&key);
	umtxq_signal(&key, n_wake);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (0);
}
/*
 * Lock a PTHREAD_PRIO_NONE protocol POSIX mutex.
 */
static int
do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int mode)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t owner, old, id;
	int error, rv;

	id = td->td_tid;
	uq = td->td_umtxq;
	error = 0;
	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	/*
	 * Care must be exercised when dealing with umtx structure.  It
	 * can fault on any access.
	 */
	for (;;) {
		rv = fueword32(&m->m_owner, &owner);
		if (rv == -1)
			return (EFAULT);
		if (mode == _UMUTEX_WAIT) {
			if (owner == UMUTEX_UNOWNED ||
			    owner == UMUTEX_CONTESTED ||
			    owner == UMUTEX_RB_OWNERDEAD ||
			    owner == UMUTEX_RB_NOTRECOV)
				return (0);
		} else {
			/*
			 * Robust mutex terminated.  Kernel duty is to
			 * return EOWNERDEAD to the userspace.  The
			 * umutex.m_flags UMUTEX_NONCONSISTENT is set
			 * by the common userspace code.
			 */
			if (owner == UMUTEX_RB_OWNERDEAD) {
				rv = casueword32(&m->m_owner,
				    UMUTEX_RB_OWNERDEAD, &owner,
				    id | UMUTEX_CONTESTED);
				if (rv == -1)
					return (EFAULT);
				if (owner == UMUTEX_RB_OWNERDEAD)
					return (EOWNERDEAD); /* success */
				rv = umtxq_check_susp(td);
				if (rv != 0)
					return (rv);
				continue;
			}
			if (owner == UMUTEX_RB_NOTRECOV)
				return (ENOTRECOVERABLE);

			/*
			 * Try the uncontested case.  This should be
			 * done in userland.
			 */
			rv = casueword32(&m->m_owner, UMUTEX_UNOWNED,
			    &owner, id);
			/* The address was invalid. */
			if (rv == -1)
				return (EFAULT);

			/* The acquire succeeded. */
			if (owner == UMUTEX_UNOWNED)
				return (0);

			/*
			 * If no one owns it but it is contested, try
			 * to acquire it.
			 */
			if (owner == UMUTEX_CONTESTED) {
				rv = casueword32(&m->m_owner,
				    UMUTEX_CONTESTED, &owner,
				    id | UMUTEX_CONTESTED);
				/* The address was invalid. */
				if (rv == -1)
					return (EFAULT);

				if (owner == UMUTEX_CONTESTED)
					return (0);

				rv = umtxq_check_susp(td);
				if (rv != 0)
					return (rv);

				/*
				 * If this failed the lock has
				 * changed, restart.
				 */
				continue;
			}
		}

		if (mode == _UMUTEX_TRY)
			return (EBUSY);

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			return (error);

		if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
		    GET_SHARE(flags), &uq->uq_key)) != 0)
			return (error);

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		rv = casueword32(&m->m_owner, owner, &old,
		    owner | UMUTEX_CONTESTED);

		/* The address was invalid. */
		if (rv == -1) {
			umtxq_lock(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/*
		 * We set the contested bit, sleep.  Otherwise the lock changed
		 * and we need to retry, or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		if (old == owner)
			error = umtxq_sleep(uq, "umtxn", timeout == NULL ?
			    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);

		if (error == 0)
			error = umtxq_check_susp(td);
	}

	return (0);
}
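/*
 * Added usage sketch (not in the original source): the contested-bit
 * protocol above lets userland take and release an uncontested
 * PTHREAD_PRIO_NONE mutex without entering the kernel.  A simplified
 * model of the fast path, in the style of libthr; the model_* names
 * are hypothetical:
 */
#if 0
#include <machine/atomic.h>

static int
model_mutex_trylock(struct umutex *m, uint32_t id)
{
	/* Uncontested acquire: UMUTEX_UNOWNED -> our thread id. */
	if (atomic_cmpset_acq_32((volatile uint32_t *)&m->m_owner,
	    UMUTEX_UNOWNED, id))
		return (0);
	return (EBUSY);	/* slow path: _umtx_op(UMTX_OP_MUTEX_LOCK) */
}

static void
model_mutex_unlock(struct umutex *m, uint32_t id)
{
	/* If the contested bit got set, the kernel must wake waiters. */
	if (!atomic_cmpset_rel_32((volatile uint32_t *)&m->m_owner,
	    id, UMUTEX_UNOWNED))
		_umtx_op(m, UMTX_OP_MUTEX_UNLOCK, 0, NULL, NULL);
}
#endif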
/*
 * Unlock a PTHREAD_PRIO_NONE protocol POSIX mutex.
 */
static int
do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	uint32_t owner, old, id, newlock;
	int error, count;

	id = td->td_tid;
	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	newlock = umtx_unlock_val(flags, rb);
	if ((owner & UMUTEX_CONTESTED) == 0) {
		error = casueword32(&m->m_owner, owner, &old, newlock);
		if (error == -1)
			return (EFAULT);
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * only zero or one thread is waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	if (count > 1)
		newlock |= UMUTEX_CONTESTED;
	error = casueword32(&m->m_owner, owner, &old, newlock);
	umtxq_lock(&key);
	umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (error == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}
/*
 * Check if the mutex is available and wake up a waiter;
 * for a simple (PTHREAD_PRIO_NONE) mutex only.
 */
static int
do_wake_umutex(struct thread *td, struct umutex *m)
{
	struct umtx_key key;
	uint32_t owner;
	uint32_t flags;
	int error;
	int count;

	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != 0 && owner != UMUTEX_RB_OWNERDEAD &&
	    owner != UMUTEX_RB_NOTRECOV)
		return (0);

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	if (count <= 1 && owner != UMUTEX_RB_OWNERDEAD &&
	    owner != UMUTEX_RB_NOTRECOV) {
		error = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    UMUTEX_UNOWNED);
		if (error == -1)
			error = EFAULT;
	}

	umtxq_lock(&key);
	if (error == 0 && count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 ||
	    owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV))
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
/*
 * Check whether the mutex has waiters and try to fix the contention bit.
 */
static int
do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
{
	struct umtx_key key;
	uint32_t owner, old;
	int type;
	int error;
	int count;

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT |
	    UMUTEX_ROBUST)) {
	case 0:
	case UMUTEX_ROBUST:
		type = TYPE_NORMAL_UMUTEX;
		break;
	case UMUTEX_PRIO_INHERIT:
		type = TYPE_PI_UMUTEX;
		break;
	case (UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST):
		type = TYPE_PI_ROBUST_UMUTEX;
		break;
	case UMUTEX_PRIO_PROTECT:
		type = TYPE_PP_UMUTEX;
		break;
	case (UMUTEX_PRIO_PROTECT | UMUTEX_ROBUST):
		type = TYPE_PP_ROBUST_UMUTEX;
		break;
	default:
		return (EINVAL);
	}
	if ((error = umtx_key_get(m, type, GET_SHARE(flags), &key)) != 0)
		return (error);

	owner = 0;
	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * Only repair the contention bit if there is a waiter; this means
	 * the mutex is still being referenced by userland code.  Otherwise,
	 * don't update any memory.
	 */
	if (count > 1) {
		error = fueword32(&m->m_owner, &owner);
		if (error == -1)
			error = EFAULT;
		while (error == 0 && (owner & UMUTEX_CONTESTED) == 0) {
			error = casueword32(&m->m_owner, owner, &old,
			    owner | UMUTEX_CONTESTED);
			if (error == -1) {
				error = EFAULT;
				break;
			}
			if (old == owner)
				break;
			owner = old;
			error = umtxq_check_susp(td);
		}
	} else if (count == 1) {
		error = fueword32(&m->m_owner, &owner);
		if (error == -1)
			error = EFAULT;
		while (error == 0 && (owner & ~UMUTEX_CONTESTED) != 0 &&
		    (owner & UMUTEX_CONTESTED) == 0) {
			error = casueword32(&m->m_owner, owner, &old,
			    owner | UMUTEX_CONTESTED);
			if (error == -1) {
				error = EFAULT;
				break;
			}
			if (old == owner)
				break;
			owner = old;
			error = umtxq_check_susp(td);
		}
	}

	umtxq_lock(&key);
	if (error == EFAULT) {
		umtxq_signal(&key, INT_MAX);
	} else if (count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 ||
	    owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV))
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
static inline struct umtx_pi *
umtx_pi_alloc(int flags)
{
	struct umtx_pi *pi;

	pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
	TAILQ_INIT(&pi->pi_blocked);
	atomic_add_int(&umtx_pi_allocated, 1);
	return (pi);
}

static inline void
umtx_pi_free(struct umtx_pi *pi)
{
	uma_zfree(umtx_pi_zone, pi);
	atomic_add_int(&umtx_pi_allocated, -1);
}
/*
 * Adjust the thread's position on a pi_state after its priority has been
 * changed.
 */
static void
umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
{
	struct umtx_q *uq, *uq1, *uq2;
	struct thread *td1;

	mtx_assert(&umtx_lock, MA_OWNED);
	if (pi == NULL)
		return;

	uq = td->td_umtxq;

	/*
	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if either its priority is lower than
	 * the previous thread or higher than the next thread.
	 */
	uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
	uq2 = TAILQ_NEXT(uq, uq_lockq);
	if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
	    (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved to.
		 */
		TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
		TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
			td1 = uq1->uq_thread;
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (UPRI(td1) > UPRI(td))
				break;
		}

		if (uq1 == NULL)
			TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
		else
			TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
	}
}
static struct umtx_pi *
umtx_pi_next(struct umtx_pi *pi)
{
	struct umtx_q *uq_owner;

	if (pi->pi_owner == NULL)
		return (NULL);
	uq_owner = pi->pi_owner->td_umtxq;
	if (uq_owner == NULL)
		return (NULL);
	return (uq_owner->uq_pi_blocked);
}
/*
 * Floyd's Cycle-Finding Algorithm.
 */
static bool
umtx_pi_check_loop(struct umtx_pi *pi)
{
	struct umtx_pi *pi1;	/* fast iterator */

	mtx_assert(&umtx_lock, MA_OWNED);
	if (pi == NULL)
		return (false);
	pi1 = pi;		/* double iterator */
	while (pi1 != NULL) {
		pi = umtx_pi_next(pi);
		if (pi == pi1)
			return (true);
		pi1 = umtx_pi_next(pi1);
		if (pi1 == NULL)
			break;
		pi1 = umtx_pi_next(pi1);
	}
	return (false);
}
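/*
 * Added illustration (not in the original source): the same
 * tortoise-and-hare scheme on a plain singly linked list.  The fast
 * pointer advances two nodes per iteration and the slow pointer one;
 * they can only meet if the chain of next pointers loops back on
 * itself, which is exactly the ownership cycle being detected above.
 */
#if 0
struct node { struct node *next; };

static bool
has_cycle(struct node *head)
{
	struct node *slow = head, *fast = head;

	while (fast != NULL && fast->next != NULL) {
		slow = slow->next;		/* one step */
		fast = fast->next->next;	/* two steps */
		if (slow == fast)
			return (true);
	}
	return (false);
}
#endif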
/*
 * Propagate priority when a thread is blocked on a POSIX
 * PI mutex.
 */
static void
umtx_propagate_priority(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;
	int pri;

	mtx_assert(&umtx_lock, MA_OWNED);
	pri = UPRI(td);
	uq = td->td_umtxq;
	pi = uq->uq_pi_blocked;
	if (pi == NULL)
		return;
	if (umtx_pi_check_loop(pi))
		return;

	for (;;) {
		td = pi->pi_owner;
		if (td == NULL || td == curthread)
			return;

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		thread_lock(td);
		if (td->td_lend_user_pri > pri)
			sched_lend_user_prio(td, pri);
		else {
			thread_unlock(td);
			break;
		}
		thread_unlock(td);

		/*
		 * Pick up the lock that td is blocked on.
		 */
		uq = td->td_umtxq;
		pi = uq->uq_pi_blocked;
		if (pi == NULL)
			break;
		/* Resort td on the list if needed. */
		umtx_pi_adjust_thread(pi, td);
	}
}
/*
 * Unpropagate priority for a PI mutex when a thread blocked on
 * it is interrupted by a signal or resumed by others.
 */
static void
umtx_repropagate_priority(struct umtx_pi *pi)
{
	struct umtx_q *uq, *uq_owner;
	struct umtx_pi *pi2;
	int pri;

	mtx_assert(&umtx_lock, MA_OWNED);

	if (umtx_pi_check_loop(pi))
		return;
	while (pi != NULL && pi->pi_owner != NULL) {
		pri = PRI_MAX;
		uq_owner = pi->pi_owner->td_umtxq;

		TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
			uq = TAILQ_FIRST(&pi2->pi_blocked);
			if (uq != NULL) {
				if (pri > UPRI(uq->uq_thread))
					pri = UPRI(uq->uq_thread);
			}
		}

		if (pri > uq_owner->uq_inherited_pri)
			pri = uq_owner->uq_inherited_pri;
		thread_lock(pi->pi_owner);
		sched_lend_user_prio(pi->pi_owner, pri);
		thread_unlock(pi->pi_owner);
		if ((pi = uq_owner->uq_pi_blocked) != NULL)
			umtx_pi_adjust_thread(pi, uq_owner->uq_thread);
	}
}
/*
 * Insert a PI mutex into the owned list.
 */
static void
umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
{
	struct umtx_q *uq_owner;

	uq_owner = owner->td_umtxq;
	mtx_assert(&umtx_lock, MA_OWNED);
	MPASS(pi->pi_owner == NULL);
	pi->pi_owner = owner;
	TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);
}

/*
 * Disown a PI mutex, and remove it from the owned list.
 */
static void
umtx_pi_disown(struct umtx_pi *pi)
{

	mtx_assert(&umtx_lock, MA_OWNED);
	TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested, pi, pi_link);
	pi->pi_owner = NULL;
}
/*
 * Claim ownership of a PI mutex.
 */
static int
umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
{
	struct umtx_q *uq;
	int pri;

	mtx_lock(&umtx_lock);
	if (pi->pi_owner == owner) {
		mtx_unlock(&umtx_lock);
		return (0);
	}

	if (pi->pi_owner != NULL) {
		/*
		 * Userland may have already messed up the mutex, sigh.
		 */
		mtx_unlock(&umtx_lock);
		return (EPERM);
	}
	umtx_pi_setowner(pi, owner);
	uq = TAILQ_FIRST(&pi->pi_blocked);
	if (uq != NULL) {
		pri = UPRI(uq->uq_thread);
		thread_lock(owner);
		if (pri < UPRI(owner))
			sched_lend_user_prio(owner, pri);
		thread_unlock(owner);
	}
	mtx_unlock(&umtx_lock);
	return (0);
}
/*
 * Adjust a thread's order position in its blocked PI mutex;
 * this may result in a new priority propagation.
 */
void
umtx_pi_adjust(struct thread *td, u_char oldpri)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;

	uq = td->td_umtxq;
	mtx_lock(&umtx_lock);
	/*
	 * Pick up the lock that td is blocked on.
	 */
	pi = uq->uq_pi_blocked;
	if (pi != NULL) {
		umtx_pi_adjust_thread(pi, td);
		umtx_repropagate_priority(pi);
	}
	mtx_unlock(&umtx_lock);
}
/*
 * Sleep on a PI mutex.
 */
static int
umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner,
    const char *wmesg, struct abs_timeout *timo, bool shared)
{
	struct thread *td, *td1;
	struct umtx_q *uq1;
	int error, pri;
#ifdef INVARIANTS
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
#endif
	error = 0;
	td = uq->uq_thread;
	KASSERT(td == curthread, ("inconsistent uq_thread"));
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));
	KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));
	umtxq_insert(uq);
	mtx_lock(&umtx_lock);
	if (pi->pi_owner == NULL) {
		mtx_unlock(&umtx_lock);
		td1 = tdfind(owner, shared ? -1 : td->td_proc->p_pid);
		mtx_lock(&umtx_lock);
		if (td1 != NULL) {
			if (pi->pi_owner == NULL)
				umtx_pi_setowner(pi, td1);
			PROC_UNLOCK(td1->td_proc);
		}
	}

	TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
		pri = UPRI(uq1->uq_thread);
		if (pri > UPRI(td))
			break;
	}

	if (uq1 != NULL)
		TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
	else
		TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);

	uq->uq_pi_blocked = pi;
	thread_lock(td);
	td->td_flags |= TDF_UPIBLOCKED;
	thread_unlock(td);
	umtx_propagate_priority(td);
	mtx_unlock(&umtx_lock);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, wmesg, timo);
	umtxq_remove(uq);

	mtx_lock(&umtx_lock);
	uq->uq_pi_blocked = NULL;
	thread_lock(td);
	td->td_flags &= ~TDF_UPIBLOCKED;
	thread_unlock(td);
	TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
	umtx_repropagate_priority(pi);
	mtx_unlock(&umtx_lock);
	umtxq_unlock(&uq->uq_key);

	return (error);
}
/*
 * Add a reference count for a PI mutex.
 */
static void
umtx_pi_ref(struct umtx_pi *pi)
{

	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&pi->pi_key));
	pi->pi_refcount++;
}

/*
 * Decrease the reference count for a PI mutex; if the counter
 * drops to zero, its memory is freed.
 */
static void
umtx_pi_unref(struct umtx_pi *pi)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	UMTXQ_LOCKED_ASSERT(uc);
	KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
	if (--pi->pi_refcount == 0) {
		mtx_lock(&umtx_lock);
		if (pi->pi_owner != NULL)
			umtx_pi_disown(pi);
		KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
		    ("blocked queue not empty"));
		mtx_unlock(&umtx_lock);
		TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
		umtx_pi_free(pi);
	}
}
/*
 * Find a PI mutex in the hash table.
 */
static struct umtx_pi *
umtx_pi_lookup(struct umtx_key *key)
{
	struct umtxq_chain *uc;
	struct umtx_pi *pi;

	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);

	TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
		if (umtx_key_match(&pi->pi_key, key)) {
			return (pi);
		}
	}
	return (NULL);
}

/*
 * Insert a PI mutex into the hash table.
 */
static inline void
umtx_pi_insert(struct umtx_pi *pi)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	UMTXQ_LOCKED_ASSERT(uc);
	TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
}
/*
 * Lock a PI mutex.
 */
static int
do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	struct umtx_pi *pi, *new_pi;
	uint32_t id, old_owner, owner, old;
	int error, rv;

	id = td->td_tid;
	uq = td->td_umtxq;

	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	pi = umtx_pi_lookup(&uq->uq_key);
	if (pi == NULL) {
		new_pi = umtx_pi_alloc(M_NOWAIT);
		if (new_pi == NULL) {
			umtxq_unlock(&uq->uq_key);
			new_pi = umtx_pi_alloc(M_WAITOK);
			umtxq_lock(&uq->uq_key);
			pi = umtx_pi_lookup(&uq->uq_key);
			if (pi != NULL) {
				umtx_pi_free(new_pi);
				new_pi = NULL;
			}
		}
		if (new_pi != NULL) {
			new_pi->pi_key = uq->uq_key;
			umtx_pi_insert(new_pi);
			pi = new_pi;
		}
	}
	umtx_pi_ref(pi);
	umtxq_unlock(&uq->uq_key);

	/*
	 * Care must be exercised when dealing with umtx structure.  It
	 * can fault on any access.
	 */
	for (;;) {
		/*
		 * Try the uncontested case.  This should be done in userland.
		 */
		rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, &owner, id);
		/* The address was invalid. */
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		/* The acquire succeeded. */
		if (owner == UMUTEX_UNOWNED) {
			error = 0;
			break;
		}

		if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
			break;
		}

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMUTEX_CONTESTED || owner == UMUTEX_RB_OWNERDEAD) {
			old_owner = owner;
			rv = casueword32(&m->m_owner, owner, &owner,
			    id | UMUTEX_CONTESTED);
			/* The address was invalid. */
			if (rv == -1) {
				error = EFAULT;
				break;
			}

			if (owner == old_owner) {
				umtxq_lock(&uq->uq_key);
				umtxq_busy(&uq->uq_key);
				error = umtx_pi_claim(pi, td);
				umtxq_unbusy(&uq->uq_key);
				umtxq_unlock(&uq->uq_key);
				if (error != 0) {
					/*
					 * Since we're going to return an
					 * error, restore the m_owner to its
					 * previous, unowned state to avoid
					 * compounding the problem.
					 */
					(void)casuword32(&m->m_owner,
					    id | UMUTEX_CONTESTED,
					    old_owner);
				}
				if (error == 0 &&
				    old_owner == UMUTEX_RB_OWNERDEAD)
					error = EOWNERDEAD;
				break;
			}

			error = umtxq_check_susp(td);
			if (error != 0)
				break;

			/* If this failed the lock has changed, restart. */
			continue;
		}

		if ((owner & ~UMUTEX_CONTESTED) == id) {
			error = EDEADLK;
			break;
		}

		if (try != 0) {
			error = EBUSY;
			break;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		rv = casueword32(&m->m_owner, owner, &old, owner |
		    UMUTEX_CONTESTED);

		/* The address was invalid. */
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}

		umtxq_lock(&uq->uq_key);
		/*
		 * We set the contested bit, sleep.  Otherwise the lock changed
		 * and we need to retry, or we lost a race to the thread
		 * unlocking the umtx.  Note that the UMUTEX_RB_OWNERDEAD
		 * value for owner is impossible there.
		 */
		if (old == owner) {
			error = umtxq_sleep_pi(uq, pi,
			    owner & ~UMUTEX_CONTESTED,
			    "umtxpi", timeout == NULL ? NULL : &timo,
			    (flags & USYNC_PROCESS_SHARED) != 0);
			if (error != 0)
				continue;
		} else {
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
		}

		error = umtxq_check_susp(td);
		if (error != 0)
			break;
	}

	umtxq_lock(&uq->uq_key);
	umtx_pi_unref(pi);
	umtxq_unlock(&uq->uq_key);

	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Unlock a PI mutex.
 */
static int
do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	struct umtx_q *uq_first, *uq_first2, *uq_me;
	struct umtx_pi *pi, *pi2;
	uint32_t id, new_owner, old, owner;
	int count, error, pri;

	id = td->td_tid;
	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	new_owner = umtx_unlock_val(flags, rb);

	/* This should be done in userland */
	if ((owner & UMUTEX_CONTESTED) == 0) {
		error = casueword32(&m->m_owner, owner, &old, new_owner);
		if (error == -1)
			return (EFAULT);
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count_pi(&key, &uq_first);
	if (uq_first != NULL) {
		mtx_lock(&umtx_lock);
		pi = uq_first->uq_pi_blocked;
		KASSERT(pi != NULL, ("pi == NULL?"));
		if (pi->pi_owner != td && !(rb && pi->pi_owner == NULL)) {
			mtx_unlock(&umtx_lock);
			umtxq_unbusy(&key);
			umtxq_unlock(&key);
			umtx_key_release(&key);
			/* userland messed up the mutex */
			return (EPERM);
		}
		uq_me = td->td_umtxq;
		if (pi->pi_owner == td)
			umtx_pi_disown(pi);
		/* get highest priority thread which is still sleeping. */
		uq_first = TAILQ_FIRST(&pi->pi_blocked);
		while (uq_first != NULL &&
		    (uq_first->uq_flags & UQF_UMTXQ) == 0) {
			uq_first = TAILQ_NEXT(uq_first, uq_lockq);
		}
		pri = PRI_MAX;
		TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
			uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
			if (uq_first2 != NULL) {
				if (pri > UPRI(uq_first2->uq_thread))
					pri = UPRI(uq_first2->uq_thread);
			}
		}
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
		if (uq_first)
			umtxq_signal_thread(uq_first);
	} else {
		pi = umtx_pi_lookup(&key);
		/*
		 * A umtx_pi can exist if a signal or timeout removed the
		 * last waiter from the umtxq, but there is still
		 * a thread in do_lock_pi() holding the umtx_pi.
		 */
		if (pi != NULL) {
			/*
			 * The umtx_pi can be unowned, such as when a thread
			 * has just entered do_lock_pi(), allocated the
			 * umtx_pi, and unlocked the umtxq.
			 * If the current thread owns it, it must disown it.
			 */
			mtx_lock(&umtx_lock);
			if (pi->pi_owner == td)
				umtx_pi_disown(pi);
			mtx_unlock(&umtx_lock);
		}
	}
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * only zero or one thread is waiting for it.
	 * Otherwise, it must be marked as contested.
	 */

	if (count > 1)
		new_owner |= UMUTEX_CONTESTED;
	error = casueword32(&m->m_owner, owner, &old, new_owner);

	umtxq_unbusy_unlocked(&key);
	umtx_key_release(&key);
	if (error == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}
/*
 * Lock a PP mutex.
 */
static int
do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
{
	struct abs_timeout timo;
	struct umtx_q *uq, *uq2;
	struct umtx_pi *pi;
	uint32_t ceiling;
	uint32_t owner, id;
	int error, pri, old_inherited_pri, su, rv;

	id = td->td_tid;
	uq = td->td_umtxq;
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
	for (;;) {
		old_inherited_pri = uq->uq_inherited_pri;
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		rv = fueword32(&m->m_ceilings[0], &ceiling);
		if (rv == -1) {
			error = EFAULT;
			goto out;
		}
		ceiling = RTP_PRIO_MAX - ceiling;
		if (ceiling > RTP_PRIO_MAX) {
			error = EINVAL;
			goto out;
		}

		mtx_lock(&umtx_lock);
		if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
			mtx_unlock(&umtx_lock);
			error = EINVAL;
			goto out;
		}
		if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
			uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
			thread_lock(td);
			if (uq->uq_inherited_pri < UPRI(td))
				sched_lend_user_prio(td, uq->uq_inherited_pri);
			thread_unlock(td);
		}
		mtx_unlock(&umtx_lock);

		rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    id | UMUTEX_CONTESTED);
		/* The address was invalid. */
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		if (owner == UMUTEX_CONTESTED) {
			error = 0;
			break;
		} else if (owner == UMUTEX_RB_OWNERDEAD) {
			rv = casueword32(&m->m_owner, UMUTEX_RB_OWNERDEAD,
			    &owner, id | UMUTEX_CONTESTED);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (owner == UMUTEX_RB_OWNERDEAD) {
				error = EOWNERDEAD; /* success */
				break;
			}
			error = 0;
		} else if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
			break;
		}

		if (try != 0) {
			error = EBUSY;
			break;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		umtxq_lock(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		error = umtxq_sleep(uq, "umtxpp", timeout == NULL ?
		    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);

		mtx_lock(&umtx_lock);
		uq->uq_inherited_pri = old_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}

	if (error != 0 && error != EOWNERDEAD) {
		mtx_lock(&umtx_lock);
		uq->uq_inherited_pri = old_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}

out:
	umtxq_unbusy_unlocked(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Unlock a PP mutex.
 */
static int
do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	struct umtx_q *uq, *uq2;
	struct umtx_pi *pi;
	uint32_t id, owner, rceiling;
	int error, pri, new_inherited_pri, su;

	id = td->td_tid;
	uq = td->td_umtxq;
	su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);

	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
	if (error != 0)
		return (error);

	if (rceiling == -1)
		new_inherited_pri = PRI_MAX;
	else {
		rceiling = RTP_PRIO_MAX - rceiling;
		if (rceiling > RTP_PRIO_MAX)
			return (EINVAL);
		new_inherited_pri = PRI_MIN_REALTIME + rceiling;
	}

	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	umtxq_unlock(&key);
	/*
	 * For priority-protected mutexes, always set the unlocked state
	 * to UMUTEX_CONTESTED, so that userland always enters the kernel
	 * to lock the mutex.  This is necessary because thread priority
	 * has to be adjusted for such mutexes.
	 */
	error = suword32(&m->m_owner, umtx_unlock_val(flags, rb) |
	    UMUTEX_CONTESTED);

	umtxq_lock(&key);
	if (error == 0)
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);

	if (error == -1)
		error = EFAULT;
	else {
		mtx_lock(&umtx_lock);
		if (su != 0)
			uq->uq_inherited_pri = new_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}
	umtx_key_release(&key);
	return (error);
}
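/*
 * Added worked example (not in the original source): user ceilings are
 * stored in rtprio convention inverted relative to kernel realtime
 * priorities, hence the "RTP_PRIO_MAX - ceiling" step above.  For
 * instance, a stored ceiling of RTP_PRIO_MAX (31) maps to
 * PRI_MIN_REALTIME + 0, the strongest possible lend, while a stored
 * ceiling of 0 maps to PRI_MIN_REALTIME + 31, the weakest; a rceiling
 * of (uint32_t)-1 means "no ceiling", i.e. PRI_MAX.
 */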
static int
do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
    uint32_t *old_ceiling)
{
	struct umtx_q *uq;
	uint32_t flags, id, owner, save_ceiling;
	int error, rv, rv1;

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);
	if (ceiling > RTP_PRIO_MAX)
		return (EINVAL);
	id = td->td_tid;
	uq = td->td_umtxq;
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);
	for (;;) {
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		rv = fueword32(&m->m_ceilings[0], &save_ceiling);
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    id | UMUTEX_CONTESTED);
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		if (owner == UMUTEX_CONTESTED) {
			rv = suword32(&m->m_ceilings[0], ceiling);
			rv1 = suword32(&m->m_owner, UMUTEX_CONTESTED);
			error = (rv == 0 && rv1 == 0) ? 0 : EFAULT;
			break;
		}

		if ((owner & ~UMUTEX_CONTESTED) == id) {
			rv = suword32(&m->m_ceilings[0], ceiling);
			error = rv == 0 ? 0 : EFAULT;
			break;
		}

		if (owner == UMUTEX_RB_OWNERDEAD) {
			error = EOWNERDEAD;
			break;
		} else if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
			break;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		/*
		 * We set the contested bit, sleep.  Otherwise the lock changed
		 * and we need to retry, or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		error = umtxq_sleep(uq, "umtxpp", NULL);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
	}
	umtxq_lock(&uq->uq_key);
	if (error == 0)
		umtxq_signal(&uq->uq_key, INT_MAX);
	umtxq_unbusy(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	if (error == 0 && old_ceiling != NULL) {
		rv = suword32(old_ceiling, save_ceiling);
		error = rv == 0 ? 0 : EFAULT;
	}
	return (error);
}
/*
 * Lock a userland POSIX mutex.
 */
static int
do_lock_umutex(struct thread *td, struct umutex *m,
    struct _umtx_time *timeout, int mode)
{
	uint32_t flags;
	int error;

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
	case 0:
		error = do_lock_normal(td, m, flags, timeout, mode);
		break;
	case UMUTEX_PRIO_INHERIT:
		error = do_lock_pi(td, m, flags, timeout, mode);
		break;
	case UMUTEX_PRIO_PROTECT:
		error = do_lock_pp(td, m, flags, timeout, mode);
		break;
	default:
		return (EINVAL);
	}
	if (timeout == NULL) {
		if (error == EINTR && mode != _UMUTEX_WAIT)
			error = ERESTART;
	} else {
		/* Timed-locking is not restarted. */
		if (error == ERESTART)
			error = EINTR;
	}
	return (error);
}
2457 do_unlock_umutex(struct thread *td, struct umutex *m, bool rb)
2462 error = fueword32(&m->m_flags, &flags);
2466 switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2468 return (do_unlock_normal(td, m, flags, rb));
2469 case UMUTEX_PRIO_INHERIT:
2470 return (do_unlock_pi(td, m, flags, rb));
2471 case UMUTEX_PRIO_PROTECT:
2472 return (do_unlock_pp(td, m, flags, rb));
static int
do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
    struct timespec *timeout, u_long wflags)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, clockid, hasw;
	int error;

	uq = td->td_umtxq;
	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if ((wflags & CVWAIT_CLOCKID) != 0) {
		error = fueword32(&cv->c_clockid, &clockid);
		if (error == -1) {
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}
		if (clockid < CLOCK_REALTIME ||
		    clockid >= CLOCK_THREAD_CPUTIME_ID) {
			/* hmm, only HW clock id will work. */
			umtx_key_release(&uq->uq_key);
			return (EINVAL);
		}
	} else {
		clockid = CLOCK_REALTIME;
	}

	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);

	/*
	 * Set c_has_waiters to 1 before releasing the user mutex; also
	 * don't modify the cache line when unnecessary.
	 */
	error = fueword32(&cv->c_has_waiters, &hasw);
	if (error == 0 && hasw == 0)
		suword32(&cv->c_has_waiters, 1);

	umtxq_unbusy_unlocked(&uq->uq_key);

	error = do_unlock_umutex(td, m, false);

	if (timeout != NULL)
		abs_timeout_init(&timo, clockid, (wflags & CVWAIT_ABSTIME) != 0,
		    timeout);

	umtxq_lock(&uq->uq_key);
	if (error == 0) {
		error = umtxq_sleep(uq, "ucond", timeout == NULL ?
		    NULL : &timo);
	}

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		/*
		 * This must be a timeout, an interruption by a signal,
		 * or a spurious wakeup; clear the c_has_waiters flag
		 * when necessary.
		 */
		umtxq_busy(&uq->uq_key);
		if ((uq->uq_flags & UQF_UMTXQ) != 0) {
			int oldlen = uq->uq_cur_queue->length;
			umtxq_remove(uq);
			if (oldlen == 1) {
				umtxq_unlock(&uq->uq_key);
				suword32(&cv->c_has_waiters, 0);
				umtxq_lock(&uq->uq_key);
			}
		}
		umtxq_unbusy(&uq->uq_key);
		if (error == ERESTART)
			error = EINTR;
	}

	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Signal a userland condition variable.
 */
static int
do_cv_signal(struct thread *td, struct ucond *cv)
{
	struct umtx_key key;
	int error, cnt, nwake;
	uint32_t flags;

	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	nwake = umtxq_signal(&key, 1);
	if (cnt <= nwake) {
		umtxq_unlock(&key);
		error = suword32(&cv->c_has_waiters, 0);
		if (error == -1)
			error = EFAULT;
		umtxq_lock(&key);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
static int
do_cv_broadcast(struct thread *td, struct ucond *cv)
{
	struct umtx_key key;
	int error;
	uint32_t flags;

	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	umtxq_signal(&key, INT_MAX);
	umtxq_unlock(&key);

	error = suword32(&cv->c_has_waiters, 0);
	if (error == -1)
		error = EFAULT;

	umtxq_unbusy_unlocked(&key);

	umtx_key_release(&key);
	return (error);
}
static int
do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag, struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, wrflags;
	int32_t state, oldstate;
	int32_t blocked_readers;
	int error, error1, rv;

	uq = td->td_umtxq;
	error = fueword32(&rwlock->rw_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	wrflags = URWLOCK_WRITE_OWNER;
	if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
		wrflags |= URWLOCK_WRITE_WAITERS;

	for (;;) {
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1) {
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/* try to lock it */
		while (!(state & wrflags)) {
			if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) {
				umtx_key_release(&uq->uq_key);
				return (EAGAIN);
			}
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state + 1);
			if (rv == -1) {
				umtx_key_release(&uq->uq_key);
				return (EFAULT);
			}
			if (oldstate == state) {
				umtx_key_release(&uq->uq_key);
				return (0);
			}
			state = oldstate;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}

		if (error)
			break;

		/* grab monitor lock */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Re-read the state, in case it changed between the
		 * try-lock above and the check below.
		 */
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1)
			error = EFAULT;

		/* set read contention bit */
		while (error == 0 && (state & wrflags) &&
		    !(state & URWLOCK_READ_WAITERS)) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_READ_WAITERS);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (oldstate == state)
				goto sleep;
			state = oldstate;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}
		if (error != 0) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			break;
		}

		/* the state changed while setting flags, restart */
		if (!(state & wrflags)) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
			continue;
		}

sleep:
		/*
		 * The contention bit is set; before sleeping, increase
		 * the read-waiter count.
		 */
		rv = fueword32(&rwlock->rw_blocked_readers,
		    &blocked_readers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_readers, blocked_readers+1);

		while (state & wrflags) {
			umtxq_lock(&uq->uq_key);
			umtxq_insert(uq);
			umtxq_unbusy(&uq->uq_key);

			error = umtxq_sleep(uq, "urdlck", timeout == NULL ?
			    NULL : &timo);

			umtxq_busy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			if (error)
				break;
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
		}

		/* decrease read waiter count, and may clear read contention bit */
		rv = fueword32(&rwlock->rw_blocked_readers,
		    &blocked_readers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_readers, blocked_readers-1);
		if (blocked_readers == 1) {
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				umtxq_unbusy_unlocked(&uq->uq_key);
				error = EFAULT;
				break;
			}
			for (;;) {
				rv = casueword32(&rwlock->rw_state, state,
				    &oldstate, state & ~URWLOCK_READ_WAITERS);
				if (rv == -1) {
					error = EFAULT;
					break;
				}
				if (oldstate == state)
					break;
				state = oldstate;
				error1 = umtxq_check_susp(td);
				if (error1 != 0) {
					if (error == 0)
						error = error1;
					break;
				}
			}
		}

		umtxq_unbusy_unlocked(&uq->uq_key);
		if (error != 0)
			break;
	}
	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
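/*
 * Added summary (not in the original source): rw_state packs the
 * entire rwlock into one 32-bit word, which is why every transition
 * above is a single casueword32().  The low bits hold the reader
 * count (URWLOCK_READER_COUNT(), bounded by URWLOCK_MAX_READERS)
 * and the high bits hold URWLOCK_WRITE_OWNER, URWLOCK_WRITE_WAITERS,
 * and URWLOCK_READ_WAITERS, so "state + 1" admits one more reader and
 * "state & ~URWLOCK_READ_WAITERS" clears the read-contention bit in
 * the same atomic step.
 */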
static int
do_rw_wrlock(struct thread *td, struct urwlock *rwlock, struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags;
	int32_t state, oldstate;
	int32_t blocked_writers;
	int32_t blocked_readers;
	int error, error1, rv;

	uq = td->td_umtxq;
	error = fueword32(&rwlock->rw_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	blocked_readers = 0;
	for (;;) {
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1) {
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}
		while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_WRITE_OWNER);
			if (rv == -1) {
				umtx_key_release(&uq->uq_key);
				return (EFAULT);
			}
			if (oldstate == state) {
				umtx_key_release(&uq->uq_key);
				return (0);
			}
			state = oldstate;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}

		if (error) {
			if (!(state & (URWLOCK_WRITE_OWNER|URWLOCK_WRITE_WAITERS)) &&
			    blocked_readers != 0) {
				umtxq_lock(&uq->uq_key);
				umtxq_busy(&uq->uq_key);
				umtxq_signal_queue(&uq->uq_key, INT_MAX, UMTX_SHARED_QUEUE);
				umtxq_unbusy(&uq->uq_key);
				umtxq_unlock(&uq->uq_key);
			}

			break;
		}

		/* grab monitor lock */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Re-read the state, in case it changed between the
		 * try-lock above and the check below.
		 */
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1)
			error = EFAULT;

		while (error == 0 && ((state & URWLOCK_WRITE_OWNER) ||
		    URWLOCK_READER_COUNT(state) != 0) &&
		    (state & URWLOCK_WRITE_WAITERS) == 0) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_WRITE_WAITERS);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (oldstate == state)
				goto sleep;
			state = oldstate;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}
		if (error != 0) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			break;
		}

		if (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
			continue;
		}
sleep:
		rv = fueword32(&rwlock->rw_blocked_writers,
		    &blocked_writers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_writers, blocked_writers+1);

		while ((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
			umtxq_unbusy(&uq->uq_key);

			error = umtxq_sleep(uq, "uwrlck", timeout == NULL ?
			    NULL : &timo);

			umtxq_busy(&uq->uq_key);
			umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
			umtxq_unlock(&uq->uq_key);
			if (error)
				break;
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
		}

		rv = fueword32(&rwlock->rw_blocked_writers,
		    &blocked_writers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_writers, blocked_writers-1);
		if (blocked_writers == 1) {
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				umtxq_unbusy_unlocked(&uq->uq_key);
				error = EFAULT;
				break;
			}
			for (;;) {
				rv = casueword32(&rwlock->rw_state, state,
				    &oldstate, state & ~URWLOCK_WRITE_WAITERS);
				if (rv == -1) {
					error = EFAULT;
					break;
				}
				if (oldstate == state)
					break;
				state = oldstate;
				error1 = umtxq_check_susp(td);
				/*
				 * We are leaving the URWLOCK_WRITE_WAITERS
				 * behind, but this should not harm the
				 * correctness.
				 */
				if (error1 != 0) {
					if (error == 0)
						error = error1;
					break;
				}
			}
			rv = fueword32(&rwlock->rw_blocked_readers,
			    &blocked_readers);
			if (rv == -1) {
				umtxq_unbusy_unlocked(&uq->uq_key);
				error = EFAULT;
				break;
			}
		} else
			blocked_readers = 0;

		umtxq_unbusy_unlocked(&uq->uq_key);
	}

	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
static int
do_rw_unlock(struct thread *td, struct urwlock *rwlock)
{
	struct umtx_q *uq;
	uint32_t flags;
	int32_t state, oldstate;
	int error, rv, q, count;

	uq = td->td_umtxq;
	error = fueword32(&rwlock->rw_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	error = fueword32(&rwlock->rw_state, &state);
	if (error == -1) {
		error = EFAULT;
		goto out;
	}
	if (state & URWLOCK_WRITE_OWNER) {
		for (;;) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state & ~URWLOCK_WRITE_OWNER);
			if (rv == -1) {
				error = EFAULT;
				goto out;
			}
			if (oldstate != state) {
				state = oldstate;
				if (!(oldstate & URWLOCK_WRITE_OWNER)) {
					error = EPERM;
					goto out;
				}
				error = umtxq_check_susp(td);
				if (error != 0)
					goto out;
			} else
				break;
		}
	} else if (URWLOCK_READER_COUNT(state) != 0) {
		for (;;) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state - 1);
			if (rv == -1) {
				error = EFAULT;
				goto out;
			}
			if (oldstate != state) {
				state = oldstate;
				if (URWLOCK_READER_COUNT(oldstate) == 0) {
					error = EPERM;
					goto out;
				}
				error = umtxq_check_susp(td);
				if (error != 0)
					goto out;
			} else
				break;
		}
	} else {
		error = EPERM;
		goto out;
	}

	count = 0;

	if (!(flags & URWLOCK_PREFER_READER)) {
		if (state & URWLOCK_WRITE_WAITERS) {
			count = 1;
			q = UMTX_EXCLUSIVE_QUEUE;
		} else if (state & URWLOCK_READ_WAITERS) {
			count = INT_MAX;
			q = UMTX_SHARED_QUEUE;
		}
	} else {
		if (state & URWLOCK_READ_WAITERS) {
			count = INT_MAX;
			q = UMTX_SHARED_QUEUE;
		} else if (state & URWLOCK_WRITE_WAITERS) {
			count = 1;
			q = UMTX_EXCLUSIVE_QUEUE;
		}
	}

	if (count) {
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_signal_queue(&uq->uq_key, count, q);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
	}
out:
	umtx_key_release(&uq->uq_key);
	return (error);
}
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
static int
do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, count, count1;
	int error, rv;

	uq = td->td_umtxq;
	error = fueword32(&sem->_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	rv = casueword32(&sem->_has_waiters, 0, &count1, 1);
	if (rv == 0)
		rv = fueword32(&sem->_count, &count);
	if (rv == -1 || count != 0) {
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);
		return (rv == -1 ? EFAULT : 0);
	}
	umtxq_lock(&uq->uq_key);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		umtxq_remove(uq);
		/* A relative timeout cannot be restarted. */
		if (error == ERESTART && timeout != NULL &&
		    (timeout->_flags & UMTX_ABSTIME) == 0)
			error = EINTR;
	}
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Signal a userland semaphore.
 */
static int
do_sem_wake(struct thread *td, struct _usem *sem)
{
	struct umtx_key key;
	int error, cnt;
	uint32_t flags;

	error = fueword32(&sem->_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	if (cnt > 0) {
		/*
		 * The count is greater than zero, which means the memory
		 * is still referenced by user code, so the _has_waiters
		 * flag can be safely updated.
		 */
		if (cnt == 1) {
			umtxq_unlock(&key);
			error = suword32(&sem->_has_waiters, 0);
			umtxq_lock(&key);
			if (error == -1)
				error = EFAULT;
		}
		umtxq_signal(&key, 1);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
#endif

static int
do_sem2_wait(struct thread *td, struct _usem2 *sem, struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t count, flags;
	int error, rv;

	uq = td->td_umtxq;
	flags = fuword32(&sem->_flags);
	error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	rv = fueword32(&sem->_count, &count);
	if (rv == -1) {
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);
		return (EFAULT);
	}
	for (;;) {
		if (USEM_COUNT(count) != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (0);
		}
		if (count == USEM_HAS_WAITERS)
			break;
		rv = casueword32(&sem->_count, 0, &count, USEM_HAS_WAITERS);
		if (rv == -1) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}
		if (count == 0)
			break;
	}
	umtxq_lock(&uq->uq_key);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		umtxq_remove(uq);
		if (timeout != NULL && (timeout->_flags & UMTX_ABSTIME) == 0) {
			/* A relative timeout cannot be restarted. */
			if (error == ERESTART)
				error = EINTR;
			if (error == EINTR) {
				abs_timeout_update(&timo);
				timespecsub(&timo.end, &timo.cur,
				    &timeout->_timeout);
			}
		}
	}
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Signal a userland semaphore.
 */
static int
do_sem2_wake(struct thread *td, struct _usem2 *sem)
{
	struct umtx_key key;
	int error, cnt, rv;
	uint32_t count, flags;

	rv = fueword32(&sem->_flags, &flags);
	if (rv == -1)
		return (EFAULT);
	if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	if (cnt > 0) {
		/*
		 * If this was the last sleeping thread, clear the waiters
		 * flag in _count.
		 */
		if (cnt == 1) {
			umtxq_unlock(&key);
			rv = fueword32(&sem->_count, &count);
			while (rv != -1 && count & USEM_HAS_WAITERS)
				rv = casueword32(&sem->_count, count, &count,
				    count & ~USEM_HAS_WAITERS);
			if (rv == -1)
				error = EFAULT;
			umtxq_lock(&key);
		}

		umtxq_signal(&key, 1);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
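/*
 * For _usem2, USEM_COUNT() and USEM_HAS_WAITERS share one 32-bit
 * word, so an uncontended userland post needs no syscall at all.  A
 * hedged sketch of the expected user-side protocol (illustrative
 * only, not the libc implementation):
 *
 *	old = atomic_fetchadd_32(&sem->_count, 1);
 *	if ((old & USEM_HAS_WAITERS) != 0)
 *		_umtx_op(sem, UMTX_OP_SEM2_WAKE, 0, NULL, NULL);
 */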
static int
umtx_copyin_timeout(const void *addr, struct timespec *tsp)
{
	int error;

	error = copyin(addr, tsp, sizeof(struct timespec));
	if (error == 0) {
		if (tsp->tv_sec < 0 ||
		    tsp->tv_nsec >= 1000000000 ||
		    tsp->tv_nsec < 0)
			error = EINVAL;
	}
	return (error);
}

static int
umtx_copyin_umtx_time(const void *addr, size_t size, struct _umtx_time *tp)
{
	int error;

	if (size <= sizeof(struct timespec)) {
		tp->_clockid = CLOCK_REALTIME;
		tp->_flags = 0;
		error = copyin(addr, &tp->_timeout, sizeof(struct timespec));
	} else
		error = copyin(addr, tp, sizeof(struct _umtx_time));
	if (error != 0)
		return (error);
	if (tp->_timeout.tv_sec < 0 ||
	    tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0)
		return (EINVAL);
	return (0);
}
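/*
 * For the timed operations below, uaddr1 carries the size of the
 * object at uaddr2, which selects between the two accepted timeout
 * layouts: a bare struct timespec (relative, CLOCK_REALTIME) or a
 * full struct _umtx_time carrying _flags (UMTX_ABSTIME) and
 * _clockid.
 */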
static int
__umtx_op_unimpl(struct thread *td, struct _umtx_op_args *uap)
{

	return (EOPNOTSUPP);
}

static int
__umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout, *tm_p;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 0, 0));
}

static int
__umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout, *tm_p;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
}

static int
__umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
}

static int
__umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
{

	return (kern_umtx_wake(td, uap->obj, uap->val, 0));
}
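/*
 * UMTX_OP_NWAKE_PRIVATE: uap->obj points to an array of uap->val
 * userland addresses, and every waiter on each address is woken.
 * The array is copied in BATCH_SIZE pointers at a time to bound the
 * kernel stack footprint.
 */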
#define BATCH_SIZE 128
static int
__umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap)
{
	char *uaddrs[BATCH_SIZE], **upp;
	int count, error, i, pos, tocopy;

	upp = (char **)uap->obj;
	error = 0;
	for (count = uap->val, pos = 0; count > 0; count -= tocopy,
	    pos += tocopy) {
		tocopy = MIN(count, BATCH_SIZE);
		error = copyin(upp + pos, uaddrs, tocopy * sizeof(char *));
		if (error != 0)
			break;
		for (i = 0; i < tocopy; ++i)
			kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
		maybe_yield();
	}
	return (error);
}

static int
__umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
{

	return (kern_umtx_wake(td, uap->obj, uap->val, 1));
}
static int
__umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, 0));
}

static int
__umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY));
}

static int
__umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
}

static int
__umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_wake_umutex(td, uap->obj));
}

static int
__umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_unlock_umutex(td, uap->obj, false));
}

static int
__umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1));
}

static int
__umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
}

static int
__umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_cv_signal(td, uap->obj));
}

static int
__umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_cv_broadcast(td, uap->obj));
}
static int
__umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
	} else {
		error = umtx_copyin_umtx_time(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_wrlock(td, uap->obj, 0);
	} else {
		error = umtx_copyin_umtx_time(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_wrlock(td, uap->obj, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_rw_unlock(td, uap->obj));
}
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
static int
__umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_sem_wait(td, uap->obj, tm_p));
}

static int
__umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_sem_wake(td, uap->obj));
}
#endif

static int
__umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_wake2_umutex(td, uap->obj, uap->val));
}
static int
__umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	size_t uasize;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		uasize = 0;
		tm_p = NULL;
	} else {
		uasize = (size_t)uap->uaddr1;
		error = umtx_copyin_umtx_time(uap->uaddr2, uasize, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	error = do_sem2_wait(td, uap->obj, tm_p);
	if (error == EINTR && uap->uaddr2 != NULL &&
	    (timeout._flags & UMTX_ABSTIME) == 0 &&
	    uasize >= sizeof(struct _umtx_time) + sizeof(struct timespec)) {
		error = copyout(&timeout._timeout,
		    (struct _umtx_time *)uap->uaddr2 + 1,
		    sizeof(struct timespec));
		if (error == 0) {
			error = EINTR;
		}
	}

	return (error);
}

static int
__umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_sem2_wake(td, uap->obj));
}
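/*
 * UMTX_OP_SHM support: a global registry of anonymous POSIX shared
 * memory objects, keyed by the (vm object, offset) pair that backs a
 * given userland address.  This lets unrelated processes that map
 * the same page attach to the same shared object; registrations are
 * charged to the owner via RLIMIT_UMTXP.
 */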
#define USHM_OBJ_UMTX(o)						\
    ((struct umtx_shm_obj_list *)(&(o)->umtx_data))

#define USHMF_REG_LINKED	0x0001
#define USHMF_OBJ_LINKED	0x0002
struct umtx_shm_reg {
	TAILQ_ENTRY(umtx_shm_reg) ushm_reg_link;
	LIST_ENTRY(umtx_shm_reg) ushm_obj_link;
	struct umtx_key ushm_key;
	struct ucred *ushm_cred;
	struct shmfd *ushm_obj;
	u_int ushm_refcnt;
	u_int ushm_flags;
};

LIST_HEAD(umtx_shm_obj_list, umtx_shm_reg);
TAILQ_HEAD(umtx_shm_reg_head, umtx_shm_reg);

static uma_zone_t umtx_shm_reg_zone;
static struct umtx_shm_reg_head umtx_shm_registry[UMTX_CHAINS];
static struct mtx umtx_shm_lock;
static struct umtx_shm_reg_head umtx_shm_reg_delfree =
    TAILQ_HEAD_INITIALIZER(umtx_shm_reg_delfree);
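/*
 * Registrations released from the vm-object termination path are
 * parked on umtx_shm_reg_delfree and freed from a taskqueue below,
 * presumably because the final free (which drops the shmfd and the
 * ucred) is not safe to perform in that context.
 */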
static void umtx_shm_free_reg(struct umtx_shm_reg *reg);

static void
umtx_shm_reg_delfree_tq(void *context __unused, int pending __unused)
{
	struct umtx_shm_reg_head d;
	struct umtx_shm_reg *reg, *reg1;

	TAILQ_INIT(&d);
	mtx_lock(&umtx_shm_lock);
	TAILQ_CONCAT(&d, &umtx_shm_reg_delfree, ushm_reg_link);
	mtx_unlock(&umtx_shm_lock);
	TAILQ_FOREACH_SAFE(reg, &d, ushm_reg_link, reg1) {
		TAILQ_REMOVE(&d, reg, ushm_reg_link);
		umtx_shm_free_reg(reg);
	}
}

static struct task umtx_shm_reg_delfree_task =
    TASK_INITIALIZER(0, umtx_shm_reg_delfree_tq, NULL);
static struct umtx_shm_reg *
umtx_shm_find_reg_locked(const struct umtx_key *key)
{
	struct umtx_shm_reg *reg;
	struct umtx_shm_reg_head *reg_head;

	KASSERT(key->shared, ("umtx_shm_find_reg_locked: private key"));
	mtx_assert(&umtx_shm_lock, MA_OWNED);
	reg_head = &umtx_shm_registry[key->hash];
	TAILQ_FOREACH(reg, reg_head, ushm_reg_link) {
		KASSERT(reg->ushm_key.shared,
		    ("non-shared key on reg %p %d", reg, reg->ushm_key.shared));
		if (reg->ushm_key.info.shared.object ==
		    key->info.shared.object &&
		    reg->ushm_key.info.shared.offset ==
		    key->info.shared.offset) {
			KASSERT(reg->ushm_key.type == TYPE_SHM, ("TYPE_USHM"));
			KASSERT(reg->ushm_refcnt > 0,
			    ("reg %p refcnt 0 onlist", reg));
			KASSERT((reg->ushm_flags & USHMF_REG_LINKED) != 0,
			    ("reg %p not linked", reg));
			reg->ushm_refcnt++;
			return (reg);
		}
	}
	return (NULL);
}
static struct umtx_shm_reg *
umtx_shm_find_reg(const struct umtx_key *key)
{
	struct umtx_shm_reg *reg;

	mtx_lock(&umtx_shm_lock);
	reg = umtx_shm_find_reg_locked(key);
	mtx_unlock(&umtx_shm_lock);
	return (reg);
}

static void
umtx_shm_free_reg(struct umtx_shm_reg *reg)
{

	chgumtxcnt(reg->ushm_cred->cr_ruidinfo, -1, 0);
	crfree(reg->ushm_cred);
	shm_drop(reg->ushm_obj);
	uma_zfree(umtx_shm_reg_zone, reg);
}
static bool
umtx_shm_unref_reg_locked(struct umtx_shm_reg *reg, bool force)
{
	bool res;

	mtx_assert(&umtx_shm_lock, MA_OWNED);
	KASSERT(reg->ushm_refcnt > 0, ("ushm_reg %p refcnt 0", reg));
	reg->ushm_refcnt--;
	res = reg->ushm_refcnt == 0;
	if (res || force) {
		if ((reg->ushm_flags & USHMF_REG_LINKED) != 0) {
			TAILQ_REMOVE(&umtx_shm_registry[reg->ushm_key.hash],
			    reg, ushm_reg_link);
			reg->ushm_flags &= ~USHMF_REG_LINKED;
		}
		if ((reg->ushm_flags & USHMF_OBJ_LINKED) != 0) {
			LIST_REMOVE(reg, ushm_obj_link);
			reg->ushm_flags &= ~USHMF_OBJ_LINKED;
		}
	}
	return (res);
}

static void
umtx_shm_unref_reg(struct umtx_shm_reg *reg, bool force)
{
	vm_object_t object;
	bool dofree;

	if (force) {
		object = reg->ushm_obj->shm_object;
		VM_OBJECT_WLOCK(object);
		object->flags |= OBJ_UMTXDEAD;
		VM_OBJECT_WUNLOCK(object);
	}
	mtx_lock(&umtx_shm_lock);
	dofree = umtx_shm_unref_reg_locked(reg, force);
	mtx_unlock(&umtx_shm_lock);
	if (dofree)
		umtx_shm_free_reg(reg);
}
void
umtx_shm_object_init(vm_object_t object)
{

	LIST_INIT(USHM_OBJ_UMTX(object));
}

void
umtx_shm_object_terminated(vm_object_t object)
{
	struct umtx_shm_reg *reg, *reg1;
	bool dofree;

	if (LIST_EMPTY(USHM_OBJ_UMTX(object)))
		return;

	dofree = false;
	mtx_lock(&umtx_shm_lock);
	LIST_FOREACH_SAFE(reg, USHM_OBJ_UMTX(object), ushm_obj_link, reg1) {
		if (umtx_shm_unref_reg_locked(reg, true)) {
			TAILQ_INSERT_TAIL(&umtx_shm_reg_delfree, reg,
			    ushm_reg_link);
			dofree = true;
		}
	}
	mtx_unlock(&umtx_shm_lock);
	if (dofree)
		taskqueue_enqueue(taskqueue_thread, &umtx_shm_reg_delfree_task);
}
static int
umtx_shm_create_reg(struct thread *td, const struct umtx_key *key,
    struct umtx_shm_reg **res)
{
	struct umtx_shm_reg *reg, *reg1;
	struct ucred *cred;
	int error;

	reg = umtx_shm_find_reg(key);
	if (reg != NULL) {
		*res = reg;
		return (0);
	}
	cred = td->td_ucred;
	if (!chgumtxcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_UMTXP)))
		return (ENOMEM);
	reg = uma_zalloc(umtx_shm_reg_zone, M_WAITOK | M_ZERO);
	reg->ushm_refcnt = 1;
	bcopy(key, &reg->ushm_key, sizeof(*key));
	reg->ushm_obj = shm_alloc(td->td_ucred, O_RDWR);
	reg->ushm_cred = crhold(cred);
	error = shm_dotruncate(reg->ushm_obj, PAGE_SIZE);
	if (error != 0) {
		umtx_shm_free_reg(reg);
		return (error);
	}
	mtx_lock(&umtx_shm_lock);
	reg1 = umtx_shm_find_reg_locked(key);
	if (reg1 != NULL) {
		mtx_unlock(&umtx_shm_lock);
		umtx_shm_free_reg(reg);
		*res = reg1;
		return (0);
	}
	reg->ushm_refcnt++;
	TAILQ_INSERT_TAIL(&umtx_shm_registry[key->hash], reg, ushm_reg_link);
	LIST_INSERT_HEAD(USHM_OBJ_UMTX(key->info.shared.object), reg,
	    ushm_obj_link);
	reg->ushm_flags = USHMF_REG_LINKED | USHMF_OBJ_LINKED;
	mtx_unlock(&umtx_shm_lock);
	*res = reg;
	return (0);
}
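/*
 * UMTX_SHM_ALIVE: report whether the vm object backing addr is still
 * a live umtx shm object.  ENOTTY is returned once a forced destroy
 * has marked the object OBJ_UMTXDEAD.
 */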
static int
umtx_shm_alive(struct thread *td, void *addr)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	int res, ret;
	boolean_t wired;

	map = &td->td_proc->p_vmspace->vm_map;
	res = vm_map_lookup(&map, (uintptr_t)addr, VM_PROT_READ, &entry,
	    &object, &pindex, &prot, &wired);
	if (res != KERN_SUCCESS)
		return (EFAULT);
	if (object == NULL)
		ret = EINVAL;
	else
		ret = (object->flags & OBJ_UMTXDEAD) != 0 ? ENOTTY : 0;
	vm_map_lookup_done(map, entry);
	return (ret);
}

static void
umtx_shm_init(void)
{
	int i;

	umtx_shm_reg_zone = uma_zcreate("umtx_shm", sizeof(struct umtx_shm_reg),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&umtx_shm_lock, "umtxshm", NULL, MTX_DEF);
	for (i = 0; i < nitems(umtx_shm_registry); i++)
		TAILQ_INIT(&umtx_shm_registry[i]);
}
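/*
 * Handle a UMTX_SHM request.  Exactly one of UMTX_SHM_CREAT,
 * UMTX_SHM_LOOKUP, UMTX_SHM_DESTROY or UMTX_SHM_ALIVE must be set in
 * flags; CREAT and LOOKUP hand back an O_CLOEXEC descriptor for the
 * shm object in td_retval[0].
 */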
static int
umtx_shm(struct thread *td, void *addr, u_int flags)
{
	struct umtx_key key;
	struct umtx_shm_reg *reg;
	struct file *fp;
	int error, fd;

	if (__bitcount(flags & (UMTX_SHM_CREAT | UMTX_SHM_LOOKUP |
	    UMTX_SHM_DESTROY | UMTX_SHM_ALIVE)) != 1)
		return (EINVAL);
	if ((flags & UMTX_SHM_ALIVE) != 0)
		return (umtx_shm_alive(td, addr));
	error = umtx_key_get(addr, TYPE_SHM, PROCESS_SHARE, &key);
	if (error != 0)
		return (error);
	KASSERT(key.shared == 1, ("non-shared key"));
	if ((flags & UMTX_SHM_CREAT) != 0) {
		error = umtx_shm_create_reg(td, &key, &reg);
	} else {
		reg = umtx_shm_find_reg(&key);
		if (reg == NULL)
			error = ESRCH;
	}
	umtx_key_release(&key);
	if (error != 0)
		return (error);
	KASSERT(reg != NULL, ("no reg"));
	if ((flags & UMTX_SHM_DESTROY) != 0) {
		umtx_shm_unref_reg(reg, true);
	} else {
#if 0
#ifdef MAC
		error = mac_posixshm_check_open(td->td_ucred,
		    reg->ushm_obj, FFLAGS(O_RDWR));
		if (error == 0)
#endif
			error = shm_access(reg->ushm_obj, td->td_ucred,
			    FFLAGS(O_RDWR));
		if (error == 0)
#endif
			error = falloc_caps(td, &fp, &fd, O_CLOEXEC, NULL);
		if (error == 0) {
			shm_hold(reg->ushm_obj);
			finit(fp, FFLAGS(O_RDWR), DTYPE_SHM, reg->ushm_obj,
			    &shm_ops);
			td->td_retval[0] = fd;
			fdrop(fp, td);
		}
	}
	umtx_shm_unref_reg(reg, false);
	return (error);
}

static int
__umtx_op_shm(struct thread *td, struct _umtx_op_args *uap)
{

	return (umtx_shm(td, uap->uaddr1, uap->val));
}
static int
umtx_robust_lists(struct thread *td, struct umtx_robust_lists_params *rbp)
{

	td->td_rb_list = rbp->robust_list_offset;
	td->td_rbp_list = rbp->robust_priv_list_offset;
	td->td_rb_inact = rbp->robust_inact_offset;
	return (0);
}

static int
__umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap)
{
	struct umtx_robust_lists_params rb;
	int error;

	if (uap->val > sizeof(rb))
		return (EINVAL);
	bzero(&rb, sizeof(rb));
	error = copyin(uap->uaddr1, &rb, uap->val);
	if (error != 0)
		return (error);
	return (umtx_robust_lists(td, &rb));
}
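/*
 * The _umtx_op(2) dispatch table, indexed by the UMTX_OP_* constant.
 * sys__umtx_op() bounds-checks uap->op against the table size before
 * indexing, so out-of-range opcodes fail with EINVAL.
 */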
typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap);

static const _umtx_op_func op_table[] = {
	[UMTX_OP_RESERVED0] = __umtx_op_unimpl,
	[UMTX_OP_RESERVED1] = __umtx_op_unimpl,
	[UMTX_OP_WAIT] = __umtx_op_wait,
	[UMTX_OP_WAKE] = __umtx_op_wake,
	[UMTX_OP_MUTEX_TRYLOCK] = __umtx_op_trylock_umutex,
	[UMTX_OP_MUTEX_LOCK] = __umtx_op_lock_umutex,
	[UMTX_OP_MUTEX_UNLOCK] = __umtx_op_unlock_umutex,
	[UMTX_OP_SET_CEILING] = __umtx_op_set_ceiling,
	[UMTX_OP_CV_WAIT] = __umtx_op_cv_wait,
	[UMTX_OP_CV_SIGNAL] = __umtx_op_cv_signal,
	[UMTX_OP_CV_BROADCAST] = __umtx_op_cv_broadcast,
	[UMTX_OP_WAIT_UINT] = __umtx_op_wait_uint,
	[UMTX_OP_RW_RDLOCK] = __umtx_op_rw_rdlock,
	[UMTX_OP_RW_WRLOCK] = __umtx_op_rw_wrlock,
	[UMTX_OP_RW_UNLOCK] = __umtx_op_rw_unlock,
	[UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private,
	[UMTX_OP_WAKE_PRIVATE] = __umtx_op_wake_private,
	[UMTX_OP_MUTEX_WAIT] = __umtx_op_wait_umutex,
	[UMTX_OP_MUTEX_WAKE] = __umtx_op_wake_umutex,
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
	[UMTX_OP_SEM_WAIT] = __umtx_op_sem_wait,
	[UMTX_OP_SEM_WAKE] = __umtx_op_sem_wake,
#else
	[UMTX_OP_SEM_WAIT] = __umtx_op_unimpl,
	[UMTX_OP_SEM_WAKE] = __umtx_op_unimpl,
#endif
	[UMTX_OP_NWAKE_PRIVATE] = __umtx_op_nwake_private,
	[UMTX_OP_MUTEX_WAKE2] = __umtx_op_wake2_umutex,
	[UMTX_OP_SEM2_WAIT] = __umtx_op_sem2_wait,
	[UMTX_OP_SEM2_WAKE] = __umtx_op_sem2_wake,
	[UMTX_OP_SHM] = __umtx_op_shm,
	[UMTX_OP_ROBUST_LISTS] = __umtx_op_robust_lists,
};

int
sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
{

	if ((unsigned)uap->op < nitems(op_table))
		return (*op_table[uap->op])(td, uap);
	return (EINVAL);
}
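/*
 * A hedged sketch of the userland calling convention for the timed
 * wait operations (illustrative only; the real wrappers live in
 * libc/libthr).  uaddr1 carries the timeout structure size and
 * uaddr2 its address:
 *
 *	struct _umtx_time ut = {
 *		._timeout = ts,
 *		._flags = UMTX_ABSTIME,
 *		._clockid = CLOCK_MONOTONIC,
 *	};
 *	_umtx_op(&futex_word, UMTX_OP_WAIT_UINT_PRIVATE, expected_val,
 *	    (void *)sizeof(ut), &ut);
 */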
#ifdef COMPAT_FREEBSD32

struct timespec32 {
	int32_t tv_sec;
	int32_t tv_nsec;
};

struct umtx_time32 {
	struct timespec32 timeout;
	uint32_t flags;
	uint32_t clockid;
};

static inline int
umtx_copyin_timeout32(void *addr, struct timespec *tsp)
{
	struct timespec32 ts32;
	int error;

	error = copyin(addr, &ts32, sizeof(struct timespec32));
	if (error == 0) {
		if (ts32.tv_sec < 0 ||
		    ts32.tv_nsec >= 1000000000 ||
		    ts32.tv_nsec < 0)
			error = EINVAL;
		else {
			tsp->tv_sec = ts32.tv_sec;
			tsp->tv_nsec = ts32.tv_nsec;
		}
	}
	return (error);
}

static inline int
umtx_copyin_umtx_time32(const void *addr, size_t size, struct _umtx_time *tp)
{
	struct umtx_time32 t32;
	int error;

	t32.clockid = CLOCK_REALTIME;
	t32.flags = 0;
	if (size <= sizeof(struct timespec32))
		error = copyin(addr, &t32.timeout, sizeof(struct timespec32));
	else
		error = copyin(addr, &t32, sizeof(struct umtx_time32));
	if (error != 0)
		return (error);
	if (t32.timeout.tv_sec < 0 ||
	    t32.timeout.tv_nsec >= 1000000000 || t32.timeout.tv_nsec < 0)
		return (EINVAL);
	tp->_timeout.tv_sec = t32.timeout.tv_sec;
	tp->_timeout.tv_nsec = t32.timeout.tv_nsec;
	tp->_flags = t32.flags;
	tp->_clockid = t32.clockid;
	return (0);
}
static int
__umtx_op_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
}

static int
__umtx_op_lock_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, 0));
}

static int
__umtx_op_wait_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
}

static int
__umtx_op_cv_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
}

static int
__umtx_op_rw_rdlock_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
	} else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_wrlock_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_wrlock(td, uap->obj, 0);
	} else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_wrlock(td, uap->obj, &timeout);
	}
	return (error);
}

static int
__umtx_op_wait_uint_private_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
}
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
static int
__umtx_op_sem_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_sem_wait(td, uap->obj, tm_p));
}
#endif

static int
__umtx_op_sem2_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	size_t uasize;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		uasize = 0;
		tm_p = NULL;
	} else {
		uasize = (size_t)uap->uaddr1;
		error = umtx_copyin_umtx_time32(uap->uaddr2, uasize, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	error = do_sem2_wait(td, uap->obj, tm_p);
	if (error == EINTR && uap->uaddr2 != NULL &&
	    (timeout._flags & UMTX_ABSTIME) == 0 &&
	    uasize >= sizeof(struct umtx_time32) + sizeof(struct timespec32)) {
		struct timespec32 remain32 = {
			.tv_sec = timeout._timeout.tv_sec,
			.tv_nsec = timeout._timeout.tv_nsec
		};
		error = copyout(&remain32,
		    (struct umtx_time32 *)uap->uaddr2 + 1,
		    sizeof(struct timespec32));
		if (error == 0) {
			error = EINTR;
		}
	}

	return (error);
}

static int
__umtx_op_nwake_private32(struct thread *td, struct _umtx_op_args *uap)
{
	uint32_t uaddrs[BATCH_SIZE], **upp;
	int count, error, i, pos, tocopy;

	upp = (uint32_t **)uap->obj;
	error = 0;
	for (count = uap->val, pos = 0; count > 0; count -= tocopy,
	    pos += tocopy) {
		tocopy = MIN(count, BATCH_SIZE);
		error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t));
		if (error != 0)
			break;
		for (i = 0; i < tocopy; ++i)
			kern_umtx_wake(td, (void *)(intptr_t)uaddrs[i],
			    INT_MAX, 1);
		maybe_yield();
	}
	return (error);
}
struct umtx_robust_lists_params_compat32 {
	uint32_t robust_list_offset;
	uint32_t robust_priv_list_offset;
	uint32_t robust_inact_offset;
};

static int
__umtx_op_robust_lists_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct umtx_robust_lists_params rb;
	struct umtx_robust_lists_params_compat32 rb32;
	int error;

	if (uap->val > sizeof(rb32))
		return (EINVAL);
	bzero(&rb, sizeof(rb));
	bzero(&rb32, sizeof(rb32));
	error = copyin(uap->uaddr1, &rb32, uap->val);
	if (error != 0)
		return (error);
	rb.robust_list_offset = rb32.robust_list_offset;
	rb.robust_priv_list_offset = rb32.robust_priv_list_offset;
	rb.robust_inact_offset = rb32.robust_inact_offset;
	return (umtx_robust_lists(td, &rb));
}
static const _umtx_op_func op_table_compat32[] = {
	[UMTX_OP_RESERVED0] = __umtx_op_unimpl,
	[UMTX_OP_RESERVED1] = __umtx_op_unimpl,
	[UMTX_OP_WAIT] = __umtx_op_wait_compat32,
	[UMTX_OP_WAKE] = __umtx_op_wake,
	[UMTX_OP_MUTEX_TRYLOCK] = __umtx_op_trylock_umutex,
	[UMTX_OP_MUTEX_LOCK] = __umtx_op_lock_umutex_compat32,
	[UMTX_OP_MUTEX_UNLOCK] = __umtx_op_unlock_umutex,
	[UMTX_OP_SET_CEILING] = __umtx_op_set_ceiling,
	[UMTX_OP_CV_WAIT] = __umtx_op_cv_wait_compat32,
	[UMTX_OP_CV_SIGNAL] = __umtx_op_cv_signal,
	[UMTX_OP_CV_BROADCAST] = __umtx_op_cv_broadcast,
	[UMTX_OP_WAIT_UINT] = __umtx_op_wait_compat32,
	[UMTX_OP_RW_RDLOCK] = __umtx_op_rw_rdlock_compat32,
	[UMTX_OP_RW_WRLOCK] = __umtx_op_rw_wrlock_compat32,
	[UMTX_OP_RW_UNLOCK] = __umtx_op_rw_unlock,
	[UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private_compat32,
	[UMTX_OP_WAKE_PRIVATE] = __umtx_op_wake_private,
	[UMTX_OP_MUTEX_WAIT] = __umtx_op_wait_umutex_compat32,
	[UMTX_OP_MUTEX_WAKE] = __umtx_op_wake_umutex,
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
	[UMTX_OP_SEM_WAIT] = __umtx_op_sem_wait_compat32,
	[UMTX_OP_SEM_WAKE] = __umtx_op_sem_wake,
#else
	[UMTX_OP_SEM_WAIT] = __umtx_op_unimpl,
	[UMTX_OP_SEM_WAKE] = __umtx_op_unimpl,
#endif
	[UMTX_OP_NWAKE_PRIVATE] = __umtx_op_nwake_private32,
	[UMTX_OP_MUTEX_WAKE2] = __umtx_op_wake2_umutex,
	[UMTX_OP_SEM2_WAIT] = __umtx_op_sem2_wait_compat32,
	[UMTX_OP_SEM2_WAKE] = __umtx_op_sem2_wake,
	[UMTX_OP_SHM] = __umtx_op_shm,
	[UMTX_OP_ROBUST_LISTS] = __umtx_op_robust_lists_compat32,
};

int
freebsd32__umtx_op(struct thread *td, struct freebsd32__umtx_op_args *uap)
{

	if ((unsigned)uap->op < nitems(op_table_compat32)) {
		return (*op_table_compat32[uap->op])(td,
		    (struct _umtx_op_args *)uap);
	}
	return (EINVAL);
}
#endif /* COMPAT_FREEBSD32 */
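/*
 * Per-thread umtx state lifecycle: td_umtxq is allocated once in
 * umtx_thread_init(), reset for a new thread in umtx_thread_alloc(),
 * and torn down from the exec and thread_exit hooks below.
 */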
void
umtx_thread_init(struct thread *td)
{

	td->td_umtxq = umtxq_alloc();
	td->td_umtxq->uq_thread = td;
}

void
umtx_thread_fini(struct thread *td)
{

	umtxq_free(td->td_umtxq);
}

/*
 * Called when a new thread is created, e.g. by fork().
 */
void
umtx_thread_alloc(struct thread *td)
{
	struct umtx_q *uq;

	uq = td->td_umtxq;
	uq->uq_inherited_pri = PRI_MAX;

	KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
	KASSERT(uq->uq_thread == td, ("uq_thread != td"));
	KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
	KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
}
/*
 * exec() hook.
 *
 * Clear robust lists for all process' threads, not delaying the
 * cleanup to thread exit, since the relevant address space is
 * destroyed right now.
 */
static void
umtx_exec_hook(void *arg __unused, struct proc *p,
    struct image_params *imgp __unused)
{
	struct thread *td;

	KASSERT(p == curproc, ("need curproc"));
	KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
	    (p->p_flag & P_STOPPED_SINGLE) != 0,
	    ("curproc must be single-threaded"));
	/*
	 * There is no need to lock the list as only this thread can be
	 * running.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(td == curthread ||
		    ((td->td_flags & TDF_BOUNDARY) != 0 && TD_IS_SUSPENDED(td)),
		    ("running thread %p %p", p, td));
		umtx_thread_cleanup(td);
		td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
	}
}

/*
 * thread_exit() hook.
 */
void
umtx_thread_exit(struct thread *td)
{

	umtx_thread_cleanup(td);
}
static int
umtx_read_uptr(struct thread *td, uintptr_t ptr, uintptr_t *res)
{
	u_long res1;
#ifdef COMPAT_FREEBSD32
	uint32_t res32;
#endif
	int error;

#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		error = fueword32((void *)ptr, &res32);
		if (error == 0)
			res1 = res32;
	} else
#endif
	{
		error = fueword((void *)ptr, &res1);
	}
	if (error == 0)
		*res = res1;
	else
		error = EFAULT;
	return (error);
}

static void
umtx_read_rb_list(struct thread *td, struct umutex *m, uintptr_t *rb_list)
{
#ifdef COMPAT_FREEBSD32
	struct umutex32 m32;

	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		memcpy(&m32, m, sizeof(m32));
		*rb_list = m32.m_rb_lnk;
	} else
#endif
		*rb_list = m->m_rb_lnk;
}
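/*
 * Walk one entry of a robust mutex list: fetch the umutex at rbp,
 * return the next list link through rb_list, and unlock the mutex
 * only if the exiting thread owns it.  The "inact" (currently being
 * unlocked) slot may legitimately race with userland, so an
 * ownership mismatch there is not treated as an error.
 */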
static int
umtx_handle_rb(struct thread *td, uintptr_t rbp, uintptr_t *rb_list, bool inact)
{
	struct umutex m;
	int error;

	KASSERT(td->td_proc == curproc, ("need current vmspace"));
	error = copyin((void *)rbp, &m, sizeof(m));
	if (error != 0)
		return (error);
	if (rb_list != NULL)
		umtx_read_rb_list(td, &m, rb_list);
	if ((m.m_flags & UMUTEX_ROBUST) == 0)
		return (EINVAL);
	if ((m.m_owner & ~UMUTEX_CONTESTED) != td->td_tid)
		/* inact is cleared after unlock, allow the inconsistency */
		return (inact ? 0 : EINVAL);
	return (do_unlock_umutex(td, (struct umutex *)rbp, true));
}

static void
umtx_cleanup_rb_list(struct thread *td, uintptr_t rb_list, uintptr_t *rb_inact,
    const char *name)
{
	int error, i;
	uintptr_t rbp;
	bool inact;

	if (rb_list == 0)
		return;
	error = umtx_read_uptr(td, rb_list, &rbp);
	for (i = 0; error == 0 && rbp != 0 && i < umtx_max_rb; i++) {
		if (rbp == *rb_inact) {
			inact = true;
			*rb_inact = 0;
		} else
			inact = false;
		error = umtx_handle_rb(td, rbp, &rbp, inact);
	}
	if (i == umtx_max_rb && umtx_verbose_rb) {
		uprintf("comm %s pid %d: reached umtx %smax rb %d\n",
		    td->td_proc->p_comm, td->td_proc->p_pid, name, umtx_max_rb);
	}
	if (error != 0 && umtx_verbose_rb) {
		uprintf("comm %s pid %d: handling %srb error %d\n",
		    td->td_proc->p_comm, td->td_proc->p_pid, name, error);
	}
}
/*
 * Clean up umtx data.
 */
static void
umtx_thread_cleanup(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;
	uintptr_t rb_inact;

	/*
	 * Disown pi mutexes.
	 */
	uq = td->td_umtxq;
	if (uq != NULL) {
		if (uq->uq_inherited_pri != PRI_MAX ||
		    !TAILQ_EMPTY(&uq->uq_pi_contested)) {
			mtx_lock(&umtx_lock);
			uq->uq_inherited_pri = PRI_MAX;
			while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
				pi->pi_owner = NULL;
				TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
			}
			mtx_unlock(&umtx_lock);
		}
		sched_lend_user_prio_cond(td, PRI_MAX);
	}

	if (td->td_rb_inact == 0 && td->td_rb_list == 0 && td->td_rbp_list == 0)
		return;

	/*
	 * Handle terminated robust mutexes.  Must be done after
	 * robust pi disown, otherwise unlock could see unowned
	 * entries.
	 */
	rb_inact = td->td_rb_inact;
	if (rb_inact != 0)
		(void)umtx_read_uptr(td, rb_inact, &rb_inact);
	umtx_cleanup_rb_list(td, td->td_rb_list, &rb_inact, "");
	umtx_cleanup_rb_list(td, td->td_rbp_list, &rb_inact, "priv ");
	if (rb_inact != 0)
		(void)umtx_handle_rb(td, rb_inact, NULL, true);
}