/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015, 2016 The FreeBSD Foundation
 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_umtx_profiling.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/eventhandler.h>
#include <sys/umtx.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/uma.h>

#include <machine/atomic.h>
#include <machine/cpu.h>
#ifdef COMPAT_FREEBSD32
#include <compat/freebsd32/freebsd32_proto.h>
#endif
#define	_UMUTEX_TRY		1
#define	_UMUTEX_WAIT		2

#ifdef UMTX_PROFILING
#define	UPROF_PERC_BIGGER(w, f, sw, sf)					\
	(((w) > (sw)) || ((w) == (sw) && (f) > (sf)))
#endif
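/*
 * UPROF_PERC_BIGGER() compares two percentage values lexicographically,
 * each represented as a separate (whole, fraction) pair: (w, f) is
 * "bigger" than (sw, sf) when the whole part is larger, or the whole
 * parts tie and the fractional part is larger.  For example, 12.40%
 * vs. 12.35% compares as (12, 40) vs. (12, 35): the whole parts tie
 * and 40 > 35, so the first percentage wins.
 */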
/* Priority inheritance mutex info. */
struct umtx_pi {
	/* Owner thread */
	struct thread		*pi_owner;

	/* Reference count */
	int			pi_refcount;

	/* List entry to link the PI mutexes held by a thread */
	TAILQ_ENTRY(umtx_pi)	pi_link;

	/* List entry in hash */
	TAILQ_ENTRY(umtx_pi)	pi_hashlink;

	/* List for waiters */
	TAILQ_HEAD(,umtx_q)	pi_blocked;

	/* Identify a userland lock object */
	struct umtx_key		pi_key;
};
/* A userland synchronization object user. */
struct umtx_q {
	/* Linked list for the hash. */
	TAILQ_ENTRY(umtx_q)	uq_link;

	/* Umtx key. */
	struct umtx_key		uq_key;

	/* Umtx flags. */
	int			uq_flags;
#define UQF_UMTXQ	0x0001

	/* The waiting thread. */
	struct thread		*uq_thread;

	/*
	 * Blocked-on PI mutex.  Reads can use either the chain lock
	 * or umtx_lock; writes must hold both the chain lock and
	 * umtx_lock.
	 */
	struct umtx_pi		*uq_pi_blocked;

	/* On blocked list */
	TAILQ_ENTRY(umtx_q)	uq_lockq;

	/* Threads contending with us */
	TAILQ_HEAD(,umtx_pi)	uq_pi_contested;

	/* Inherited priority from PP mutex */
	u_char			uq_inherited_pri;

	/* Spare queue ready to be reused */
	struct umtxq_queue	*uq_spare_queue;

	/* The queue we are on */
	struct umtxq_queue	*uq_cur_queue;
};
TAILQ_HEAD(umtxq_head, umtx_q);

/* Per-key wait-queue */
struct umtxq_queue {
	struct umtxq_head	head;
	struct umtx_key		key;
	LIST_ENTRY(umtxq_queue)	link;
	int			length;
};

LIST_HEAD(umtxq_list, umtxq_queue);

/* Userland lock object's wait-queue chain */
struct umtxq_chain {
	/* Lock for this chain. */
	struct mtx		uc_lock;

	/* List of sleep queues. */
	struct umtxq_list	uc_queue[2];
#define UMTX_SHARED_QUEUE	0
#define UMTX_EXCLUSIVE_QUEUE	1

	LIST_HEAD(, umtxq_queue) uc_spare_queue;

	/* Busy flag */
	char			uc_busy;

	/* Chain lock waiters */
	int			uc_waiters;

	/* All PIs in the list */
	TAILQ_HEAD(,umtx_pi)	uc_pi_list;

#ifdef UMTX_PROFILING
	u_int			length;
	u_int			max_length;
#endif
};

#define	UMTXQ_LOCKED_ASSERT(uc)		mtx_assert(&(uc)->uc_lock, MA_OWNED)
/*
 * Don't propagate time-sharing priority; there is a security reason.
 * A user can simply create a PI-mutex, let thread A lock it, and let
 * another thread B block on it.  Because B is sleeping, its priority
 * would be boosted, and priority propagation would boost A's priority
 * as well; A's priority would then never be lowered even if it were
 * consuming 100% CPU, which is unfair to other processes.
 */
#define UPRI(td)	(((td)->td_user_pri >= PRI_MIN_TIMESHARE &&	\
			  (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?	\
			 PRI_MAX_TIMESHARE : (td)->td_user_pri)
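/*
 * In other words, a time-sharing thread always contributes the worst
 * time-sharing priority (PRI_MAX_TIMESHARE) to propagation, so lending
 * can only meaningfully boost a lock owner when the waiter's user
 * priority lies outside the time-sharing range, e.g. a real-time
 * thread.
 */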
#define	GOLDEN_RATIO_PRIME	2654404609U
#define	UMTX_CHAINS		512
#define	UMTX_SHIFTS		(__WORD_BIT - 9)

#define	GET_SHARE(flags)	\
    (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)

#define	BUSY_SPINS		200

struct abs_timeout {
	int clockid;
	bool is_abs_real;	/* TIMER_ABSTIME && CLOCK_REALTIME* */
	struct timespec cur;
	struct timespec end;
};
#ifdef COMPAT_FREEBSD32
struct umutex32 {
	volatile __lwpid_t	m_owner;	/* Owner of the mutex */
	__uint32_t		m_flags;	/* Flags of the mutex */
	__uint32_t		m_ceilings[2];	/* Priority protect ceiling */
	__uint32_t		m_rb_lnk;	/* Robust linkage */
	__uint32_t		m_pad;
	__uint32_t		m_spare[2];
};

_Static_assert(sizeof(struct umutex) == sizeof(struct umutex32), "umutex32");
_Static_assert(__offsetof(struct umutex, m_spare[0]) ==
    __offsetof(struct umutex32, m_spare[0]), "m_spare32");
#endif
int umtx_shm_vnobj_persistent = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_vnode_persistent, CTLFLAG_RWTUN,
    &umtx_shm_vnobj_persistent, 0,
    "False forces destruction of umtx attached to file, on last close");
static int umtx_max_rb = 1000;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_max_robust, CTLFLAG_RWTUN,
    &umtx_max_rb, 0,
    "Maximum number of robust mutexes allowed for each thread");

static uma_zone_t		umtx_pi_zone;
static struct umtxq_chain	umtxq_chains[2][UMTX_CHAINS];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
static int			umtx_pi_allocated;

static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx debug");
SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
    &umtx_pi_allocated, 0, "Allocated umtx_pi");
static int umtx_verbose_rb = 1;
SYSCTL_INT(_debug_umtx, OID_AUTO, robust_faults_verbose, CTLFLAG_RWTUN,
    &umtx_verbose_rb, 0,
    "");
#ifdef UMTX_PROFILING
static long max_length;
SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length");
static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD, 0, "umtx chain stats");
#endif
static void abs_timeout_update(struct abs_timeout *timo);

static void umtx_shm_init(void);
static void umtxq_sysinit(void *);
static void umtxq_hash(struct umtx_key *key);
static struct umtxq_chain *umtxq_getchain(struct umtx_key *key);
static void umtxq_lock(struct umtx_key *key);
static void umtxq_unlock(struct umtx_key *key);
static void umtxq_busy(struct umtx_key *key);
static void umtxq_unbusy(struct umtx_key *key);
static void umtxq_insert_queue(struct umtx_q *uq, int q);
static void umtxq_remove_queue(struct umtx_q *uq, int q);
static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *);
static int umtxq_count(struct umtx_key *key);
static struct umtx_pi *umtx_pi_alloc(int);
static void umtx_pi_free(struct umtx_pi *pi);
static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    bool rb);
static void umtx_thread_cleanup(struct thread *td);
static void umtx_exec_hook(void *arg __unused, struct proc *p __unused,
    struct image_params *imgp __unused);
SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);

#define umtxq_signal(key, nwake)	umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
#define umtxq_insert(uq)	umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
#define umtxq_remove(uq)	umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)

static struct mtx umtx_lock;
#ifdef UMTX_PROFILING
static void
umtx_init_profiling(void)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < UMTX_CHAINS; ++i) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "umtx hash stats");
		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL);
		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL);
	}
}
static int
sysctl_debug_umtx_chains_peaks(SYSCTL_HANDLER_ARGS)
{
	char buf[512];
	struct sbuf sb;
	struct umtxq_chain *uc;
	u_int fract, i, j, tot, whole;
	u_int sf0, sf1, sf2, sf3, sf4;
	u_int si0, si1, si2, si3, si4;
	u_int sw0, sw1, sw2, sw3, sw4;

	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
	for (i = 0; i < 2; i++) {
		tot = 0;
		for (j = 0; j < UMTX_CHAINS; ++j) {
			uc = &umtxq_chains[i][j];
			mtx_lock(&uc->uc_lock);
			tot += uc->max_length;
			mtx_unlock(&uc->uc_lock);
		}
		if (tot == 0)
			sbuf_printf(&sb, "%u) Empty ", i);
		else {
			sf0 = sf1 = sf2 = sf3 = sf4 = 0;
			si0 = si1 = si2 = si3 = si4 = 0;
			sw0 = sw1 = sw2 = sw3 = sw4 = 0;
			for (j = 0; j < UMTX_CHAINS; j++) {
				uc = &umtxq_chains[i][j];
				mtx_lock(&uc->uc_lock);
				whole = uc->max_length * 100;
				mtx_unlock(&uc->uc_lock);
				fract = (whole % tot) * 100;
				if (UPROF_PERC_BIGGER(whole, fract, sw0, sf0)) {
					sf0 = fract;
					si0 = j;
					sw0 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw1,
				    sf1)) {
					sf1 = fract;
					si1 = j;
					sw1 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw2,
				    sf2)) {
					sf2 = fract;
					si2 = j;
					sw2 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw3,
				    sf3)) {
					sf3 = fract;
					si3 = j;
					sw3 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw4,
				    sf4)) {
					sf4 = fract;
					si4 = j;
					sw4 = whole;
				}
			}
			sbuf_printf(&sb, "queue %u:\n", i);
			sbuf_printf(&sb, "1st: %u.%u%% idx: %u\n", sw0 / tot,
			    sf0 / tot, si0);
			sbuf_printf(&sb, "2nd: %u.%u%% idx: %u\n", sw1 / tot,
			    sf1 / tot, si1);
			sbuf_printf(&sb, "3rd: %u.%u%% idx: %u\n", sw2 / tot,
			    sf2 / tot, si2);
			sbuf_printf(&sb, "4th: %u.%u%% idx: %u\n", sw3 / tot,
			    sf3 / tot, si3);
			sbuf_printf(&sb, "5th: %u.%u%% idx: %u\n", sw4 / tot,
			    sf4 / tot, si4);
		}
	}
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);
	return (0);
}
static int
sysctl_debug_umtx_chains_clear(SYSCTL_HANDLER_ARGS)
{
	struct umtxq_chain *uc;
	u_int i, j;
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (clear != 0) {
		for (i = 0; i < 2; ++i) {
			for (j = 0; j < UMTX_CHAINS; ++j) {
				uc = &umtxq_chains[i][j];
				mtx_lock(&uc->uc_lock);
				uc->length = 0;
				uc->max_length = 0;
				mtx_unlock(&uc->uc_lock);
			}
		}
	}
	return (0);
}

SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, clear,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_clear, "I", "Clear umtx chains statistics");
SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, peaks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_peaks, "A", "Highest peaks in chains max length");
#endif
static void
umtxq_sysinit(void *arg __unused)
{
	int i, j;

	umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	for (i = 0; i < 2; ++i) {
		for (j = 0; j < UMTX_CHAINS; ++j) {
			mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
			    MTX_DEF | MTX_DUPOK);
			LIST_INIT(&umtxq_chains[i][j].uc_queue[0]);
			LIST_INIT(&umtxq_chains[i][j].uc_queue[1]);
			LIST_INIT(&umtxq_chains[i][j].uc_spare_queue);
			TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
			umtxq_chains[i][j].uc_busy = 0;
			umtxq_chains[i][j].uc_waiters = 0;
#ifdef UMTX_PROFILING
			umtxq_chains[i][j].length = 0;
			umtxq_chains[i][j].max_length = 0;
#endif
		}
	}
#ifdef UMTX_PROFILING
	umtx_init_profiling();
#endif
	mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);
	EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
	    EVENTHANDLER_PRI_ANY);
	umtx_shm_init();
}
struct umtx_q *
umtxq_alloc(void)
{
	struct umtx_q *uq;

	uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
	uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX,
	    M_WAITOK | M_ZERO);
	TAILQ_INIT(&uq->uq_spare_queue->head);
	TAILQ_INIT(&uq->uq_pi_contested);
	uq->uq_inherited_pri = PRI_MAX;
	return (uq);
}

void
umtxq_free(struct umtx_q *uq)
{

	MPASS(uq->uq_spare_queue != NULL);
	free(uq->uq_spare_queue, M_UMTX);
	free(uq, M_UMTX);
}
static inline void
umtxq_hash(struct umtx_key *key)
{
	unsigned n;

	n = (uintptr_t)key->info.both.a + key->info.both.b;
	key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
}
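/*
 * The hash above is a multiplicative (Fibonacci-style) hash: the key
 * material is multiplied by a prime close to 2^32 divided by the
 * golden ratio, and the top bits of the product select one of the
 * UMTX_CHAINS (512) buckets.  This spreads nearby addresses across
 * unrelated chains; e.g. two keys whose addresses differ by only a
 * few words will usually land on different chains.
 */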
static inline struct umtxq_chain *
umtxq_getchain(struct umtx_key *key)
{

	if (key->type <= TYPE_SEM)
		return (&umtxq_chains[1][key->hash]);
	return (&umtxq_chains[0][key->hash]);
}

/*
 * Lock a chain.
 */
static inline void
umtxq_lock(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_lock(&uc->uc_lock);
}

/*
 * Unlock a chain.
 */
static inline void
umtxq_unlock(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_unlock(&uc->uc_lock);
}
/*
 * Set the chain to the busy state when the following operations
 * may block (the kernel mutex cannot be held across them).
 */
static inline void
umtxq_busy(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_assert(&uc->uc_lock, MA_OWNED);
	if (uc->uc_busy) {
#ifdef SMP
		if (smp_cpus > 1) {
			int count = BUSY_SPINS;

			if (count > 0) {
				umtxq_unlock(key);
				while (uc->uc_busy && --count > 0)
					cpu_spinwait();
				umtxq_lock(key);
			}
		}
#endif
		while (uc->uc_busy) {
			uc->uc_waiters++;
			msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
			uc->uc_waiters--;
		}
	}
	uc->uc_busy = 1;
}
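/*
 * The busy flag is a hand-rolled sleepable lock layered on top of the
 * chain mutex: umtxq_busy() marks the chain busy so the chain mutex
 * can be dropped around operations that may fault or sleep (userspace
 * access, allocation), spinning briefly before sleeping, and
 * umtxq_unbusy() wakes one thread that slept in umtxq_busy() waiting
 * for the flag to clear.
 */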
/*
 * Unbusy a chain.
 */
static inline void
umtxq_unbusy(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_assert(&uc->uc_lock, MA_OWNED);
	KASSERT(uc->uc_busy != 0, ("not busy"));
	uc->uc_busy = 0;
	if (uc->uc_waiters)
		wakeup_one(uc);
}

static inline void
umtxq_unbusy_unlocked(struct umtx_key *key)
{

	umtxq_lock(key);
	umtxq_unbusy(key);
	umtxq_unlock(key);
}

static struct umtxq_queue *
umtxq_queue_lookup(struct umtx_key *key, int q)
{
	struct umtxq_queue *uh;
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);
	LIST_FOREACH(uh, &uc->uc_queue[q], link) {
		if (umtx_key_match(&uh->key, key))
			return (uh);
	}
	return (NULL);
}
static inline void
umtxq_insert_queue(struct umtx_q *uq, int q)
{
	struct umtxq_queue *uh;
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue"));
	uh = umtxq_queue_lookup(&uq->uq_key, q);
	if (uh != NULL) {
		LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
	} else {
		uh = uq->uq_spare_queue;
		uh->key = uq->uq_key;
		LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
#ifdef UMTX_PROFILING
		uc->length++;
		if (uc->length > uc->max_length) {
			uc->max_length = uc->length;
			if (uc->max_length > max_length)
				max_length = uc->max_length;
		}
#endif
	}
	uq->uq_spare_queue = NULL;
	TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
	uh->length++;
	uq->uq_flags |= UQF_UMTXQ;
	uq->uq_cur_queue = uh;
}
static inline void
umtxq_remove_queue(struct umtx_q *uq, int q)
{
	struct umtxq_chain *uc;
	struct umtxq_queue *uh;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	if (uq->uq_flags & UQF_UMTXQ) {
		uh = uq->uq_cur_queue;
		TAILQ_REMOVE(&uh->head, uq, uq_link);
		uh->length--;
		uq->uq_flags &= ~UQF_UMTXQ;
		if (TAILQ_EMPTY(&uh->head)) {
			KASSERT(uh->length == 0,
			    ("inconsistent umtxq_queue length"));
#ifdef UMTX_PROFILING
			uc->length--;
#endif
			LIST_REMOVE(uh, link);
		} else {
			uh = LIST_FIRST(&uc->uc_spare_queue);
			KASSERT(uh != NULL, ("uc_spare_queue is empty"));
			LIST_REMOVE(uh, link);
		}
		uq->uq_spare_queue = uh;
		uq->uq_cur_queue = NULL;
	}
}
/*
 * Check if there are multiple waiters.
 */
static int
umtxq_count(struct umtx_key *key)
{
	struct umtxq_chain *uc;
	struct umtxq_queue *uh;

	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
	if (uh != NULL)
		return (uh->length);
	return (0);
}

/*
 * Check if there are multiple PI waiters and return the first one.
 */
static int
umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
{
	struct umtxq_chain *uc;
	struct umtxq_queue *uh;

	*first = NULL;
	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
	if (uh != NULL) {
		*first = TAILQ_FIRST(&uh->head);
		return (uh->length);
	}
	return (0);
}
static int
umtxq_check_susp(struct thread *td)
{
	struct proc *p;
	int error;

	/*
	 * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
	 * eventually break the lockstep loop.
	 */
	if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
		return (0);
	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	if (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) {
		if (p->p_flag & P_SINGLE_EXIT)
			error = EINTR;
		else
			error = ERESTART;
	}
	PROC_UNLOCK(p);
	return (error);
}
/*
 * Wake up threads waiting on a userland object.
 */
static int
umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
{
	struct umtxq_chain *uc;
	struct umtxq_queue *uh;
	struct umtx_q *uq;
	int ret;

	ret = 0;
	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);
	uh = umtxq_queue_lookup(key, q);
	if (uh != NULL) {
		while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
			umtxq_remove_queue(uq, q);
			wakeup(uq);
			if (++ret >= n_wake)
				return (ret);
		}
	}
	return (ret);
}

/*
 * Wake up the specified thread.
 */
static inline void
umtxq_signal_thread(struct umtx_q *uq)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	umtxq_remove(uq);
	wakeup(uq);
}
static inline int
tstohz(const struct timespec *tsp)
{
	struct timeval tv;

	TIMESPEC_TO_TIMEVAL(&tv, tsp);
	return tvtohz(&tv);
}

static void
abs_timeout_init(struct abs_timeout *timo, int clockid, int absolute,
    const struct timespec *timeout)
{

	timo->clockid = clockid;
	if (!absolute) {
		timo->is_abs_real = false;
		abs_timeout_update(timo);
		timo->end = timo->cur;
		timespecadd(&timo->end, timeout);
	} else {
		timo->end = *timeout;
		timo->is_abs_real = clockid == CLOCK_REALTIME ||
		    clockid == CLOCK_REALTIME_FAST ||
		    clockid == CLOCK_REALTIME_PRECISE;
		/*
		 * If is_abs_real, umtxq_sleep will read the clock
		 * after setting td_rtcgen; otherwise, read it here.
		 */
		if (!timo->is_abs_real) {
			abs_timeout_update(timo);
		}
	}
}

static void
abs_timeout_init2(struct abs_timeout *timo, const struct _umtx_time *umtxtime)
{

	abs_timeout_init(timo, umtxtime->_clockid,
	    (umtxtime->_flags & UMTX_ABSTIME) != 0, &umtxtime->_timeout);
}

static void
abs_timeout_update(struct abs_timeout *timo)
{

	kern_clock_gettime(curthread, timo->clockid, &timo->cur);
}

static int
abs_timeout_gethz(struct abs_timeout *timo)
{
	struct timespec tts;

	if (timespeccmp(&timo->end, &timo->cur, <=))
		return (-1);
	tts = timo->end;
	timespecsub(&tts, &timo->cur);
	return (tstohz(&tts));
}
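/*
 * Example of the two timeout flavors handled above: a relative 1s
 * timeout is converted to an absolute deadline against the chosen
 * clock at init time (end = now + 1s), while an absolute
 * CLOCK_REALTIME deadline is kept as-is and re-read in umtxq_sleep()
 * after td_rtcgen is recorded, so that stepping the real-time clock
 * wakes the sleeper and the deadline is re-evaluated against the new
 * clock value.
 */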
static uint32_t
umtx_unlock_val(uint32_t flags, bool rb)
{

	if (rb)
		return (UMUTEX_RB_OWNERDEAD);
	else if ((flags & UMUTEX_NONCONSISTENT) != 0)
		return (UMUTEX_RB_NOTRECOV);
	else
		return (UMUTEX_UNOWNED);
}
/*
 * Put the thread into a sleep state; before sleeping, check whether
 * the thread was removed from the umtx queue.
 */
static inline int
umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *abstime)
{
	struct umtxq_chain *uc;
	int error, timo;

	if (abstime != NULL && abstime->is_abs_real) {
		curthread->td_rtcgen = atomic_load_acq_int(&rtc_generation);
		abs_timeout_update(abstime);
	}

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	for (;;) {
		if (!(uq->uq_flags & UQF_UMTXQ)) {
			error = 0;
			break;
		}
		if (abstime != NULL) {
			timo = abs_timeout_gethz(abstime);
			if (timo < 0) {
				error = ETIMEDOUT;
				break;
			}
		} else
			timo = 0;
		error = msleep(uq, &uc->uc_lock, PCATCH | PDROP, wmesg, timo);
		if (error == EINTR || error == ERESTART) {
			umtxq_lock(&uq->uq_key);
			break;
		}
		if (abstime != NULL) {
			if (abstime->is_abs_real)
				curthread->td_rtcgen =
				    atomic_load_acq_int(&rtc_generation);
			abs_timeout_update(abstime);
		}
		umtxq_lock(&uq->uq_key);
	}

	curthread->td_rtcgen = 0;
	return (error);
}
/*
 * Convert a userspace address into a unique logical address.
 */
int
umtx_key_get(const void *addr, int type, int share, struct umtx_key *key)
{
	struct thread *td = curthread;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;

	key->type = type;
	if (share == THREAD_SHARE) {
		key->shared = 0;
		key->info.private.vs = td->td_proc->p_vmspace;
		key->info.private.addr = (uintptr_t)addr;
	} else {
		MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
		map = &td->td_proc->p_vmspace->vm_map;
		if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
		    &entry, &key->info.shared.object, &pindex, &prot,
		    &wired) != KERN_SUCCESS) {
			return (EFAULT);
		}

		if ((share == PROCESS_SHARE) ||
		    (share == AUTO_SHARE &&
		     VM_INHERIT_SHARE == entry->inheritance)) {
			key->shared = 1;
			key->info.shared.offset = (vm_offset_t)addr -
			    entry->start + entry->offset;
			vm_object_reference(key->info.shared.object);
		} else {
			key->shared = 0;
			key->info.private.vs = td->td_proc->p_vmspace;
			key->info.private.addr = (uintptr_t)addr;
		}
		vm_map_lookup_done(map, entry);
	}

	umtxq_hash(key);
	return (0);
}
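/*
 * Key derivation summary: a process-private object is identified by
 * the pair (vmspace, virtual address), while a process-shared object
 * is identified by (backing VM object, offset within the object), so
 * that different mappings of the same page in different processes
 * hash to the same chain and the same wait queue.
 */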
void
umtx_key_release(struct umtx_key *key)
{

	if (key->shared)
		vm_object_deallocate(key->info.shared.object);
}
/*
 * Fetch and compare value; sleep on the address if the value has not changed.
 */
static int
do_wait(struct thread *td, void *addr, u_long id,
    struct _umtx_time *timeout, int compat32, int is_private)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	u_long tmp;
	uint32_t tmp32;
	int error = 0;

	uq = td->td_umtxq;
	if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
	    is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	if (compat32 == 0) {
		error = fueword(addr, &tmp);
		if (error != 0)
			error = EFAULT;
	} else {
		error = fueword32(addr, &tmp32);
		if (error == 0)
			tmp = tmp32;
		else
			error = EFAULT;
	}
	umtxq_lock(&uq->uq_key);
	if (error == 0) {
		if (tmp == id)
			error = umtxq_sleep(uq, "uwait", timeout == NULL ?
			    NULL : &timo);
		if ((uq->uq_flags & UQF_UMTXQ) == 0)
			error = 0;
		else
			umtxq_remove(uq);
	} else if ((uq->uq_flags & UQF_UMTXQ) != 0) {
		umtxq_remove(uq);
	}
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
/*
 * Wake up threads sleeping on the specified address.
 */
int
kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
{
	struct umtx_key key;
	int ret;

	if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
	    is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
		return (ret);
	umtxq_lock(&key);
	umtxq_signal(&key, n_wake);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (0);
}
/*
 * Lock PTHREAD_PRIO_NONE protocol POSIX mutex.
 */
static int
do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int mode)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t owner, old, id;
	int error, rv;

	id = td->td_tid;
	uq = td->td_umtxq;
	error = 0;
	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	/*
	 * Care must be exercised when dealing with the umtx structure.
	 * It can fault on any access.
	 */
	for (;;) {
		rv = fueword32(&m->m_owner, &owner);
		if (rv == -1)
			return (EFAULT);
		if (mode == _UMUTEX_WAIT) {
			if (owner == UMUTEX_UNOWNED ||
			    owner == UMUTEX_CONTESTED ||
			    owner == UMUTEX_RB_OWNERDEAD ||
			    owner == UMUTEX_RB_NOTRECOV)
				return (0);
		} else {
			/*
			 * Robust mutex terminated.  Kernel duty is to
			 * return EOWNERDEAD to the userspace.  The
			 * umutex.m_flags UMUTEX_NONCONSISTENT is set
			 * by the common userspace code.
			 */
			if (owner == UMUTEX_RB_OWNERDEAD) {
				rv = casueword32(&m->m_owner,
				    UMUTEX_RB_OWNERDEAD, &owner,
				    id | UMUTEX_CONTESTED);
				if (rv == -1)
					return (EFAULT);
				if (owner == UMUTEX_RB_OWNERDEAD)
					return (EOWNERDEAD); /* success */
				rv = umtxq_check_susp(td);
				if (rv != 0)
					return (rv);
				continue;
			}
			if (owner == UMUTEX_RB_NOTRECOV)
				return (ENOTRECOVERABLE);

			/*
			 * Try the uncontested case.  This should be
			 * done in userland.
			 */
			rv = casueword32(&m->m_owner, UMUTEX_UNOWNED,
			    &owner, id);
			/* The address was invalid. */
			if (rv == -1)
				return (EFAULT);

			/* The acquire succeeded. */
			if (owner == UMUTEX_UNOWNED)
				return (0);

			/*
			 * If no one owns it but it is contested, try
			 * to acquire it.
			 */
			if (owner == UMUTEX_CONTESTED) {
				rv = casueword32(&m->m_owner,
				    UMUTEX_CONTESTED, &owner,
				    id | UMUTEX_CONTESTED);
				/* The address was invalid. */
				if (rv == -1)
					return (EFAULT);
				if (owner == UMUTEX_CONTESTED)
					return (0);

				rv = umtxq_check_susp(td);
				if (rv != 0)
					return (rv);

				/*
				 * If this failed, the lock has
				 * changed; restart.
				 */
				continue;
			}
		}

		if (mode == _UMUTEX_TRY)
			return (EBUSY);

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			return (error);

		if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
		    GET_SHARE(flags), &uq->uq_key)) != 0)
			return (error);

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		rv = casueword32(&m->m_owner, owner, &old,
		    owner | UMUTEX_CONTESTED);

		/* The address was invalid. */
		if (rv == -1) {
			umtxq_lock(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/*
		 * If we set the contested bit, sleep; otherwise the lock
		 * changed and we need to retry, or we lost a race to the
		 * thread unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		if (old == owner)
			error = umtxq_sleep(uq, "umtxn", timeout == NULL ?
			    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);

		if (error == 0)
			error = umtxq_check_susp(td);
	}

	return (0);
}
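/*
 * The locking protocol above, in short: userland CASes
 * UMUTEX_UNOWNED -> tid for the fast path; on contention the kernel
 * CASes the contested bit into m_owner and sleeps on the key's wait
 * queue, and the unlocker is then forced into the kernel
 * (do_unlock_normal below) to wake one waiter.
 */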
/*
 * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex.
 */
static int
do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	uint32_t owner, old, id, newlock;
	int error, count;

	id = td->td_tid;
	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	newlock = umtx_unlock_val(flags, rb);
	if ((owner & UMUTEX_CONTESTED) == 0) {
		error = casueword32(&m->m_owner, owner, &old, newlock);
		if (error == -1)
			return (EFAULT);
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is zero or one thread only waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	if (count > 1)
		newlock |= UMUTEX_CONTESTED;
	error = casueword32(&m->m_owner, owner, &old, newlock);
	umtxq_lock(&key);
	umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (error == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}
/*
 * Check if the mutex is available and wake up a waiter;
 * this applies to simple mutexes only.
 */
static int
do_wake_umutex(struct thread *td, struct umutex *m)
{
	struct umtx_key key;
	uint32_t owner, flags;
	int error, count;

	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != 0 && owner != UMUTEX_RB_OWNERDEAD &&
	    owner != UMUTEX_RB_NOTRECOV)
		return (0);

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	if (count <= 1 && owner != UMUTEX_RB_OWNERDEAD &&
	    owner != UMUTEX_RB_NOTRECOV) {
		error = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    UMUTEX_UNOWNED);
		if (error == -1)
			error = EFAULT;
	}

	umtxq_lock(&key);
	if (error == 0 && count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 ||
	    owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV))
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
/*
 * Check if the mutex has waiters and try to fix the contention bit.
 */
static int
do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
{
	struct umtx_key key;
	uint32_t owner, old;
	int type, count, error;

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT |
	    UMUTEX_ROBUST)) {
	case 0:
		type = TYPE_NORMAL_UMUTEX;
		break;
	case UMUTEX_PRIO_INHERIT:
		type = TYPE_PI_UMUTEX;
		break;
	case (UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST):
		type = TYPE_PI_ROBUST_UMUTEX;
		break;
	case UMUTEX_PRIO_PROTECT:
		type = TYPE_PP_UMUTEX;
		break;
	case (UMUTEX_PRIO_PROTECT | UMUTEX_ROBUST):
		type = TYPE_PP_ROBUST_UMUTEX;
		break;
	default:
		return (EINVAL);
	}
	if ((error = umtx_key_get(m, type, GET_SHARE(flags), &key)) != 0)
		return (error);

	owner = 0;
	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * Only repair the contention bit if there is a waiter: this means
	 * the mutex is still being referenced by userland code; otherwise
	 * don't update any memory.
	 */
	if (count > 1) {
		error = fueword32(&m->m_owner, &owner);
		if (error == -1)
			error = EFAULT;
		while (error == 0 && (owner & UMUTEX_CONTESTED) == 0) {
			error = casueword32(&m->m_owner, owner, &old,
			    owner | UMUTEX_CONTESTED);
			if (error == -1) {
				error = EFAULT;
				break;
			}
			if (old == owner)
				break;
			owner = old;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}
	} else if (count == 1) {
		error = fueword32(&m->m_owner, &owner);
		if (error == -1)
			error = EFAULT;
		while (error == 0 && (owner & ~UMUTEX_CONTESTED) != 0 &&
		    (owner & UMUTEX_CONTESTED) == 0) {
			error = casueword32(&m->m_owner, owner, &old,
			    owner | UMUTEX_CONTESTED);
			if (error == -1) {
				error = EFAULT;
				break;
			}
			if (old == owner)
				break;
			owner = old;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}
	}

	umtxq_lock(&key);
	if (error == EFAULT) {
		umtxq_signal(&key, INT_MAX);
	} else if (count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 ||
	    owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV))
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
static inline struct umtx_pi *
umtx_pi_alloc(int flags)
{
	struct umtx_pi *pi;

	pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
	TAILQ_INIT(&pi->pi_blocked);
	atomic_add_int(&umtx_pi_allocated, 1);
	return (pi);
}

static inline void
umtx_pi_free(struct umtx_pi *pi)
{
	uma_zfree(umtx_pi_zone, pi);
	atomic_add_int(&umtx_pi_allocated, -1);
}

/*
 * Adjust the thread's position on a pi_state after its priority has
 * been changed.
 */
static int
umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
{
	struct umtx_q *uq, *uq1, *uq2;
	struct thread *td1;

	mtx_assert(&umtx_lock, MA_OWNED);
	if (pi == NULL)
		return (0);

	uq = td->td_umtxq;

	/*
	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if either its priority is lower than
	 * that of the previous thread or higher than that of the next
	 * thread.
	 */
	uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
	uq2 = TAILQ_NEXT(uq, uq_lockq);
	if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
	    (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved to.
		 */
		TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
		TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
			td1 = uq1->uq_thread;
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (UPRI(td1) > UPRI(td))
				break;
		}
		if (uq1 == NULL)
			TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
		else
			TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
	}
	return (1);
}

static struct umtx_pi *
umtx_pi_next(struct umtx_pi *pi)
{
	struct umtx_q *uq_owner;

	if (pi->pi_owner == NULL)
		return (NULL);
	uq_owner = pi->pi_owner->td_umtxq;
	if (uq_owner == NULL)
		return (NULL);
	return (uq_owner->uq_pi_blocked);
}

/*
 * Floyd's Cycle-Finding Algorithm.
 */
static bool
umtx_pi_check_loop(struct umtx_pi *pi)
{
	struct umtx_pi *pi1;	/* fast iterator */

	mtx_assert(&umtx_lock, MA_OWNED);
	if (pi == NULL)
		return (false);
	pi1 = pi;		/* slow iterator */
	for (;;) {
		pi = umtx_pi_next(pi);
		if (pi == NULL)
			break;
		pi1 = umtx_pi_next(pi1);
		if (pi1 == NULL)
			break;
		pi1 = umtx_pi_next(pi1);
		if (pi1 == NULL)
			break;
		if (pi == pi1)
			return (true);
	}
	return (false);
}
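/*
 * The loop above walks the blocked-on/owner graph with two cursors,
 * the slow one advancing one step and the fast one two steps per
 * iteration; if the fast cursor ever catches the slow one, the chain
 * of "mutex -> owner -> mutex the owner blocks on" contains a cycle
 * (a userland-induced deadlock), and propagation must stop instead of
 * looping forever.
 */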
/*
 * Propagate priority when a thread is blocked on a POSIX
 * PI mutex.
 */
static void
umtx_propagate_priority(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;
	int pri;

	mtx_assert(&umtx_lock, MA_OWNED);
	pri = UPRI(td);
	uq = td->td_umtxq;
	pi = uq->uq_pi_blocked;
	if (pi == NULL)
		return;
	if (umtx_pi_check_loop(pi))
		return;

	for (;;) {
		td = pi->pi_owner;
		if (td == NULL || td == curthread)
			return;

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		thread_lock(td);
		if (td->td_lend_user_pri > pri)
			sched_lend_user_prio(td, pri);
		else {
			thread_unlock(td);
			break;
		}
		thread_unlock(td);

		/*
		 * Pick up the lock that td is blocked on.
		 */
		uq = td->td_umtxq;
		pi = uq->uq_pi_blocked;
		if (pi == NULL)
			break;
		/* Resort td on the list if needed. */
		umtx_pi_adjust_thread(pi, td);
	}
}

/*
 * Unpropagate priority for a PI mutex when a thread blocked on
 * it is interrupted by a signal or resumed by others.
 */
static void
umtx_repropagate_priority(struct umtx_pi *pi)
{
	struct umtx_q *uq, *uq_owner;
	struct umtx_pi *pi2;
	int pri;

	mtx_assert(&umtx_lock, MA_OWNED);

	if (umtx_pi_check_loop(pi))
		return;
	while (pi != NULL && pi->pi_owner != NULL) {
		pri = PRI_MAX;
		uq_owner = pi->pi_owner->td_umtxq;

		TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
			uq = TAILQ_FIRST(&pi2->pi_blocked);
			if (uq != NULL) {
				if (pri > UPRI(uq->uq_thread))
					pri = UPRI(uq->uq_thread);
			}
		}

		if (pri > uq_owner->uq_inherited_pri)
			pri = uq_owner->uq_inherited_pri;
		thread_lock(pi->pi_owner);
		sched_lend_user_prio(pi->pi_owner, pri);
		thread_unlock(pi->pi_owner);
		if ((pi = uq_owner->uq_pi_blocked) != NULL)
			umtx_pi_adjust_thread(pi, uq_owner->uq_thread);
	}
}

/*
 * Insert a PI mutex into the owned list.
 */
static void
umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
{
	struct umtx_q *uq_owner;

	uq_owner = owner->td_umtxq;
	mtx_assert(&umtx_lock, MA_OWNED);
	MPASS(pi->pi_owner == NULL);
	pi->pi_owner = owner;
	TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);
}

/*
 * Disown a PI mutex, and remove it from the owned list.
 */
static void
umtx_pi_disown(struct umtx_pi *pi)
{

	mtx_assert(&umtx_lock, MA_OWNED);
	TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested, pi, pi_link);
	pi->pi_owner = NULL;
}

/*
 * Claim ownership of a PI mutex.
 */
static int
umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
{
	struct umtx_q *uq;
	int pri;

	mtx_lock(&umtx_lock);
	if (pi->pi_owner == owner) {
		mtx_unlock(&umtx_lock);
		return (0);
	}

	if (pi->pi_owner != NULL) {
		/*
		 * Userland may have already messed up the mutex, sigh.
		 */
		mtx_unlock(&umtx_lock);
		return (EPERM);
	}
	umtx_pi_setowner(pi, owner);
	uq = TAILQ_FIRST(&pi->pi_blocked);
	if (uq != NULL) {
		pri = UPRI(uq->uq_thread);
		thread_lock(owner);
		if (pri < UPRI(owner))
			sched_lend_user_prio(owner, pri);
		thread_unlock(owner);
	}
	mtx_unlock(&umtx_lock);
	return (0);
}

/*
 * Adjust a thread's position in the blocked list of its PI mutex;
 * this may trigger another round of priority propagation.
 */
void
umtx_pi_adjust(struct thread *td, u_char oldpri)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;

	uq = td->td_umtxq;
	mtx_lock(&umtx_lock);
	/*
	 * Pick up the lock that td is blocked on.
	 */
	pi = uq->uq_pi_blocked;
	if (pi != NULL) {
		umtx_pi_adjust_thread(pi, td);
		umtx_repropagate_priority(pi);
	}
	mtx_unlock(&umtx_lock);
}

/*
 * Sleep on a PI mutex.
 */
static int
umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner,
    const char *wmesg, struct abs_timeout *timo, bool shared)
{
	struct umtxq_chain *uc;
	struct thread *td, *td1;
	struct umtx_q *uq1;
	int error, pri;

	error = 0;
	td = uq->uq_thread;
	KASSERT(td == curthread, ("inconsistent uq_thread"));
	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));
	umtxq_insert(uq);
	mtx_lock(&umtx_lock);
	if (pi->pi_owner == NULL) {
		mtx_unlock(&umtx_lock);
		td1 = tdfind(owner, shared ? -1 : td->td_proc->p_pid);
		mtx_lock(&umtx_lock);
		if (td1 != NULL) {
			if (pi->pi_owner == NULL)
				umtx_pi_setowner(pi, td1);
			PROC_UNLOCK(td1->td_proc);
		}
	}

	TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
		pri = UPRI(uq1->uq_thread);
		if (pri > UPRI(td))
			break;
	}

	if (uq1 != NULL)
		TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
	else
		TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);

	uq->uq_pi_blocked = pi;
	thread_lock(td);
	td->td_flags |= TDF_UPIBLOCKED;
	thread_unlock(td);
	umtx_propagate_priority(td);
	mtx_unlock(&umtx_lock);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, wmesg, timo);
	umtxq_remove(uq);

	mtx_lock(&umtx_lock);
	uq->uq_pi_blocked = NULL;
	thread_lock(td);
	td->td_flags &= ~TDF_UPIBLOCKED;
	thread_unlock(td);
	TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
	umtx_repropagate_priority(pi);
	mtx_unlock(&umtx_lock);
	umtxq_unlock(&uq->uq_key);

	return (error);
}
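/*
 * Summary of the blocking path above: the waiter is inserted into
 * pi_blocked sorted by UPRI(), the chain is unbusied only after the
 * waiter is queued, and both queueing and unqueueing re-run the
 * propagation code so that the owner's lent priority always reflects
 * its highest-priority blocked thread.
 */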
/*
 * Add a reference to a PI mutex.
 */
static void
umtx_pi_ref(struct umtx_pi *pi)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	UMTXQ_LOCKED_ASSERT(uc);
	pi->pi_refcount++;
}

/*
 * Decrease the reference count of a PI mutex; if the count
 * drops to zero, its memory is freed.
 */
static void
umtx_pi_unref(struct umtx_pi *pi)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	UMTXQ_LOCKED_ASSERT(uc);
	KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
	if (--pi->pi_refcount == 0) {
		mtx_lock(&umtx_lock);
		if (pi->pi_owner != NULL)
			umtx_pi_disown(pi);
		KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
		    ("blocked queue not empty"));
		mtx_unlock(&umtx_lock);
		TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
		umtx_pi_free(pi);
	}
}

/*
 * Find a PI mutex in the hash table.
 */
static struct umtx_pi *
umtx_pi_lookup(struct umtx_key *key)
{
	struct umtxq_chain *uc;
	struct umtx_pi *pi;

	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);

	TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
		if (umtx_key_match(&pi->pi_key, key)) {
			return (pi);
		}
	}
	return (NULL);
}

/*
 * Insert a PI mutex into the hash table.
 */
static inline void
umtx_pi_insert(struct umtx_pi *pi)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	UMTXQ_LOCKED_ASSERT(uc);
	TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
}
/*
 * Lock a PI mutex.
 */
static int
do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	struct umtx_pi *pi, *new_pi;
	uint32_t id, old_owner, owner, old;
	int error, rv;

	id = td->td_tid;
	uq = td->td_umtxq;

	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	pi = umtx_pi_lookup(&uq->uq_key);
	if (pi == NULL) {
		new_pi = umtx_pi_alloc(M_NOWAIT);
		if (new_pi == NULL) {
			umtxq_unlock(&uq->uq_key);
			new_pi = umtx_pi_alloc(M_WAITOK);
			umtxq_lock(&uq->uq_key);
			pi = umtx_pi_lookup(&uq->uq_key);
			if (pi != NULL) {
				umtx_pi_free(new_pi);
				new_pi = NULL;
			}
		}
		if (new_pi != NULL) {
			new_pi->pi_key = uq->uq_key;
			umtx_pi_insert(new_pi);
			pi = new_pi;
		}
	}
	umtx_pi_ref(pi);
	umtxq_unlock(&uq->uq_key);

	/*
	 * Care must be exercised when dealing with the umtx structure.
	 * It can fault on any access.
	 */
	for (;;) {
		/*
		 * Try the uncontested case.  This should be done in userland.
		 */
		rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, &owner, id);
		/* The address was invalid. */
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		/* The acquire succeeded. */
		if (owner == UMUTEX_UNOWNED) {
			error = 0;
			break;
		}

		if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
			break;
		}

		/* If no one owns it but it is contested, try to acquire it. */
		if (owner == UMUTEX_CONTESTED || owner == UMUTEX_RB_OWNERDEAD) {
			old_owner = owner;
			rv = casueword32(&m->m_owner, owner, &owner,
			    id | UMUTEX_CONTESTED);
			/* The address was invalid. */
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (owner == old_owner) {
				umtxq_lock(&uq->uq_key);
				umtxq_busy(&uq->uq_key);
				error = umtx_pi_claim(pi, td);
				umtxq_unbusy(&uq->uq_key);
				umtxq_unlock(&uq->uq_key);
				if (error != 0) {
					/*
					 * Since we're going to return an
					 * error, restore the m_owner to its
					 * previous, unowned state to avoid
					 * compounding the problem.
					 */
					(void)casuword32(&m->m_owner,
					    id | UMUTEX_CONTESTED,
					    old_owner);
				}
				if (error == 0 &&
				    old_owner == UMUTEX_RB_OWNERDEAD)
					error = EOWNERDEAD;
				break;
			}

			error = umtxq_check_susp(td);
			if (error != 0)
				break;

			/* If this failed the lock has changed, restart. */
			continue;
		}

		if ((owner & ~UMUTEX_CONTESTED) == id) {
			error = EDEADLK;
			break;
		}

		if (try != 0) {
			error = EBUSY;
			break;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		rv = casueword32(&m->m_owner, owner, &old, owner |
		    UMUTEX_CONTESTED);

		/* The address was invalid. */
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}

		umtxq_lock(&uq->uq_key);
		/*
		 * If we set the contested bit, sleep; otherwise the lock
		 * changed and we need to retry, or we lost a race to the
		 * thread unlocking the umtx.  Note that the
		 * UMUTEX_RB_OWNERDEAD value for owner is impossible here.
		 */
		if (old == owner) {
			error = umtxq_sleep_pi(uq, pi,
			    owner & ~UMUTEX_CONTESTED,
			    "umtxpi", timeout == NULL ? NULL : &timo,
			    (flags & USYNC_PROCESS_SHARED) != 0);
			if (error != 0)
				continue;
		} else {
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
		}

		error = umtxq_check_susp(td);
		if (error != 0)
			break;
	}

	umtxq_lock(&uq->uq_key);
	umtx_pi_unref(pi);
	umtxq_unlock(&uq->uq_key);

	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Unlock a PI mutex.
 */
static int
do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	struct umtx_q *uq_first, *uq_first2, *uq_me;
	struct umtx_pi *pi, *pi2;
	uint32_t id, new_owner, old, owner;
	int count, error, pri;

	id = td->td_tid;
	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	new_owner = umtx_unlock_val(flags, rb);

	/* This should be done in userland */
	if ((owner & UMUTEX_CONTESTED) == 0) {
		error = casueword32(&m->m_owner, owner, &old, new_owner);
		if (error == -1)
			return (EFAULT);
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count_pi(&key, &uq_first);
	if (uq_first != NULL) {
		mtx_lock(&umtx_lock);
		pi = uq_first->uq_pi_blocked;
		KASSERT(pi != NULL, ("pi == NULL?"));
		if (pi->pi_owner != td && !(rb && pi->pi_owner == NULL)) {
			mtx_unlock(&umtx_lock);
			umtxq_unbusy(&key);
			umtxq_unlock(&key);
			umtx_key_release(&key);
			/* userland messed up the mutex */
			return (EPERM);
		}
		uq_me = td->td_umtxq;
		if (pi->pi_owner == td)
			umtx_pi_disown(pi);
		/* get the highest-priority thread which is still sleeping. */
		uq_first = TAILQ_FIRST(&pi->pi_blocked);
		while (uq_first != NULL &&
		    (uq_first->uq_flags & UQF_UMTXQ) == 0) {
			uq_first = TAILQ_NEXT(uq_first, uq_lockq);
		}
		pri = PRI_MAX;
		TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
			uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
			if (uq_first2 != NULL) {
				if (pri > UPRI(uq_first2->uq_thread))
					pri = UPRI(uq_first2->uq_thread);
			}
		}
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
		if (uq_first)
			umtxq_signal_thread(uq_first);
	} else {
		pi = umtx_pi_lookup(&key);
		/*
		 * A umtx_pi can exist if a signal or timeout removed the
		 * last waiter from the umtxq, but there is still
		 * a thread in do_lock_pi() holding the umtx_pi.
		 */
		if (pi != NULL) {
			/*
			 * The umtx_pi can be unowned, such as when a thread
			 * has just entered do_lock_pi(), allocated the
			 * umtx_pi, and unlocked the umtxq.
			 * If the current thread owns it, it must disown it.
			 */
			mtx_lock(&umtx_lock);
			if (pi->pi_owner == td)
				umtx_pi_disown(pi);
			mtx_unlock(&umtx_lock);
		}
	}
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is zero or one thread only waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	if (count > 1)
		new_owner |= UMUTEX_CONTESTED;
	error = casueword32(&m->m_owner, owner, &old, new_owner);

	umtxq_unbusy_unlocked(&key);
	umtx_key_release(&key);
	if (error == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}
/*
 * Lock a PP mutex.
 */
static int
do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
{
	struct abs_timeout timo;
	struct umtx_q *uq, *uq2;
	struct umtx_pi *pi;
	uint32_t ceiling;
	uint32_t owner, id;
	int error, pri, old_inherited_pri, su, rv;

	id = td->td_tid;
	uq = td->td_umtxq;
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
	for (;;) {
		old_inherited_pri = uq->uq_inherited_pri;
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		rv = fueword32(&m->m_ceilings[0], &ceiling);
		if (rv == -1) {
			error = EFAULT;
			break;
		}
		ceiling = RTP_PRIO_MAX - ceiling;
		if (ceiling > RTP_PRIO_MAX) {
			error = EINVAL;
			break;
		}

		mtx_lock(&umtx_lock);
		if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
			mtx_unlock(&umtx_lock);
			error = EINVAL;
			break;
		}
		if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
			uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
			thread_lock(td);
			if (uq->uq_inherited_pri < UPRI(td))
				sched_lend_user_prio(td, uq->uq_inherited_pri);
			thread_unlock(td);
		}
		mtx_unlock(&umtx_lock);

		rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    id | UMUTEX_CONTESTED);
		/* The address was invalid. */
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		if (owner == UMUTEX_CONTESTED) {
			error = 0;
			break;
		} else if (owner == UMUTEX_RB_OWNERDEAD) {
			rv = casueword32(&m->m_owner, UMUTEX_RB_OWNERDEAD,
			    &owner, id | UMUTEX_CONTESTED);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (owner == UMUTEX_RB_OWNERDEAD) {
				error = EOWNERDEAD; /* success */
				break;
			}
			error = 0;
		} else if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
			break;
		}

		if (try != 0) {
			error = EBUSY;
			break;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		umtxq_lock(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		error = umtxq_sleep(uq, "umtxpp", timeout == NULL ?
		    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);

		mtx_lock(&umtx_lock);
		uq->uq_inherited_pri = old_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}

	if (error != 0 && error != EOWNERDEAD) {
		mtx_lock(&umtx_lock);
		uq->uq_inherited_pri = old_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}

	umtxq_unbusy_unlocked(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Unlock a PP mutex.
 */
static int
do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	struct umtx_q *uq, *uq2;
	struct umtx_pi *pi;
	uint32_t id, owner, rceiling;
	int error, pri, new_inherited_pri, su;

	id = td->td_tid;
	uq = td->td_umtxq;
	su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);

	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
	if (error != 0)
		return (error);

	if (rceiling == -1)
		new_inherited_pri = PRI_MAX;
	else {
		rceiling = RTP_PRIO_MAX - rceiling;
		if (rceiling > RTP_PRIO_MAX)
			return (EINVAL);
		new_inherited_pri = PRI_MIN_REALTIME + rceiling;
	}

	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	umtxq_unlock(&key);
	/*
	 * For a priority-protected mutex, always set the unlocked state
	 * to UMUTEX_CONTESTED, so that userland always enters the kernel
	 * to lock the mutex.  This is necessary because the thread
	 * priority has to be adjusted for such mutexes.
	 */
	error = suword32(&m->m_owner, umtx_unlock_val(flags, rb) |
	    UMUTEX_CONTESTED);

	umtxq_lock(&key);
	if (error == 0)
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);

	if (error == -1)
		error = EFAULT;
	else {
		mtx_lock(&umtx_lock);
		if (su != 0)
			uq->uq_inherited_pri = new_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}
	umtx_key_release(&key);
	return (error);
}
static int
do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
    uint32_t *old_ceiling)
{
	struct umtx_q *uq;
	uint32_t flags, id, owner, save_ceiling;
	int error, rv, rv1;

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);
	if (ceiling > RTP_PRIO_MAX)
		return (EINVAL);
	id = td->td_tid;
	uq = td->td_umtxq;
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);
	for (;;) {
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		rv = fueword32(&m->m_ceilings[0], &save_ceiling);
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    id | UMUTEX_CONTESTED);
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		if (owner == UMUTEX_CONTESTED) {
			rv = suword32(&m->m_ceilings[0], ceiling);
			rv1 = suword32(&m->m_owner, UMUTEX_CONTESTED);
			error = (rv == 0 && rv1 == 0) ? 0 : EFAULT;
			break;
		}

		if ((owner & ~UMUTEX_CONTESTED) == id) {
			rv = suword32(&m->m_ceilings[0], ceiling);
			error = rv == 0 ? 0 : EFAULT;
			break;
		}

		if (owner == UMUTEX_RB_OWNERDEAD) {
			error = EOWNERDEAD;
			break;
		} else if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
			break;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		/*
		 * If we set the contested bit, sleep; otherwise the lock
		 * changed and we need to retry, or we lost a race to the
		 * thread unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		error = umtxq_sleep(uq, "umtxpp", NULL);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
	}
	umtxq_lock(&uq->uq_key);
	if (error == 0)
		umtxq_signal(&uq->uq_key, INT_MAX);
	umtxq_unbusy(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	if (error == 0 && old_ceiling != NULL) {
		rv = suword32(old_ceiling, save_ceiling);
		error = rv == 0 ? 0 : EFAULT;
	}
	return (error);
}
/*
 * Lock a userland POSIX mutex.
 */
static int
do_lock_umutex(struct thread *td, struct umutex *m,
    struct _umtx_time *timeout, int mode)
{
	uint32_t flags;
	int error;

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
	case 0:
		error = do_lock_normal(td, m, flags, timeout, mode);
		break;
	case UMUTEX_PRIO_INHERIT:
		error = do_lock_pi(td, m, flags, timeout, mode);
		break;
	case UMUTEX_PRIO_PROTECT:
		error = do_lock_pp(td, m, flags, timeout, mode);
		break;
	default:
		return (EINVAL);
	}
	if (timeout == NULL) {
		if (error == EINTR && mode != _UMUTEX_WAIT)
			error = ERESTART;
	} else {
		/* Timed-locking is not restarted. */
		if (error == ERESTART)
			error = EINTR;
	}
	return (error);
}
/*
 * Unlock a userland POSIX mutex.
 */
static int
do_unlock_umutex(struct thread *td, struct umutex *m, bool rb)
{
	uint32_t flags;
	int error;

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
	case 0:
		return (do_unlock_normal(td, m, flags, rb));
	case UMUTEX_PRIO_INHERIT:
		return (do_unlock_pi(td, m, flags, rb));
	case UMUTEX_PRIO_PROTECT:
		return (do_unlock_pp(td, m, flags, rb));
	}

	return (EINVAL);
}
static int
do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
    struct timespec *timeout, u_long wflags)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, clockid, hasw;
	int error;

	uq = td->td_umtxq;
	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if ((wflags & CVWAIT_CLOCKID) != 0) {
		error = fueword32(&cv->c_clockid, &clockid);
		if (error == -1) {
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}
		if (clockid < CLOCK_REALTIME ||
		    clockid >= CLOCK_THREAD_CPUTIME_ID) {
			/* hmm, only HW clock id will work. */
			umtx_key_release(&uq->uq_key);
			return (EINVAL);
		}
	} else {
		clockid = CLOCK_REALTIME;
	}

	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);

	/*
	 * Set c_has_waiters to 1 before releasing the user mutex; also
	 * don't modify the cache line when unnecessary.
	 */
	error = fueword32(&cv->c_has_waiters, &hasw);
	if (error == 0 && hasw == 0)
		suword32(&cv->c_has_waiters, 1);

	umtxq_unbusy_unlocked(&uq->uq_key);

	error = do_unlock_umutex(td, m, false);

	if (timeout != NULL)
		abs_timeout_init(&timo, clockid, (wflags & CVWAIT_ABSTIME) != 0,
		    timeout);

	umtxq_lock(&uq->uq_key);
	if (error == 0) {
		error = umtxq_sleep(uq, "ucond", timeout == NULL ?
		    NULL : &timo);
	}

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		/*
		 * This must be a timeout, an interruption by a signal, or
		 * a spurious wakeup; clear the c_has_waiters flag when
		 * necessary.
		 */
		umtxq_busy(&uq->uq_key);
		if ((uq->uq_flags & UQF_UMTXQ) != 0) {
			int oldlen = uq->uq_cur_queue->length;
			umtxq_remove(uq);
			if (oldlen == 1) {
				umtxq_unlock(&uq->uq_key);
				suword32(&cv->c_has_waiters, 0);
				umtxq_lock(&uq->uq_key);
			}
		}
		umtxq_unbusy(&uq->uq_key);
		if (error == ERESTART)
			error = EINTR;
	}

	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Signal a userland condition variable.
 */
static int
do_cv_signal(struct thread *td, struct ucond *cv)
{
	struct umtx_key key;
	int error, cnt, nwake;
	uint32_t flags;

	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	nwake = umtxq_signal(&key, 1);
	if (cnt <= nwake) {
		umtxq_unlock(&key);
		error = suword32(&cv->c_has_waiters, 0);
		if (error == -1)
			error = EFAULT;
		umtxq_lock(&key);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}

static int
do_cv_broadcast(struct thread *td, struct ucond *cv)
{
	struct umtx_key key;
	int error;
	uint32_t flags;

	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	umtxq_signal(&key, INT_MAX);
	umtxq_unlock(&key);

	error = suword32(&cv->c_has_waiters, 0);
	if (error == -1)
		error = EFAULT;

	umtxq_unbusy_unlocked(&key);

	umtx_key_release(&key);
	return (error);
}
static int
do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag,
    struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, wrflags;
	int32_t state, oldstate;
	int32_t blocked_readers;
	int error, error1, rv;

	uq = td->td_umtxq;
	error = fueword32(&rwlock->rw_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	wrflags = URWLOCK_WRITE_OWNER;
	if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
		wrflags |= URWLOCK_WRITE_WAITERS;

	for (;;) {
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1) {
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/* try to lock it */
		while (!(state & wrflags)) {
			if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) {
				umtx_key_release(&uq->uq_key);
				return (EAGAIN);
			}
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state + 1);
			if (rv == -1) {
				umtx_key_release(&uq->uq_key);
				return (EFAULT);
			}
			if (oldstate == state) {
				umtx_key_release(&uq->uq_key);
				return (0);
			}
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
			state = oldstate;
		}

		if (error)
			break;

		/* grab monitor lock */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Re-read the state, in case it changed between the
		 * try-lock above and the check below.
		 */
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1)
			error = EFAULT;

		/* set read contention bit */
		while (error == 0 && (state & wrflags) &&
		    !(state & URWLOCK_READ_WAITERS)) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_READ_WAITERS);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (oldstate == state)
				goto sleep;
			state = oldstate;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}
		if (error != 0) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			break;
		}

		/* The state changed while setting the flags; restart. */
		if (!(state & wrflags)) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
			continue;
		}

sleep:
		/*
		 * The contention bit is set; before sleeping, increase
		 * the read-waiter count.
		 */
		rv = fueword32(&rwlock->rw_blocked_readers,
		    &blocked_readers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_readers, blocked_readers+1);

		while (state & wrflags) {
			umtxq_lock(&uq->uq_key);
			umtxq_insert(uq);
			umtxq_unbusy(&uq->uq_key);

			error = umtxq_sleep(uq, "urdlck", timeout == NULL ?
			    NULL : &timo);

			umtxq_busy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			if (error)
				break;
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
		}

		/* decrease the read-waiter count, and may clear the read contention bit */
		rv = fueword32(&rwlock->rw_blocked_readers,
		    &blocked_readers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_readers, blocked_readers-1);
		if (blocked_readers == 1) {
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				umtxq_unbusy_unlocked(&uq->uq_key);
				error = EFAULT;
				break;
			}
			for (;;) {
				rv = casueword32(&rwlock->rw_state, state,
				    &oldstate, state & ~URWLOCK_READ_WAITERS);
				if (rv == -1) {
					error = EFAULT;
					break;
				}
				if (oldstate == state)
					break;
				state = oldstate;
				error1 = umtxq_check_susp(td);
				if (error1 != 0) {
					if (error == 0)
						error = error1;
					break;
				}
			}
		}

		umtxq_unbusy_unlocked(&uq->uq_key);
		if (error != 0)
			break;
	}
	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
static int
do_rw_wrlock(struct thread *td, struct urwlock *rwlock,
    struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags;
	int32_t state, oldstate;
	int32_t blocked_writers;
	int32_t blocked_readers;
	int error, error1, rv;

	uq = td->td_umtxq;
	error = fueword32(&rwlock->rw_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	blocked_readers = 0;
	for (;;) {
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1) {
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}
		while (!(state & URWLOCK_WRITE_OWNER) &&
		    URWLOCK_READER_COUNT(state) == 0) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_WRITE_OWNER);
			if (rv == -1) {
				umtx_key_release(&uq->uq_key);
				return (EFAULT);
			}
			if (oldstate == state) {
				umtx_key_release(&uq->uq_key);
				return (0);
			}
			state = oldstate;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}

		if (error) {
			if (!(state & (URWLOCK_WRITE_OWNER|URWLOCK_WRITE_WAITERS)) &&
			    blocked_readers != 0) {
				umtxq_lock(&uq->uq_key);
				umtxq_busy(&uq->uq_key);
				umtxq_signal_queue(&uq->uq_key, INT_MAX, UMTX_SHARED_QUEUE);
				umtxq_unbusy(&uq->uq_key);
				umtxq_unlock(&uq->uq_key);
			}
			break;
		}

		/* grab monitor lock */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Re-read the state, in case it changed between the
		 * try-lock above and the check below.
		 */
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1)
			error = EFAULT;

		while (error == 0 && ((state & URWLOCK_WRITE_OWNER) ||
		    URWLOCK_READER_COUNT(state) != 0) &&
		    (state & URWLOCK_WRITE_WAITERS) == 0) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_WRITE_WAITERS);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (oldstate == state)
				goto sleep;
			state = oldstate;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}
		if (error != 0) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			break;
		}

		if (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
			continue;
		}
sleep:
		rv = fueword32(&rwlock->rw_blocked_writers,
		    &blocked_writers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_writers, blocked_writers+1);

		while ((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
			umtxq_unbusy(&uq->uq_key);

			error = umtxq_sleep(uq, "uwrlck", timeout == NULL ?
			    NULL : &timo);

			umtxq_busy(&uq->uq_key);
			umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
			umtxq_unlock(&uq->uq_key);
			if (error)
				break;
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
		}

		rv = fueword32(&rwlock->rw_blocked_writers,
		    &blocked_writers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_writers, blocked_writers-1);
		if (blocked_writers == 1) {
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				umtxq_unbusy_unlocked(&uq->uq_key);
				error = EFAULT;
				break;
			}
			for (;;) {
				rv = casueword32(&rwlock->rw_state, state,
				    &oldstate, state & ~URWLOCK_WRITE_WAITERS);
				if (rv == -1) {
					error = EFAULT;
					break;
				}
				if (oldstate == state)
					break;
				state = oldstate;
				error1 = umtxq_check_susp(td);
				/*
				 * We are leaving the URWLOCK_WRITE_WAITERS
				 * behind, but this should not harm the
				 * correctness.
				 */
				if (error1 != 0) {
					if (error == 0)
						error = error1;
					break;
				}
			}
			rv = fueword32(&rwlock->rw_blocked_readers,
			    &blocked_readers);
			if (rv == -1) {
				umtxq_unbusy_unlocked(&uq->uq_key);
				error = EFAULT;
				break;
			}
		} else
			blocked_readers = 0;

		umtxq_unbusy_unlocked(&uq->uq_key);
	}

	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
static int
do_rw_unlock(struct thread *td, struct urwlock *rwlock)
{
	struct umtx_q *uq;
	uint32_t flags;
	int32_t state, oldstate;
	int error, rv, q, count;

	uq = td->td_umtxq;
	error = fueword32(&rwlock->rw_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	error = fueword32(&rwlock->rw_state, &state);
	if (error == -1) {
		error = EFAULT;
		goto out;
	}
	if (state & URWLOCK_WRITE_OWNER) {
		for (;;) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state & ~URWLOCK_WRITE_OWNER);
			if (rv == -1) {
				error = EFAULT;
				goto out;
			}
			if (oldstate != state) {
				state = oldstate;
				if (!(oldstate & URWLOCK_WRITE_OWNER)) {
					error = EPERM;
					goto out;
				}
				error = umtxq_check_susp(td);
				if (error != 0)
					goto out;
			} else
				break;
		}
	} else if (URWLOCK_READER_COUNT(state) != 0) {
		for (;;) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state - 1);
			if (rv == -1) {
				error = EFAULT;
				goto out;
			}
			if (oldstate != state) {
				state = oldstate;
				if (URWLOCK_READER_COUNT(oldstate) == 0) {
					error = EPERM;
					goto out;
				}
				error = umtxq_check_susp(td);
				if (error != 0)
					goto out;
			} else
				break;
		}
	} else {
		error = EPERM;
		goto out;
	}

	count = 0;

	if (!(flags & URWLOCK_PREFER_READER)) {
		if (state & URWLOCK_WRITE_WAITERS) {
			count = 1;
			q = UMTX_EXCLUSIVE_QUEUE;
		} else if (state & URWLOCK_READ_WAITERS) {
			count = INT_MAX;
			q = UMTX_SHARED_QUEUE;
		}
	} else {
		if (state & URWLOCK_READ_WAITERS) {
			count = INT_MAX;
			q = UMTX_SHARED_QUEUE;
		} else if (state & URWLOCK_WRITE_WAITERS) {
			count = 1;
			q = UMTX_EXCLUSIVE_QUEUE;
		}
	}

	if (count) {
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_signal_queue(&uq->uq_key, count, q);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
	}
out:
	umtx_key_release(&uq->uq_key);
	return (error);
}
3097 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
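/*
 * Wait on the legacy _usem semaphore: set _has_waiters, re-check
 * _count, and sleep on the shared queue only when no token is
 * available. Kept for FreeBSD 9/10 binary compatibility.
 */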
static int
do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, count, count1;
	int error, rv;

	uq = td->td_umtxq;
	error = fueword32(&sem->_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	rv = casueword32(&sem->_has_waiters, 0, &count1, 1);
	if (rv == 0)
		rv = fueword32(&sem->_count, &count);
	if (rv == -1 || count != 0) {
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);
		return (rv == -1 ? EFAULT : 0);
	}
	umtxq_lock(&uq->uq_key);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		umtxq_remove(uq);
		/* A relative timeout cannot be restarted. */
		if (error == ERESTART && timeout != NULL &&
		    (timeout->_flags & UMTX_ABSTIME) == 0)
			error = EINTR;
	}
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}

/*
 * Signal a userland semaphore.
 */
static int
do_sem_wake(struct thread *td, struct _usem *sem)
{
	struct umtx_key key;
	int error, cnt;
	uint32_t flags;

	error = fueword32(&sem->_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	if (cnt > 0) {
		/*
		 * Check if count is greater than 0, this means the memory is
		 * still being referenced by user code, so we can safely
		 * update _has_waiters flag.
		 */
		if (cnt == 1) {
			umtxq_unlock(&key);
			error = suword32(&sem->_has_waiters, 0);
			umtxq_lock(&key);
			if (error == -1)
				error = EFAULT;
		}
		umtxq_signal(&key, 1);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
#endif
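
/*
 * Wait on a _usem2 semaphore. The _count word packs the counter and
 * the USEM_HAS_WAITERS bit, so a single CAS both re-checks for an
 * available token and advertises the sleeping thread to the wake side.
 */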
static int
do_sem2_wait(struct thread *td, struct _usem2 *sem, struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t count, flags;
	int error, rv;

	uq = td->td_umtxq;
	flags = fuword32(&sem->_flags);
	error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	rv = fueword32(&sem->_count, &count);
	if (rv == -1) {
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);
		return (EFAULT);
	}
	for (;;) {
		if (USEM_COUNT(count) != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (0);
		}
		if (count == USEM_HAS_WAITERS)
			break;
		rv = casueword32(&sem->_count, 0, &count, USEM_HAS_WAITERS);
		if (rv == -1) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}
		if (count == 0)
			break;
	}
	umtxq_lock(&uq->uq_key);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		umtxq_remove(uq);
		if (timeout != NULL && (timeout->_flags & UMTX_ABSTIME) == 0) {
			/* A relative timeout cannot be restarted. */
			if (error == ERESTART)
				error = EINTR;
			if (error == EINTR) {
				abs_timeout_update(&timo);
				timeout->_timeout = timo.end;
				timespecsub(&timeout->_timeout, &timo.cur);
			}
		}
	}
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}

/*
 * Signal a userland semaphore.
 */
static int
do_sem2_wake(struct thread *td, struct _usem2 *sem)
{
	struct umtx_key key;
	int error, cnt, rv;
	uint32_t count, flags;

	rv = fueword32(&sem->_flags, &flags);
	if (rv == -1)
		return (EFAULT);
	if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	if (cnt > 0) {
		/*
		 * If this was the last sleeping thread, clear the waiters
		 * flag in _count.
		 */
		if (cnt == 1) {
			umtxq_unlock(&key);
			rv = fueword32(&sem->_count, &count);
			while (rv != -1 && count & USEM_HAS_WAITERS)
				rv = casueword32(&sem->_count, count, &count,
				    count & ~USEM_HAS_WAITERS);
			if (rv == -1)
				error = EFAULT;
			umtxq_lock(&key);
		}

		umtxq_signal(&key, 1);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}

static inline int
umtx_copyin_timeout(const void *addr, struct timespec *tsp)
{
	int error;

	error = copyin(addr, tsp, sizeof(struct timespec));
	if (error == 0) {
		if (tsp->tv_sec < 0 ||
		    tsp->tv_nsec >= 1000000000 ||
		    tsp->tv_nsec < 0)
			error = EINVAL;
	}
	return (error);
}

static inline int
umtx_copyin_umtx_time(const void *addr, size_t size, struct _umtx_time *tp)
{
	int error;

	if (size <= sizeof(struct timespec)) {
		tp->_clockid = CLOCK_REALTIME;
		tp->_flags = 0;
		error = copyin(addr, &tp->_timeout, sizeof(struct timespec));
	} else
		error = copyin(addr, tp, sizeof(struct _umtx_time));
	if (error != 0)
		return (error);
	if (tp->_timeout.tv_sec < 0 ||
	    tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0)
		return (EINVAL);
	return (0);
}
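
/*
 * A caller may therefore pass either a bare struct timespec (treated
 * as a relative CLOCK_REALTIME timeout) or a full struct _umtx_time.
 * A minimal userland sketch of the latter (the variable name tmo is
 * illustrative only):
 *
 *	struct _umtx_time tmo;
 *	tmo._timeout.tv_sec = 1;
 *	tmo._timeout.tv_nsec = 0;
 *	tmo._flags = 0;			// relative; UMTX_ABSTIME for absolute
 *	tmo._clockid = CLOCK_MONOTONIC;
 */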
static int
__umtx_op_unimpl(struct thread *td, struct _umtx_op_args *uap)
{

	return (EOPNOTSUPP);
}

static int
__umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout, *tm_p;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 0, 0));
}

static int
__umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout, *tm_p;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
}

static int
__umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
}

static int
__umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
{

	return (kern_umtx_wake(td, uap->obj, uap->val, 0));
}
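
/*
 * UMTX_OP_NWAKE_PRIVATE takes a userland array of uap->val addresses
 * and wakes all waiters on each of them; the array is copied in and
 * processed in BATCH_SIZE chunks to bound kernel stack usage.
 */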
#define	BATCH_SIZE	128
static int
__umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap)
{
	char *uaddrs[BATCH_SIZE], **upp;
	int count, error, i, pos, tocopy;

	upp = (char **)uap->obj;
	error = 0;
	for (count = uap->val, pos = 0; count > 0; count -= tocopy,
	    pos += tocopy) {
		tocopy = MIN(count, BATCH_SIZE);
		error = copyin(upp + pos, uaddrs, tocopy * sizeof(char *));
		if (error != 0)
			break;
		for (i = 0; i < tocopy; ++i)
			kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
	}
	return (error);
}

static int
__umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
{

	return (kern_umtx_wake(td, uap->obj, uap->val, 1));
}

static int
__umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, 0));
}

static int
__umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY));
}

static int
__umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
}

static int
__umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_wake_umutex(td, uap->obj));
}

static int
__umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_unlock_umutex(td, uap->obj, false));
}

static int
__umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1));
}

static int
__umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
}

static int
__umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_cv_signal(td, uap->obj));
}

static int
__umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_cv_broadcast(td, uap->obj));
}

static int
__umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
	} else {
		error = umtx_copyin_umtx_time(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_wrlock(td, uap->obj, 0);
	} else {
		error = umtx_copyin_umtx_time(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_wrlock(td, uap->obj, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_rw_unlock(td, uap->obj));
}

#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
static int
__umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_sem_wait(td, uap->obj, tm_p));
}

static int
__umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_sem_wake(td, uap->obj));
}
#endif

static int
__umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_wake2_umutex(td, uap->obj, uap->val));
}

static int
__umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	size_t uasize;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		uasize = 0;
		tm_p = NULL;
	} else {
		uasize = (size_t)uap->uaddr1;
		error = umtx_copyin_umtx_time(uap->uaddr2, uasize, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	error = do_sem2_wait(td, uap->obj, tm_p);
	if (error == EINTR && uap->uaddr2 != NULL &&
	    (timeout._flags & UMTX_ABSTIME) == 0 &&
	    uasize >= sizeof(struct _umtx_time) + sizeof(struct timespec)) {
		error = copyout(&timeout._timeout,
		    (struct _umtx_time *)uap->uaddr2 + 1,
		    sizeof(struct timespec));
		if (error == 0)
			error = EINTR;
	}

	return (error);
}

static int
__umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap)
{

	return (do_sem2_wake(td, uap->obj));
}
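
/*
 * UMTX_OP_SHM support: an anonymous POSIX shared memory page is
 * registered per shared umtx key, hashed into umtx_shm_registry and
 * also linked from the backing VM object so the registration can be
 * torn down when that object terminates.
 */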
#define	USHM_OBJ_UMTX(o)						\
    ((struct umtx_shm_obj_list *)(&(o)->umtx_data))

#define	USHMF_REG_LINKED	0x0001
#define	USHMF_OBJ_LINKED	0x0002
struct umtx_shm_reg {
	TAILQ_ENTRY(umtx_shm_reg) ushm_reg_link;
	LIST_ENTRY(umtx_shm_reg) ushm_obj_link;
	struct umtx_key		ushm_key;
	struct ucred		*ushm_cred;
	struct shmfd		*ushm_obj;
	u_int			ushm_refcnt;
	u_int			ushm_flags;
};

LIST_HEAD(umtx_shm_obj_list, umtx_shm_reg);
TAILQ_HEAD(umtx_shm_reg_head, umtx_shm_reg);

static uma_zone_t umtx_shm_reg_zone;
static struct umtx_shm_reg_head umtx_shm_registry[UMTX_CHAINS];
static struct mtx umtx_shm_lock;
static struct umtx_shm_reg_head umtx_shm_reg_delfree =
    TAILQ_HEAD_INITIALIZER(umtx_shm_reg_delfree);
static void umtx_shm_free_reg(struct umtx_shm_reg *reg);

static void
umtx_shm_reg_delfree_tq(void *context __unused, int pending __unused)
{
	struct umtx_shm_reg_head d;
	struct umtx_shm_reg *reg, *reg1;

	TAILQ_INIT(&d);
	mtx_lock(&umtx_shm_lock);
	TAILQ_CONCAT(&d, &umtx_shm_reg_delfree, ushm_reg_link);
	mtx_unlock(&umtx_shm_lock);
	TAILQ_FOREACH_SAFE(reg, &d, ushm_reg_link, reg1) {
		TAILQ_REMOVE(&d, reg, ushm_reg_link);
		umtx_shm_free_reg(reg);
	}
}

static struct task umtx_shm_reg_delfree_task =
    TASK_INITIALIZER(0, umtx_shm_reg_delfree_tq, NULL);
static struct umtx_shm_reg *
umtx_shm_find_reg_locked(const struct umtx_key *key)
{
	struct umtx_shm_reg *reg;
	struct umtx_shm_reg_head *reg_head;

	KASSERT(key->shared, ("umtx_p_find_rg: private key"));
	mtx_assert(&umtx_shm_lock, MA_OWNED);
	reg_head = &umtx_shm_registry[key->hash];
	TAILQ_FOREACH(reg, reg_head, ushm_reg_link) {
		KASSERT(reg->ushm_key.shared,
		    ("non-shared key on reg %p %d", reg, reg->ushm_key.shared));
		if (reg->ushm_key.info.shared.object ==
		    key->info.shared.object &&
		    reg->ushm_key.info.shared.offset ==
		    key->info.shared.offset) {
			KASSERT(reg->ushm_key.type == TYPE_SHM, ("TYPE_USHM"));
			KASSERT(reg->ushm_refcnt > 0,
			    ("reg %p refcnt 0 onlist", reg));
			KASSERT((reg->ushm_flags & USHMF_REG_LINKED) != 0,
			    ("reg %p not linked", reg));
			reg->ushm_refcnt++;
			return (reg);
		}
	}
	return (NULL);
}

static struct umtx_shm_reg *
umtx_shm_find_reg(const struct umtx_key *key)
{
	struct umtx_shm_reg *reg;

	mtx_lock(&umtx_shm_lock);
	reg = umtx_shm_find_reg_locked(key);
	mtx_unlock(&umtx_shm_lock);
	return (reg);
}
static void
umtx_shm_free_reg(struct umtx_shm_reg *reg)
{

	chgumtxcnt(reg->ushm_cred->cr_ruidinfo, -1, 0);
	crfree(reg->ushm_cred);
	shm_drop(reg->ushm_obj);
	uma_zfree(umtx_shm_reg_zone, reg);
}

static bool
umtx_shm_unref_reg_locked(struct umtx_shm_reg *reg, bool force)
{
	bool res;

	mtx_assert(&umtx_shm_lock, MA_OWNED);
	KASSERT(reg->ushm_refcnt > 0, ("ushm_reg %p refcnt 0", reg));
	reg->ushm_refcnt--;
	res = reg->ushm_refcnt == 0;
	if (res || force) {
		if ((reg->ushm_flags & USHMF_REG_LINKED) != 0) {
			TAILQ_REMOVE(&umtx_shm_registry[reg->ushm_key.hash],
			    reg, ushm_reg_link);
			reg->ushm_flags &= ~USHMF_REG_LINKED;
		}
		if ((reg->ushm_flags & USHMF_OBJ_LINKED) != 0) {
			LIST_REMOVE(reg, ushm_obj_link);
			reg->ushm_flags &= ~USHMF_OBJ_LINKED;
		}
	}
	return (res);
}

static void
umtx_shm_unref_reg(struct umtx_shm_reg *reg, bool force)
{
	vm_object_t object;
	bool dofree;

	if (force) {
		object = reg->ushm_obj->shm_object;
		VM_OBJECT_WLOCK(object);
		object->flags |= OBJ_UMTXDEAD;
		VM_OBJECT_WUNLOCK(object);
	}
	mtx_lock(&umtx_shm_lock);
	dofree = umtx_shm_unref_reg_locked(reg, force);
	mtx_unlock(&umtx_shm_lock);
	if (dofree)
		umtx_shm_free_reg(reg);
}
static void
umtx_shm_object_init(vm_object_t object)
{

	LIST_INIT(USHM_OBJ_UMTX(object));
}

static void
umtx_shm_object_terminated(vm_object_t object)
{
	struct umtx_shm_reg *reg, *reg1;
	bool dofree;

	dofree = false;
	mtx_lock(&umtx_shm_lock);
	LIST_FOREACH_SAFE(reg, USHM_OBJ_UMTX(object), ushm_obj_link, reg1) {
		if (umtx_shm_unref_reg_locked(reg, true)) {
			TAILQ_INSERT_TAIL(&umtx_shm_reg_delfree, reg,
			    ushm_reg_link);
			dofree = true;
		}
	}
	mtx_unlock(&umtx_shm_lock);
	if (dofree)
		taskqueue_enqueue(taskqueue_thread, &umtx_shm_reg_delfree_task);
}
static int
umtx_shm_create_reg(struct thread *td, const struct umtx_key *key,
    struct umtx_shm_reg **res)
{
	struct umtx_shm_reg *reg, *reg1;
	struct ucred *cred;
	int error;

	reg = umtx_shm_find_reg(key);
	if (reg != NULL) {
		*res = reg;
		return (0);
	}
	cred = td->td_ucred;
	if (!chgumtxcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_UMTXP)))
		return (ENOMEM);
	reg = uma_zalloc(umtx_shm_reg_zone, M_WAITOK | M_ZERO);
	reg->ushm_refcnt = 1;
	bcopy(key, &reg->ushm_key, sizeof(*key));
	reg->ushm_obj = shm_alloc(td->td_ucred, O_RDWR);
	reg->ushm_cred = crhold(cred);
	error = shm_dotruncate(reg->ushm_obj, PAGE_SIZE);
	if (error != 0) {
		umtx_shm_free_reg(reg);
		return (error);
	}
	mtx_lock(&umtx_shm_lock);
	reg1 = umtx_shm_find_reg_locked(key);
	if (reg1 != NULL) {
		mtx_unlock(&umtx_shm_lock);
		umtx_shm_free_reg(reg);
		*res = reg1;
		return (0);
	}
	reg->ushm_refcnt++;
	TAILQ_INSERT_TAIL(&umtx_shm_registry[key->hash], reg, ushm_reg_link);
	LIST_INSERT_HEAD(USHM_OBJ_UMTX(key->info.shared.object), reg,
	    ushm_obj_link);
	reg->ushm_flags = USHMF_REG_LINKED | USHMF_OBJ_LINKED;
	mtx_unlock(&umtx_shm_lock);
	*res = reg;
	return (0);
}
static int
umtx_shm_alive(struct thread *td, void *addr)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	int res, ret;
	boolean_t wired;

	map = &td->td_proc->p_vmspace->vm_map;
	res = vm_map_lookup(&map, (uintptr_t)addr, VM_PROT_READ, &entry,
	    &object, &pindex, &prot, &wired);
	if (res != KERN_SUCCESS)
		return (EFAULT);
	if (object == NULL)
		ret = EFAULT;
	else
		ret = (object->flags & OBJ_UMTXDEAD) != 0 ? ENOTTY : 0;
	vm_map_lookup_done(map, entry);
	return (ret);
}

static void
umtx_shm_init(void)
{
	int i;

	umtx_shm_reg_zone = uma_zcreate("umtx_shm", sizeof(struct umtx_shm_reg),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&umtx_shm_lock, "umtxshm", NULL, MTX_DEF);
	for (i = 0; i < nitems(umtx_shm_registry); i++)
		TAILQ_INIT(&umtx_shm_registry[i]);
}
static int
umtx_shm(struct thread *td, void *addr, u_int flags)
{
	struct umtx_key key;
	struct umtx_shm_reg *reg;
	struct file *fp;
	int error, fd;

	if (__bitcount(flags & (UMTX_SHM_CREAT | UMTX_SHM_LOOKUP |
	    UMTX_SHM_DESTROY | UMTX_SHM_ALIVE)) != 1)
		return (EINVAL);
	if ((flags & UMTX_SHM_ALIVE) != 0)
		return (umtx_shm_alive(td, addr));
	error = umtx_key_get(addr, TYPE_SHM, PROCESS_SHARE, &key);
	if (error != 0)
		return (error);
	KASSERT(key.shared == 1, ("non-shared key"));
	if ((flags & UMTX_SHM_CREAT) != 0) {
		error = umtx_shm_create_reg(td, &key, &reg);
	} else {
		reg = umtx_shm_find_reg(&key);
		if (reg == NULL)
			error = ESRCH;
	}
	umtx_key_release(&key);
	if (error != 0)
		return (error);
	KASSERT(reg != NULL, ("no reg"));
	if ((flags & UMTX_SHM_DESTROY) != 0) {
		umtx_shm_unref_reg(reg, true);
	} else {
#if 0
#ifdef MAC
		error = mac_posixshm_check_open(td->td_ucred,
		    reg->ushm_obj, FFLAGS(O_RDWR));
		if (error == 0)
#endif
			error = shm_access(reg->ushm_obj, td->td_ucred,
			    FFLAGS(O_RDWR));
		if (error == 0)
#endif
			error = falloc_caps(td, &fp, &fd, O_CLOEXEC, NULL);
		if (error == 0) {
			shm_hold(reg->ushm_obj);
			finit(fp, FFLAGS(O_RDWR), DTYPE_SHM, reg->ushm_obj,
			    &shm_ops);
			td->td_retval[0] = fd;
			fdrop(fp, td);
		}
	}
	umtx_shm_unref_reg(reg, false);
	return (error);
}

static int
__umtx_op_shm(struct thread *td, struct _umtx_op_args *uap)
{

	return (umtx_shm(td, uap->uaddr1, uap->val));
}
static int
umtx_robust_lists(struct thread *td, struct umtx_robust_lists_params *rbp)
{

	td->td_rb_list = rbp->robust_list_offset;
	td->td_rbp_list = rbp->robust_priv_list_offset;
	td->td_rb_inact = rbp->robust_inact_offset;
	return (0);
}

static int
__umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap)
{
	struct umtx_robust_lists_params rb;
	int error;

	if (uap->val > sizeof(rb))
		return (EINVAL);
	bzero(&rb, sizeof(rb));
	error = copyin(uap->uaddr1, &rb, uap->val);
	if (error != 0)
		return (error);
	return (umtx_robust_lists(td, &rb));
}
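
/*
 * Dispatch table for the _umtx_op(2) system call. A minimal userland
 * sketch of a timed mutex lock routed through this table, assuming
 * <sys/umtx.h> is included and m is a zero-initialized process-private
 * struct umutex (the names m and tmo are illustrative only; real
 * applications normally go through libthr instead):
 *
 *	struct _umtx_time tmo;
 *	tmo._clockid = CLOCK_REALTIME;
 *	tmo._flags = UMTX_ABSTIME;
 *	clock_gettime(CLOCK_REALTIME, &tmo._timeout);
 *	tmo._timeout.tv_sec += 1;
 *	_umtx_op(&m, UMTX_OP_MUTEX_LOCK, 0, (void *)sizeof(tmo), &tmo);
 *
 * uaddr1 carries the size of the timeout argument and uaddr2 its
 * address, matching the umtx_copyin_umtx_time() wrappers above.
 */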
typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap);

static const _umtx_op_func op_table[] = {
	[UMTX_OP_RESERVED0]	= __umtx_op_unimpl,
	[UMTX_OP_RESERVED1]	= __umtx_op_unimpl,
	[UMTX_OP_WAIT]		= __umtx_op_wait,
	[UMTX_OP_WAKE]		= __umtx_op_wake,
	[UMTX_OP_MUTEX_TRYLOCK]	= __umtx_op_trylock_umutex,
	[UMTX_OP_MUTEX_LOCK]	= __umtx_op_lock_umutex,
	[UMTX_OP_MUTEX_UNLOCK]	= __umtx_op_unlock_umutex,
	[UMTX_OP_SET_CEILING]	= __umtx_op_set_ceiling,
	[UMTX_OP_CV_WAIT]	= __umtx_op_cv_wait,
	[UMTX_OP_CV_SIGNAL]	= __umtx_op_cv_signal,
	[UMTX_OP_CV_BROADCAST]	= __umtx_op_cv_broadcast,
	[UMTX_OP_WAIT_UINT]	= __umtx_op_wait_uint,
	[UMTX_OP_RW_RDLOCK]	= __umtx_op_rw_rdlock,
	[UMTX_OP_RW_WRLOCK]	= __umtx_op_rw_wrlock,
	[UMTX_OP_RW_UNLOCK]	= __umtx_op_rw_unlock,
	[UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private,
	[UMTX_OP_WAKE_PRIVATE]	= __umtx_op_wake_private,
	[UMTX_OP_MUTEX_WAIT]	= __umtx_op_wait_umutex,
	[UMTX_OP_MUTEX_WAKE]	= __umtx_op_wake_umutex,
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
	[UMTX_OP_SEM_WAIT]	= __umtx_op_sem_wait,
	[UMTX_OP_SEM_WAKE]	= __umtx_op_sem_wake,
#else
	[UMTX_OP_SEM_WAIT]	= __umtx_op_unimpl,
	[UMTX_OP_SEM_WAKE]	= __umtx_op_unimpl,
#endif
	[UMTX_OP_NWAKE_PRIVATE]	= __umtx_op_nwake_private,
	[UMTX_OP_MUTEX_WAKE2]	= __umtx_op_wake2_umutex,
	[UMTX_OP_SEM2_WAIT]	= __umtx_op_sem2_wait,
	[UMTX_OP_SEM2_WAKE]	= __umtx_op_sem2_wake,
	[UMTX_OP_SHM]		= __umtx_op_shm,
	[UMTX_OP_ROBUST_LISTS]	= __umtx_op_robust_lists,
};

int
sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
{

	if ((unsigned)uap->op < nitems(op_table))
		return (*op_table[uap->op])(td, uap);
	return (EINVAL);
}
#ifdef COMPAT_FREEBSD32

struct umtx_time32 {
	struct	timespec32 timeout;
	uint32_t flags;
	uint32_t clockid;
};

static inline int
umtx_copyin_timeout32(void *addr, struct timespec *tsp)
{
	struct timespec32 ts32;
	int error;

	error = copyin(addr, &ts32, sizeof(struct timespec32));
	if (error == 0) {
		if (ts32.tv_sec < 0 ||
		    ts32.tv_nsec >= 1000000000 ||
		    ts32.tv_nsec < 0)
			error = EINVAL;
		else {
			tsp->tv_sec = ts32.tv_sec;
			tsp->tv_nsec = ts32.tv_nsec;
		}
	}
	return (error);
}

static inline int
umtx_copyin_umtx_time32(const void *addr, size_t size, struct _umtx_time *tp)
{
	struct umtx_time32 t32;
	int error;

	t32.clockid = CLOCK_REALTIME;
	t32.flags = 0;
	if (size <= sizeof(struct timespec32))
		error = copyin(addr, &t32.timeout, sizeof(struct timespec32));
	else
		error = copyin(addr, &t32, sizeof(struct umtx_time32));
	if (error != 0)
		return (error);
	if (t32.timeout.tv_sec < 0 ||
	    t32.timeout.tv_nsec >= 1000000000 || t32.timeout.tv_nsec < 0)
		return (EINVAL);
	tp->_timeout.tv_sec = t32.timeout.tv_sec;
	tp->_timeout.tv_nsec = t32.timeout.tv_nsec;
	tp->_flags = t32.flags;
	tp->_clockid = t32.clockid;
	return (0);
}
static int
__umtx_op_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
}

static int
__umtx_op_lock_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		/* Use the 32-bit layout; the native copyin would misparse it. */
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, 0));
}

static int
__umtx_op_wait_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
}

static int
__umtx_op_cv_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
}
static int
__umtx_op_rw_rdlock_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
	} else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_wrlock_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_wrlock(td, uap->obj, 0);
	} else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_wrlock(td, uap->obj, &timeout);
	}
	return (error);
}

static int
__umtx_op_wait_uint_private_compat32(struct thread *td,
    struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
}
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
static int
__umtx_op_sem_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_sem_wait(td, uap->obj, tm_p));
}
#endif

static int
__umtx_op_sem2_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	size_t uasize;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		uasize = 0;
		tm_p = NULL;
	} else {
		uasize = (size_t)uap->uaddr1;
		error = umtx_copyin_umtx_time32(uap->uaddr2, uasize, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	error = do_sem2_wait(td, uap->obj, tm_p);
	if (error == EINTR && uap->uaddr2 != NULL &&
	    (timeout._flags & UMTX_ABSTIME) == 0 &&
	    uasize >= sizeof(struct umtx_time32) + sizeof(struct timespec32)) {
		struct timespec32 remain32 = {
			.tv_sec = timeout._timeout.tv_sec,
			.tv_nsec = timeout._timeout.tv_nsec
		};
		error = copyout(&remain32,
		    (struct umtx_time32 *)uap->uaddr2 + 1,
		    sizeof(struct timespec32));
		if (error == 0)
			error = EINTR;
	}

	return (error);
}
static int
__umtx_op_nwake_private32(struct thread *td, struct _umtx_op_args *uap)
{
	uint32_t uaddrs[BATCH_SIZE], *upp;
	int count, error, i, pos, tocopy;

	/*
	 * The userland array holds 32-bit addresses, so index it as
	 * uint32_t entries rather than native-sized pointers.
	 */
	upp = (uint32_t *)uap->obj;
	error = 0;
	for (count = uap->val, pos = 0; count > 0; count -= tocopy,
	    pos += tocopy) {
		tocopy = MIN(count, BATCH_SIZE);
		error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t));
		if (error != 0)
			break;
		for (i = 0; i < tocopy; ++i)
			kern_umtx_wake(td, (void *)(intptr_t)uaddrs[i],
			    INT_MAX, 1);
	}
	return (error);
}

struct umtx_robust_lists_params_compat32 {
	uint32_t robust_list_offset;
	uint32_t robust_priv_list_offset;
	uint32_t robust_inact_offset;
};

static int
__umtx_op_robust_lists_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct umtx_robust_lists_params rb;
	struct umtx_robust_lists_params_compat32 rb32;
	int error;

	if (uap->val > sizeof(rb32))
		return (EINVAL);
	bzero(&rb, sizeof(rb));
	bzero(&rb32, sizeof(rb32));
	error = copyin(uap->uaddr1, &rb32, uap->val);
	if (error != 0)
		return (error);
	rb.robust_list_offset = rb32.robust_list_offset;
	rb.robust_priv_list_offset = rb32.robust_priv_list_offset;
	rb.robust_inact_offset = rb32.robust_inact_offset;
	return (umtx_robust_lists(td, &rb));
}
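
/*
 * Dispatch table for 32-bit processes on a 64-bit kernel: entries
 * whose argument layouts differ (timespec32, umtx_time32, 32-bit
 * address arrays, robust list offsets) get _compat32 wrappers; the
 * rest reuse the native handlers.
 */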
static const _umtx_op_func op_table_compat32[] = {
	[UMTX_OP_RESERVED0]	= __umtx_op_unimpl,
	[UMTX_OP_RESERVED1]	= __umtx_op_unimpl,
	[UMTX_OP_WAIT]		= __umtx_op_wait_compat32,
	[UMTX_OP_WAKE]		= __umtx_op_wake,
	[UMTX_OP_MUTEX_TRYLOCK]	= __umtx_op_trylock_umutex,
	[UMTX_OP_MUTEX_LOCK]	= __umtx_op_lock_umutex_compat32,
	[UMTX_OP_MUTEX_UNLOCK]	= __umtx_op_unlock_umutex,
	[UMTX_OP_SET_CEILING]	= __umtx_op_set_ceiling,
	[UMTX_OP_CV_WAIT]	= __umtx_op_cv_wait_compat32,
	[UMTX_OP_CV_SIGNAL]	= __umtx_op_cv_signal,
	[UMTX_OP_CV_BROADCAST]	= __umtx_op_cv_broadcast,
	[UMTX_OP_WAIT_UINT]	= __umtx_op_wait_compat32,
	[UMTX_OP_RW_RDLOCK]	= __umtx_op_rw_rdlock_compat32,
	[UMTX_OP_RW_WRLOCK]	= __umtx_op_rw_wrlock_compat32,
	[UMTX_OP_RW_UNLOCK]	= __umtx_op_rw_unlock,
	[UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private_compat32,
	[UMTX_OP_WAKE_PRIVATE]	= __umtx_op_wake_private,
	[UMTX_OP_MUTEX_WAIT]	= __umtx_op_wait_umutex_compat32,
	[UMTX_OP_MUTEX_WAKE]	= __umtx_op_wake_umutex,
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
	[UMTX_OP_SEM_WAIT]	= __umtx_op_sem_wait_compat32,
	[UMTX_OP_SEM_WAKE]	= __umtx_op_sem_wake,
#else
	[UMTX_OP_SEM_WAIT]	= __umtx_op_unimpl,
	[UMTX_OP_SEM_WAKE]	= __umtx_op_unimpl,
#endif
	[UMTX_OP_NWAKE_PRIVATE]	= __umtx_op_nwake_private32,
	[UMTX_OP_MUTEX_WAKE2]	= __umtx_op_wake2_umutex,
	[UMTX_OP_SEM2_WAIT]	= __umtx_op_sem2_wait_compat32,
	[UMTX_OP_SEM2_WAKE]	= __umtx_op_sem2_wake,
	[UMTX_OP_SHM]		= __umtx_op_shm,
	[UMTX_OP_ROBUST_LISTS]	= __umtx_op_robust_lists_compat32,
};

int
freebsd32_umtx_op(struct thread *td, struct freebsd32_umtx_op_args *uap)
{

	if ((unsigned)uap->op < nitems(op_table_compat32)) {
		return (*op_table_compat32[uap->op])(td,
		    (struct _umtx_op_args *)uap);
	}
	return (EINVAL);
}
#endif /* COMPAT_FREEBSD32 */
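
/*
 * Per-thread umtx state lifecycle: a umtx_q is allocated at thread
 * creation and freed at destruction; the exec and thread-exit hooks
 * below release robust mutexes and disown PI mutexes.
 */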
void
umtx_thread_init(struct thread *td)
{

	td->td_umtxq = umtxq_alloc();
	td->td_umtxq->uq_thread = td;
}

void
umtx_thread_fini(struct thread *td)
{

	umtxq_free(td->td_umtxq);
}

/*
 * Called when a new thread is created, e.g. during fork().
 */
void
umtx_thread_alloc(struct thread *td)
{
	struct umtx_q *uq;

	uq = td->td_umtxq;
	uq->uq_inherited_pri = PRI_MAX;

	KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
	KASSERT(uq->uq_thread == td, ("uq_thread != td"));
	KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
	KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
}
/*
 * exec() hook.
 *
 * Clear robust lists for all of the process's threads, not delaying
 * the cleanup to the thread_exit hook, since the relevant address
 * space is destroyed right now.
 */
static void
umtx_exec_hook(void *arg __unused, struct proc *p,
    struct image_params *imgp __unused)
{
	struct thread *td;

	KASSERT(p == curproc, ("need curproc"));
	KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
	    (p->p_flag & P_STOPPED_SINGLE) != 0,
	    ("curproc must be single-threaded"));
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(td == curthread ||
		    ((td->td_flags & TDF_BOUNDARY) != 0 && TD_IS_SUSPENDED(td)),
		    ("running thread %p %p", p, td));
		umtx_thread_cleanup(td);
		td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
	}
}

/*
 * thread_exit() hook.
 */
void
umtx_thread_exit(struct thread *td)
{

	umtx_thread_cleanup(td);
}
static int
umtx_read_uptr(struct thread *td, uintptr_t ptr, uintptr_t *res)
{
	u_long res1;
#ifdef COMPAT_FREEBSD32
	uint32_t res32;
#endif
	int error;

#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		error = fueword32((void *)ptr, &res32);
		if (error == 0)
			res1 = res32;
	} else
#endif
	{
		error = fueword((void *)ptr, &res1);
	}
	if (error == 0)
		*res = res1;
	else
		error = EFAULT;
	return (error);
}

static void
umtx_read_rb_list(struct thread *td, struct umutex *m, uintptr_t *rb_list)
{
#ifdef COMPAT_FREEBSD32
	struct umutex32 m32;

	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		memcpy(&m32, m, sizeof(m32));
		*rb_list = m32.m_rb_lnk;
	} else
#endif
		*rb_list = m->m_rb_lnk;
}

static int
umtx_handle_rb(struct thread *td, uintptr_t rbp, uintptr_t *rb_list, bool inact)
{
	struct umutex m;
	int error;

	KASSERT(td->td_proc == curproc, ("need current vmspace"));
	error = copyin((void *)rbp, &m, sizeof(m));
	if (error != 0)
		return (error);
	if (rb_list != NULL)
		umtx_read_rb_list(td, &m, rb_list);
	if ((m.m_flags & UMUTEX_ROBUST) == 0)
		return (EINVAL);
	if ((m.m_owner & ~UMUTEX_CONTESTED) != td->td_tid)
		/* inact is cleared after unlock, allow the inconsistency */
		return (inact ? 0 : EINVAL);
	return (do_unlock_umutex(td, (struct umutex *)rbp, true));
}
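
/*
 * Walk a robust mutex list published by userland, unlocking each
 * owned robust mutex. Traversal is capped at umtx_max_rb entries so a
 * corrupted or cyclic user list cannot wedge thread exit.
 */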
static void
umtx_cleanup_rb_list(struct thread *td, uintptr_t rb_list, uintptr_t *rb_inact,
    const char *name)
{
	int error, i;
	uintptr_t rbp;
	bool inact;

	if (rb_list == 0)
		return;
	error = umtx_read_uptr(td, rb_list, &rbp);
	for (i = 0; error == 0 && rbp != 0 && i < umtx_max_rb; i++) {
		if (rbp == *rb_inact) {
			inact = true;
			*rb_inact = 0;
		} else
			inact = false;
		error = umtx_handle_rb(td, rbp, &rbp, inact);
	}
	if (i == umtx_max_rb && umtx_verbose_rb) {
		uprintf("comm %s pid %d: reached umtx %smax rb %d\n",
		    td->td_proc->p_comm, td->td_proc->p_pid, name, umtx_max_rb);
	}
	if (error != 0 && umtx_verbose_rb) {
		uprintf("comm %s pid %d: handling %srb error %d\n",
		    td->td_proc->p_comm, td->td_proc->p_pid, name, error);
	}
}

/*
 * Clean up umtx data.
 */
static void
umtx_thread_cleanup(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;
	uintptr_t rb_inact;

	/*
	 * Disown pi mutexes.
	 */
	uq = td->td_umtxq;
	if (uq != NULL) {
		mtx_lock(&umtx_lock);
		uq->uq_inherited_pri = PRI_MAX;
		while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
			pi->pi_owner = NULL;
			TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
		}
		mtx_unlock(&umtx_lock);
		thread_lock(td);
		sched_lend_user_prio(td, PRI_MAX);
		thread_unlock(td);
	}

	/*
	 * Handle terminated robust mutexes. Must be done after
	 * robust pi disown, otherwise unlock could see unowned
	 * entries.
	 */
	rb_inact = td->td_rb_inact;
	if (rb_inact != 0)
		(void)umtx_read_uptr(td, rb_inact, &rb_inact);
	umtx_cleanup_rb_list(td, td->td_rb_list, &rb_inact, "");
	umtx_cleanup_rb_list(td, td->td_rbp_list, &rb_inact, "priv ");
	if (rb_inact != 0)
		(void)umtx_handle_rb(td, rb_inact, NULL, true);
}