2 * Copyright (c) 2015, 2016 The FreeBSD Foundation
3 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
4 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
7 * Portions of this software were developed by Konstantin Belousov
8 * under sponsorship from the FreeBSD Foundation.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice unmodified, this list of conditions, and the following
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include "opt_compat.h"
36 #include "opt_umtx_profiling.h"
38 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/fcntl.h>
42 #include <sys/filedesc.h>
43 #include <sys/limits.h>
45 #include <sys/malloc.h>
47 #include <sys/mutex.h>
50 #include <sys/resource.h>
51 #include <sys/resourcevar.h>
52 #include <sys/rwlock.h>
54 #include <sys/sched.h>
56 #include <sys/sysctl.h>
57 #include <sys/sysent.h>
58 #include <sys/systm.h>
59 #include <sys/sysproto.h>
60 #include <sys/syscallsubr.h>
61 #include <sys/taskqueue.h>
63 #include <sys/eventhandler.h>
66 #include <security/mac/mac_framework.h>
69 #include <vm/vm_param.h>
71 #include <vm/vm_map.h>
72 #include <vm/vm_object.h>
74 #include <machine/atomic.h>
75 #include <machine/cpu.h>
77 #ifdef COMPAT_FREEBSD32
78 #include <compat/freebsd32/freebsd32_proto.h>
82 #define _UMUTEX_WAIT 2
85 #define UPROF_PERC_BIGGER(w, f, sw, sf) \
86 (((w) > (sw)) || ((w) == (sw) && (f) > (sf)))
89 /* Priority inheritance mutex info. */
92 struct thread *pi_owner;
97	 * List entry to link the PI mutexes held by a thread
98 TAILQ_ENTRY(umtx_pi) pi_link;
100 /* List entry in hash */
101 TAILQ_ENTRY(umtx_pi) pi_hashlink;
103 /* List for waiters */
104 TAILQ_HEAD(,umtx_q) pi_blocked;
106 /* Identify a userland lock object */
107 struct umtx_key pi_key;
110	 * A userland synchronization object user.
112 /* Linked list for the hash. */
113 TAILQ_ENTRY(umtx_q) uq_link;
116 struct umtx_key uq_key;
120 #define UQF_UMTXQ 0x0001
122	/* The thread this queue entry belongs to. */
123 struct thread *uq_thread;
126	 * Blocked on PI mutex. Reads can use either the chain lock
127	 * or umtx_lock; writes must hold both the chain lock and
128	 * umtx_lock.
130 struct umtx_pi *uq_pi_blocked;
132 /* On blocked list */
133 TAILQ_ENTRY(umtx_q) uq_lockq;
135 /* Thread contending with us */
136 TAILQ_HEAD(,umtx_pi) uq_pi_contested;
138 /* Inherited priority from PP mutex */
139 u_char uq_inherited_pri;
141 /* Spare queue ready to be reused */
142 struct umtxq_queue *uq_spare_queue;
144	/* The queue we are on */
145 struct umtxq_queue *uq_cur_queue;
148 TAILQ_HEAD(umtxq_head, umtx_q);
150 /* Per-key wait-queue */
152 struct umtxq_head head;
154 LIST_ENTRY(umtxq_queue) link;
158 LIST_HEAD(umtxq_list, umtxq_queue);
160 /* Userland lock object's wait-queue chain */
162 /* Lock for this chain. */
165 /* List of sleep queues. */
166 struct umtxq_list uc_queue[2];
167 #define UMTX_SHARED_QUEUE 0
168 #define UMTX_EXCLUSIVE_QUEUE 1
170 LIST_HEAD(, umtxq_queue) uc_spare_queue;
175 /* Chain lock waiters */
178	/* All PI mutexes in the list */
179 TAILQ_HEAD(,umtx_pi) uc_pi_list;
181 #ifdef UMTX_PROFILING
187 #define UMTXQ_LOCKED_ASSERT(uc) mtx_assert(&(uc)->uc_lock, MA_OWNED)
190	 * Don't propagate time-sharing priority; there is a security reason:
191	 * a user can simply create a PI mutex, let thread A lock it, and let
192	 * another thread B block on it.  Because B is sleeping, its priority
193	 * is boosted, and A's priority is boosted in turn via priority
194	 * propagation; A's priority would then never be lowered, even if A
195	 * used 100% CPU, which is unfair to other processes.
198 #define UPRI(td) (((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\
199 (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\
200 PRI_MAX_TIMESHARE : (td)->td_user_pri)
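/*
 * Editorial sketch (not part of the original source): UPRI() written out
 * as a function.  Any priority inside the time-sharing band collapses to
 * PRI_MAX_TIMESHARE, the weakest value in that band, so a sleeping
 * time-sharing waiter never lends a boosted priority through a PI mutex.
 */
#if 0	/* example only */
static u_char
upri_sketch(struct thread *td)
{

	if (td->td_user_pri >= PRI_MIN_TIMESHARE &&
	    td->td_user_pri <= PRI_MAX_TIMESHARE)
		return (PRI_MAX_TIMESHARE);
	return (td->td_user_pri);
}
#endif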
202 #define GOLDEN_RATIO_PRIME 2654404609U
204 #define UMTX_CHAINS 512
206 #define UMTX_SHIFTS (__WORD_BIT - 9)
208 #define GET_SHARE(flags) \
209 (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)
211 #define BUSY_SPINS 200
215 bool is_abs_real; /* TIMER_ABSTIME && CLOCK_REALTIME* */
220 #ifdef COMPAT_FREEBSD32
222 volatile __lwpid_t m_owner; /* Owner of the mutex */
223 __uint32_t m_flags; /* Flags of the mutex */
224 __uint32_t m_ceilings[2]; /* Priority protect ceiling */
225 __uint32_t m_rb_lnk; /* Robust linkage */
227 __uint32_t m_spare[2];
230 _Static_assert(sizeof(struct umutex) == sizeof(struct umutex32), "umutex32");
231 _Static_assert(__offsetof(struct umutex, m_spare[0]) ==
232 __offsetof(struct umutex32, m_spare[0]), "m_spare32");
235 int umtx_shm_vnobj_persistent = 0;
236 SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_vnode_persistent, CTLFLAG_RWTUN,
237 &umtx_shm_vnobj_persistent, 0,
238 "False forces destruction of umtx attached to file, on last close");
239 static int umtx_max_rb = 1000;
240 SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_max_robust, CTLFLAG_RWTUN,
244 static uma_zone_t umtx_pi_zone;
245 static struct umtxq_chain umtxq_chains[2][UMTX_CHAINS];
246 static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
247 static int umtx_pi_allocated;
249 static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx debug");
250 SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
251 &umtx_pi_allocated, 0, "Allocated umtx_pi");
252 static int umtx_verbose_rb = 1;
253 SYSCTL_INT(_debug_umtx, OID_AUTO, robust_faults_verbose, CTLFLAG_RWTUN,
257 #ifdef UMTX_PROFILING
258 static long max_length;
259 SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length");
260 static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD, 0, "umtx chain stats");
263 static void abs_timeout_update(struct abs_timeout *timo);
265 static void umtx_shm_init(void);
266 static void umtxq_sysinit(void *);
267 static void umtxq_hash(struct umtx_key *key);
268 static struct umtxq_chain *umtxq_getchain(struct umtx_key *key);
269 static void umtxq_lock(struct umtx_key *key);
270 static void umtxq_unlock(struct umtx_key *key);
271 static void umtxq_busy(struct umtx_key *key);
272 static void umtxq_unbusy(struct umtx_key *key);
273 static void umtxq_insert_queue(struct umtx_q *uq, int q);
274 static void umtxq_remove_queue(struct umtx_q *uq, int q);
275 static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *);
276 static int umtxq_count(struct umtx_key *key);
277 static struct umtx_pi *umtx_pi_alloc(int);
278 static void umtx_pi_free(struct umtx_pi *pi);
279 static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags,
281 static void umtx_thread_cleanup(struct thread *td);
282 static void umtx_exec_hook(void *arg __unused, struct proc *p __unused,
283 struct image_params *imgp __unused);
284 SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);
286 #define umtxq_signal(key, nwake) umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
287 #define umtxq_insert(uq) umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
288 #define umtxq_remove(uq) umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)
290 static struct mtx umtx_lock;
292 #ifdef UMTX_PROFILING
294 umtx_init_profiling(void)
296 struct sysctl_oid *chain_oid;
300 for (i = 0; i < UMTX_CHAINS; ++i) {
301 snprintf(chain_name, sizeof(chain_name), "%d", i);
302 chain_oid = SYSCTL_ADD_NODE(NULL,
303 SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
304 chain_name, CTLFLAG_RD, NULL, "umtx hash stats");
305 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
306 "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL);
307 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
308 "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL);
313 sysctl_debug_umtx_chains_peaks(SYSCTL_HANDLER_ARGS)
317 struct umtxq_chain *uc;
318 u_int fract, i, j, tot, whole;
319 u_int sf0, sf1, sf2, sf3, sf4;
320 u_int si0, si1, si2, si3, si4;
321 u_int sw0, sw1, sw2, sw3, sw4;
323 sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
324 for (i = 0; i < 2; i++) {
326 for (j = 0; j < UMTX_CHAINS; ++j) {
327 uc = &umtxq_chains[i][j];
328 mtx_lock(&uc->uc_lock);
329 tot += uc->max_length;
330 mtx_unlock(&uc->uc_lock);
333 sbuf_printf(&sb, "%u) Empty ", i);
335 sf0 = sf1 = sf2 = sf3 = sf4 = 0;
336 si0 = si1 = si2 = si3 = si4 = 0;
337 sw0 = sw1 = sw2 = sw3 = sw4 = 0;
338 for (j = 0; j < UMTX_CHAINS; j++) {
339 uc = &umtxq_chains[i][j];
340 mtx_lock(&uc->uc_lock);
341 whole = uc->max_length * 100;
342 mtx_unlock(&uc->uc_lock);
343 fract = (whole % tot) * 100;
344 if (UPROF_PERC_BIGGER(whole, fract, sw0, sf0)) {
348 } else if (UPROF_PERC_BIGGER(whole, fract, sw1,
353 } else if (UPROF_PERC_BIGGER(whole, fract, sw2,
358 } else if (UPROF_PERC_BIGGER(whole, fract, sw3,
363 } else if (UPROF_PERC_BIGGER(whole, fract, sw4,
370 sbuf_printf(&sb, "queue %u:\n", i);
371 sbuf_printf(&sb, "1st: %u.%u%% idx: %u\n", sw0 / tot,
373 sbuf_printf(&sb, "2nd: %u.%u%% idx: %u\n", sw1 / tot,
375 sbuf_printf(&sb, "3rd: %u.%u%% idx: %u\n", sw2 / tot,
377 sbuf_printf(&sb, "4th: %u.%u%% idx: %u\n", sw3 / tot,
379 sbuf_printf(&sb, "5th: %u.%u%% idx: %u\n", sw4 / tot,
385 sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
391 sysctl_debug_umtx_chains_clear(SYSCTL_HANDLER_ARGS)
393 struct umtxq_chain *uc;
398 error = sysctl_handle_int(oidp, &clear, 0, req);
399 if (error != 0 || req->newptr == NULL)
403 for (i = 0; i < 2; ++i) {
404 for (j = 0; j < UMTX_CHAINS; ++j) {
405 uc = &umtxq_chains[i][j];
406 mtx_lock(&uc->uc_lock);
409 mtx_unlock(&uc->uc_lock);
416 SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, clear,
417 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
418 sysctl_debug_umtx_chains_clear, "I", "Clear umtx chains statistics");
419 SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, peaks,
420 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
421 sysctl_debug_umtx_chains_peaks, "A", "Highest peaks in chains max length");
425 umtxq_sysinit(void *arg __unused)
429 umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
430 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
431 for (i = 0; i < 2; ++i) {
432 for (j = 0; j < UMTX_CHAINS; ++j) {
433 mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
434 MTX_DEF | MTX_DUPOK);
435 LIST_INIT(&umtxq_chains[i][j].uc_queue[0]);
436 LIST_INIT(&umtxq_chains[i][j].uc_queue[1]);
437 LIST_INIT(&umtxq_chains[i][j].uc_spare_queue);
438 TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
439 umtxq_chains[i][j].uc_busy = 0;
440 umtxq_chains[i][j].uc_waiters = 0;
441 #ifdef UMTX_PROFILING
442 umtxq_chains[i][j].length = 0;
443 umtxq_chains[i][j].max_length = 0;
447 #ifdef UMTX_PROFILING
448 umtx_init_profiling();
450 mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);
451 EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
452 EVENTHANDLER_PRI_ANY);
461 uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
462 uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX,
464 TAILQ_INIT(&uq->uq_spare_queue->head);
465 TAILQ_INIT(&uq->uq_pi_contested);
466 uq->uq_inherited_pri = PRI_MAX;
471 umtxq_free(struct umtx_q *uq)
474 MPASS(uq->uq_spare_queue != NULL);
475 free(uq->uq_spare_queue, M_UMTX);
480 umtxq_hash(struct umtx_key *key)
484 n = (uintptr_t)key->info.both.a + key->info.both.b;
485 key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
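/*
 * Editorial note: this is multiplicative (golden-ratio) hashing.
 * Multiplying by GOLDEN_RATIO_PRIME scrambles the combined key words,
 * and the shift by UMTX_SHIFTS (__WORD_BIT - 9) together with the
 * modulo picks nine well-mixed product bits, since UMTX_CHAINS is
 * 512 == 2^9.
 */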
488 static inline struct umtxq_chain *
489 umtxq_getchain(struct umtx_key *key)
492 if (key->type <= TYPE_SEM)
493 return (&umtxq_chains[1][key->hash]);
494 return (&umtxq_chains[0][key->hash]);
501 umtxq_lock(struct umtx_key *key)
503 struct umtxq_chain *uc;
505 uc = umtxq_getchain(key);
506 mtx_lock(&uc->uc_lock);
513 umtxq_unlock(struct umtx_key *key)
515 struct umtxq_chain *uc;
517 uc = umtxq_getchain(key);
518 mtx_unlock(&uc->uc_lock);
522	 * Set the chain to the busy state when a following operation
523	 * may block (a kernel mutex cannot be used).
526 umtxq_busy(struct umtx_key *key)
528 struct umtxq_chain *uc;
530 uc = umtxq_getchain(key);
531 mtx_assert(&uc->uc_lock, MA_OWNED);
535 int count = BUSY_SPINS;
538 while (uc->uc_busy && --count > 0)
544 while (uc->uc_busy) {
546 msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
557 umtxq_unbusy(struct umtx_key *key)
559 struct umtxq_chain *uc;
561 uc = umtxq_getchain(key);
562 mtx_assert(&uc->uc_lock, MA_OWNED);
563 KASSERT(uc->uc_busy != 0, ("not busy"));
570 umtxq_unbusy_unlocked(struct umtx_key *key)
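/*
 * Editorial sketch (an assumption drawn from the callers below): the
 * canonical busy-chain pattern around work that may fault or sleep,
 * during which the chain mutex itself cannot be held.
 */
#if 0	/* example only */
	umtxq_lock(&key);
	umtxq_busy(&key);		/* mark the chain busy... */
	umtxq_unlock(&key);		/* ...then drop the chain mutex */
	/* potentially-blocking work, e.g. casueword32() on user memory */
	umtxq_lock(&key);
	umtxq_unbusy(&key);		/* wake threads sleeping in umtxq_busy() */
	umtxq_unlock(&key);
#endif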
578 static struct umtxq_queue *
579 umtxq_queue_lookup(struct umtx_key *key, int q)
581 struct umtxq_queue *uh;
582 struct umtxq_chain *uc;
584 uc = umtxq_getchain(key);
585 UMTXQ_LOCKED_ASSERT(uc);
586 LIST_FOREACH(uh, &uc->uc_queue[q], link) {
587 if (umtx_key_match(&uh->key, key))
595 umtxq_insert_queue(struct umtx_q *uq, int q)
597 struct umtxq_queue *uh;
598 struct umtxq_chain *uc;
600 uc = umtxq_getchain(&uq->uq_key);
601 UMTXQ_LOCKED_ASSERT(uc);
602 KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue"));
603 uh = umtxq_queue_lookup(&uq->uq_key, q);
605 LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
607 uh = uq->uq_spare_queue;
608 uh->key = uq->uq_key;
609 LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
610 #ifdef UMTX_PROFILING
612 if (uc->length > uc->max_length) {
613 uc->max_length = uc->length;
614 if (uc->max_length > max_length)
615 max_length = uc->max_length;
619 uq->uq_spare_queue = NULL;
621 TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
623 uq->uq_flags |= UQF_UMTXQ;
624 uq->uq_cur_queue = uh;
629 umtxq_remove_queue(struct umtx_q *uq, int q)
631 struct umtxq_chain *uc;
632 struct umtxq_queue *uh;
634 uc = umtxq_getchain(&uq->uq_key);
635 UMTXQ_LOCKED_ASSERT(uc);
636 if (uq->uq_flags & UQF_UMTXQ) {
637 uh = uq->uq_cur_queue;
638 TAILQ_REMOVE(&uh->head, uq, uq_link);
640 uq->uq_flags &= ~UQF_UMTXQ;
641 if (TAILQ_EMPTY(&uh->head)) {
642 KASSERT(uh->length == 0,
643 ("inconsistent umtxq_queue length"));
644 #ifdef UMTX_PROFILING
647 LIST_REMOVE(uh, link);
649 uh = LIST_FIRST(&uc->uc_spare_queue);
650 KASSERT(uh != NULL, ("uc_spare_queue is empty"));
651 LIST_REMOVE(uh, link);
653 uq->uq_spare_queue = uh;
654 uq->uq_cur_queue = NULL;
659 * Check if there are multiple waiters
662 umtxq_count(struct umtx_key *key)
664 struct umtxq_chain *uc;
665 struct umtxq_queue *uh;
667 uc = umtxq_getchain(key);
668 UMTXQ_LOCKED_ASSERT(uc);
669 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
676	 * Check if there are multiple PI waiters and return the first one.
680 umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
682 struct umtxq_chain *uc;
683 struct umtxq_queue *uh;
686 uc = umtxq_getchain(key);
687 UMTXQ_LOCKED_ASSERT(uc);
688 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
690 *first = TAILQ_FIRST(&uh->head);
697 umtxq_check_susp(struct thread *td)
703 * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
704 * eventually break the lockstep loop.
706 if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
711 if (P_SHOULDSTOP(p) ||
712 ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) {
713 if (p->p_flag & P_SINGLE_EXIT)
723	 * Wake up threads waiting on a userland object.
727 umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
729 struct umtxq_chain *uc;
730 struct umtxq_queue *uh;
735 uc = umtxq_getchain(key);
736 UMTXQ_LOCKED_ASSERT(uc);
737 uh = umtxq_queue_lookup(key, q);
739 while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
740 umtxq_remove_queue(uq, q);
751 * Wake up specified thread.
754 umtxq_signal_thread(struct umtx_q *uq)
756 struct umtxq_chain *uc;
758 uc = umtxq_getchain(&uq->uq_key);
759 UMTXQ_LOCKED_ASSERT(uc);
765 tstohz(const struct timespec *tsp)
769 TIMESPEC_TO_TIMEVAL(&tv, tsp);
774 abs_timeout_init(struct abs_timeout *timo, int clockid, int absolute,
775 const struct timespec *timeout)
778 timo->clockid = clockid;
780 timo->is_abs_real = false;
781 abs_timeout_update(timo);
782 timo->end = timo->cur;
783 timespecadd(&timo->end, timeout);
785 timo->end = *timeout;
786 timo->is_abs_real = clockid == CLOCK_REALTIME ||
787 clockid == CLOCK_REALTIME_FAST ||
788 clockid == CLOCK_REALTIME_PRECISE;
790 * If is_abs_real, umtxq_sleep will read the clock
791 * after setting td_rtcgen; otherwise, read it here.
793 if (!timo->is_abs_real) {
794 abs_timeout_update(timo);
800 abs_timeout_init2(struct abs_timeout *timo, const struct _umtx_time *umtxtime)
803 abs_timeout_init(timo, umtxtime->_clockid,
804 (umtxtime->_flags & UMTX_ABSTIME) != 0, &umtxtime->_timeout);
808 abs_timeout_update(struct abs_timeout *timo)
811 kern_clock_gettime(curthread, timo->clockid, &timo->cur);
815 abs_timeout_gethz(struct abs_timeout *timo)
819 if (timespeccmp(&timo->end, &timo->cur, <=))
822 timespecsub(&tts, &timo->cur);
823 return (tstohz(&tts));
827 umtx_unlock_val(uint32_t flags, bool rb)
831 return (UMUTEX_RB_OWNERDEAD);
832 else if ((flags & UMUTEX_NONCONSISTENT) != 0)
833 return (UMUTEX_RB_NOTRECOV);
835 return (UMUTEX_UNOWNED);
840	 * Put the thread into a sleep state; before sleeping, check if the
841	 * thread was removed from the umtx queue.
844 umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *abstime)
846 struct umtxq_chain *uc;
849 if (abstime != NULL && abstime->is_abs_real) {
850 curthread->td_rtcgen = atomic_load_acq_int(&rtc_generation);
851 abs_timeout_update(abstime);
854 uc = umtxq_getchain(&uq->uq_key);
855 UMTXQ_LOCKED_ASSERT(uc);
857 if (!(uq->uq_flags & UQF_UMTXQ)) {
861 if (abstime != NULL) {
862 timo = abs_timeout_gethz(abstime);
869 error = msleep(uq, &uc->uc_lock, PCATCH | PDROP, wmesg, timo);
870 if (error == EINTR || error == ERESTART) {
871 umtxq_lock(&uq->uq_key);
874 if (abstime != NULL) {
875 if (abstime->is_abs_real)
876 curthread->td_rtcgen =
877 atomic_load_acq_int(&rtc_generation);
878 abs_timeout_update(abstime);
880 umtxq_lock(&uq->uq_key);
883 curthread->td_rtcgen = 0;
888	 * Convert a userspace address into a unique logical address.
891 umtx_key_get(const void *addr, int type, int share, struct umtx_key *key)
893 struct thread *td = curthread;
895 vm_map_entry_t entry;
901 if (share == THREAD_SHARE) {
903 key->info.private.vs = td->td_proc->p_vmspace;
904 key->info.private.addr = (uintptr_t)addr;
906 MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
907 map = &td->td_proc->p_vmspace->vm_map;
908 if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
909 &entry, &key->info.shared.object, &pindex, &prot,
910 &wired) != KERN_SUCCESS) {
914 if ((share == PROCESS_SHARE) ||
915 (share == AUTO_SHARE &&
916 VM_INHERIT_SHARE == entry->inheritance)) {
918 key->info.shared.offset = (vm_offset_t)addr -
919 entry->start + entry->offset;
920 vm_object_reference(key->info.shared.object);
923 key->info.private.vs = td->td_proc->p_vmspace;
924 key->info.private.addr = (uintptr_t)addr;
926 vm_map_lookup_done(map, entry);
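	/*
	 * Editorial example: a lock word in MAP_PRIVATE anonymous memory
	 * ends up with a private key (vmspace pointer plus address),
	 * while the same word in a MAP_SHARED mapping is keyed by the
	 * backing VM object and offset, so every process mapping that
	 * object reaches the same wait queue regardless of the mapping
	 * address.
	 */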
937 umtx_key_release(struct umtx_key *key)
940 vm_object_deallocate(key->info.shared.object);
944	 * Fetch and compare the value; sleep on the address if it is unchanged.
947 do_wait(struct thread *td, void *addr, u_long id,
948 struct _umtx_time *timeout, int compat32, int is_private)
950 struct abs_timeout timo;
957 if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
958 is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
962 abs_timeout_init2(&timo, timeout);
964 umtxq_lock(&uq->uq_key);
966 umtxq_unlock(&uq->uq_key);
968 error = fueword(addr, &tmp);
972 error = fueword32(addr, &tmp32);
978 umtxq_lock(&uq->uq_key);
981 error = umtxq_sleep(uq, "uwait", timeout == NULL ?
983 if ((uq->uq_flags & UQF_UMTXQ) == 0)
987 } else if ((uq->uq_flags & UQF_UMTXQ) != 0) {
990 umtxq_unlock(&uq->uq_key);
991 umtx_key_release(&uq->uq_key);
992 if (error == ERESTART)
998 * Wake up threads sleeping on the specified address.
1001 kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
1003 struct umtx_key key;
1006 if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
1007 is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
1010 umtxq_signal(&key, n_wake);
1012 umtx_key_release(&key);
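/*
 * Editorial sketch of the userspace side of wait/wake (the UMTX_OP_*
 * constants come from sys/umtx.h; treat the exact usage as an
 * assumption, not a specification).
 */
#if 0	/* example only */
	volatile u_long word = 1;

	/* Waiter: sleeps in do_wait() for as long as word still equals 1. */
	_umtx_op(__DEVOLATILE(void *, &word), UMTX_OP_WAIT, 1, NULL, NULL);

	/* Waker: change the word, then wake one sleeper via kern_umtx_wake(). */
	word = 0;
	_umtx_op(__DEVOLATILE(void *, &word), UMTX_OP_WAKE, 1, NULL, NULL);
#endif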
1017 * Lock PTHREAD_PRIO_NONE protocol POSIX mutex.
1020 do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags,
1021 struct _umtx_time *timeout, int mode)
1023 struct abs_timeout timo;
1025 uint32_t owner, old, id;
1031 if (timeout != NULL)
1032 abs_timeout_init2(&timo, timeout);
1035	 * Care must be exercised when dealing with the umtx structure; it
1036	 * can fault on any access.
1039 rv = fueword32(&m->m_owner, &owner);
1042 if (mode == _UMUTEX_WAIT) {
1043 if (owner == UMUTEX_UNOWNED ||
1044 owner == UMUTEX_CONTESTED ||
1045 owner == UMUTEX_RB_OWNERDEAD ||
1046 owner == UMUTEX_RB_NOTRECOV)
1050	 * Robust mutex terminated.  The kernel's duty is to
1051	 * return EOWNERDEAD to userspace.  The UMUTEX_NONCONSISTENT
1052	 * flag in umutex.m_flags is set by the common
1053	 * userspace code.
1055 if (owner == UMUTEX_RB_OWNERDEAD) {
1056 rv = casueword32(&m->m_owner,
1057 UMUTEX_RB_OWNERDEAD, &owner,
1058 id | UMUTEX_CONTESTED);
1061 if (owner == UMUTEX_RB_OWNERDEAD)
1062 return (EOWNERDEAD); /* success */
1063 rv = umtxq_check_susp(td);
1068 if (owner == UMUTEX_RB_NOTRECOV)
1069 return (ENOTRECOVERABLE);
1073	 * Try the uncontested case. This should be done in userland.
1076 rv = casueword32(&m->m_owner, UMUTEX_UNOWNED,
1078 /* The address was invalid. */
1082 /* The acquire succeeded. */
1083 if (owner == UMUTEX_UNOWNED)
1087	 * If no one owns it but it is contested, try to acquire it.
1090 if (owner == UMUTEX_CONTESTED) {
1091 rv = casueword32(&m->m_owner,
1092 UMUTEX_CONTESTED, &owner,
1093 id | UMUTEX_CONTESTED);
1094 /* The address was invalid. */
1098 if (owner == UMUTEX_CONTESTED)
1101 rv = umtxq_check_susp(td);
1106	 * If this failed the lock has changed, restart.
1113 if (mode == _UMUTEX_TRY)
1117	 * If we caught a signal, we have retried and now we exit immediately.
1123 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
1124 GET_SHARE(flags), &uq->uq_key)) != 0)
1127 umtxq_lock(&uq->uq_key);
1128 umtxq_busy(&uq->uq_key);
1130 umtxq_unlock(&uq->uq_key);
1133 * Set the contested bit so that a release in user space
1134 * knows to use the system call for unlock. If this fails
1135	 * either someone else has acquired the lock or it has been released.
1138 rv = casueword32(&m->m_owner, owner, &old,
1139 owner | UMUTEX_CONTESTED);
1141 /* The address was invalid. */
1143 umtxq_lock(&uq->uq_key);
1145 umtxq_unbusy(&uq->uq_key);
1146 umtxq_unlock(&uq->uq_key);
1147 umtx_key_release(&uq->uq_key);
1152	 * If we set the contested bit, sleep; otherwise the lock changed
1153	 * and we need to retry, or we lost a race to the thread
1154	 * unlocking the umtx.
1156 umtxq_lock(&uq->uq_key);
1157 umtxq_unbusy(&uq->uq_key);
1159 error = umtxq_sleep(uq, "umtxn", timeout == NULL ?
1162 umtxq_unlock(&uq->uq_key);
1163 umtx_key_release(&uq->uq_key);
1166 error = umtxq_check_susp(td);
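/*
 * Editorial sketch of the userland fast path that the contested-bit
 * protocol above supports (an approximation of what libthr does; the
 * details are assumptions): only contested transitions enter the kernel.
 */
#if 0	/* example only */
	long tid;

	thr_self(&tid);
	if (atomic_cmpset_acq_32(&m->m_owner, UMUTEX_UNOWNED, (uint32_t)tid))
		return (0);		/* uncontested: no syscall needed */
	return (_umtx_op(m, UMTX_OP_MUTEX_LOCK, 0, NULL, NULL));
#endif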
1173 * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex.
1176 do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
1178 struct umtx_key key;
1179 uint32_t owner, old, id, newlock;
1184 * Make sure we own this mtx.
1186 error = fueword32(&m->m_owner, &owner);
1190 if ((owner & ~UMUTEX_CONTESTED) != id)
1193 newlock = umtx_unlock_val(flags, rb);
1194 if ((owner & UMUTEX_CONTESTED) == 0) {
1195 error = casueword32(&m->m_owner, owner, &old, newlock);
1203 /* We should only ever be in here for contested locks */
1204 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
1210 count = umtxq_count(&key);
1214	 * When unlocking the umtx, it must be marked as unowned if
1215	 * zero or one thread is waiting for it.
1216	 * Otherwise, it must be marked as contested.
1219 newlock |= UMUTEX_CONTESTED;
1220 error = casueword32(&m->m_owner, owner, &old, newlock);
1222 umtxq_signal(&key, 1);
1225 umtx_key_release(&key);
1234	 * Check if the mutex is available and wake up a waiter;
1235	 * this is done only for a simple mutex.
1238 do_wake_umutex(struct thread *td, struct umutex *m)
1240 struct umtx_key key;
1246 error = fueword32(&m->m_owner, &owner);
1250 if ((owner & ~UMUTEX_CONTESTED) != 0 && owner != UMUTEX_RB_OWNERDEAD &&
1251 owner != UMUTEX_RB_NOTRECOV)
1254 error = fueword32(&m->m_flags, &flags);
1258 /* We should only ever be in here for contested locks */
1259 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
1265 count = umtxq_count(&key);
1268 if (count <= 1 && owner != UMUTEX_RB_OWNERDEAD &&
1269 owner != UMUTEX_RB_NOTRECOV) {
1270 error = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
1277 if (error == 0 && count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 ||
1278 owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV))
1279 umtxq_signal(&key, 1);
1282 umtx_key_release(&key);
1287	 * Check if the mutex has waiters and try to fix the contention bit.
1290 do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
1292 struct umtx_key key;
1293 uint32_t owner, old;
1298 switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT |
1302 type = TYPE_NORMAL_UMUTEX;
1304 case UMUTEX_PRIO_INHERIT:
1305 type = TYPE_PI_UMUTEX;
1307 case (UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST):
1308 type = TYPE_PI_ROBUST_UMUTEX;
1310 case UMUTEX_PRIO_PROTECT:
1311 type = TYPE_PP_UMUTEX;
1313 case (UMUTEX_PRIO_PROTECT | UMUTEX_ROBUST):
1314 type = TYPE_PP_ROBUST_UMUTEX;
1319 if ((error = umtx_key_get(m, type, GET_SHARE(flags), &key)) != 0)
1325 count = umtxq_count(&key);
1328	 * Only repair the contention bit if there is a waiter; this means the mutex
1329	 * is still being referenced by userland code. Otherwise, don't update any memory.
1333 error = fueword32(&m->m_owner, &owner);
1336 while (error == 0 && (owner & UMUTEX_CONTESTED) == 0) {
1337 error = casueword32(&m->m_owner, owner, &old,
1338 owner | UMUTEX_CONTESTED);
1346 error = umtxq_check_susp(td);
1350 } else if (count == 1) {
1351 error = fueword32(&m->m_owner, &owner);
1354 while (error == 0 && (owner & ~UMUTEX_CONTESTED) != 0 &&
1355 (owner & UMUTEX_CONTESTED) == 0) {
1356 error = casueword32(&m->m_owner, owner, &old,
1357 owner | UMUTEX_CONTESTED);
1365 error = umtxq_check_susp(td);
1371 if (error == EFAULT) {
1372 umtxq_signal(&key, INT_MAX);
1373 } else if (count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 ||
1374 owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV))
1375 umtxq_signal(&key, 1);
1378 umtx_key_release(&key);
1382 static inline struct umtx_pi *
1383 umtx_pi_alloc(int flags)
1387 pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
1388 TAILQ_INIT(&pi->pi_blocked);
1389 atomic_add_int(&umtx_pi_allocated, 1);
1394 umtx_pi_free(struct umtx_pi *pi)
1396 uma_zfree(umtx_pi_zone, pi);
1397 atomic_add_int(&umtx_pi_allocated, -1);
1401	 * Adjust the thread's position on a pi_state after its priority has been changed.
1405 umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
1407 struct umtx_q *uq, *uq1, *uq2;
1410 mtx_assert(&umtx_lock, MA_OWNED);
1417 * Check if the thread needs to be moved on the blocked chain.
1418	 * It needs to be moved if its priority is either lower than
1419	 * the previous thread's or higher than the next thread's.
1421 uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
1422 uq2 = TAILQ_NEXT(uq, uq_lockq);
1423 if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
1424 (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
1426 * Remove thread from blocked chain and determine where
1427 * it should be moved to.
1429 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1430 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1431 td1 = uq1->uq_thread;
1432 MPASS(td1->td_proc->p_magic == P_MAGIC);
1433 if (UPRI(td1) > UPRI(td))
1438 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1440 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1445 static struct umtx_pi *
1446 umtx_pi_next(struct umtx_pi *pi)
1448 struct umtx_q *uq_owner;
1450 if (pi->pi_owner == NULL)
1452 uq_owner = pi->pi_owner->td_umtxq;
1453 if (uq_owner == NULL)
1455 return (uq_owner->uq_pi_blocked);
1459 * Floyd's Cycle-Finding Algorithm.
1462 umtx_pi_check_loop(struct umtx_pi *pi)
1464 struct umtx_pi *pi1; /* fast iterator */
1466 mtx_assert(&umtx_lock, MA_OWNED);
1471 pi = umtx_pi_next(pi);
1474 pi1 = umtx_pi_next(pi1);
1477 pi1 = umtx_pi_next(pi1);
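/*
 * Editorial sketch of the tortoise-and-hare iteration used above: the
 * slow pointer advances one blocked-owner link per round, the fast
 * pointer two, and they can only meet if the ownership chain loops.
 */
#if 0	/* example only */
	struct umtx_pi *slow, *fast;

	slow = fast = pi;
	for (;;) {
		slow = umtx_pi_next(slow);
		fast = umtx_pi_next(fast);
		if (fast != NULL)
			fast = umtx_pi_next(fast);
		if (slow == NULL || fast == NULL)
			return (0);	/* the chain terminates: no loop */
		if (slow == fast)
			return (1);	/* the pointers met: loop detected */
	}
#endif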
1487	 * Propagate priority when a thread is blocked on a POSIX PI mutex.
1491 umtx_propagate_priority(struct thread *td)
1497 mtx_assert(&umtx_lock, MA_OWNED);
1500 pi = uq->uq_pi_blocked;
1503 if (umtx_pi_check_loop(pi))
1508 if (td == NULL || td == curthread)
1511 MPASS(td->td_proc != NULL);
1512 MPASS(td->td_proc->p_magic == P_MAGIC);
1515 if (td->td_lend_user_pri > pri)
1516 sched_lend_user_prio(td, pri);
1524 * Pick up the lock that td is blocked on.
1527 pi = uq->uq_pi_blocked;
1530 /* Resort td on the list if needed. */
1531 umtx_pi_adjust_thread(pi, td);
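	/*
	 * Editorial example: if T1 blocks on M1 owned by T2 while T2 is
	 * itself blocked on M2 owned by T3, this loop lends T1's
	 * (clamped) user priority first to T2 and then to T3, stopping
	 * once it reaches a thread that is not blocked on a PI mutex.
	 */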
1536	 * Unpropagate priority for a PI mutex when a thread blocked on
1537	 * it is interrupted by a signal or resumed by others.
1540 umtx_repropagate_priority(struct umtx_pi *pi)
1542 struct umtx_q *uq, *uq_owner;
1543 struct umtx_pi *pi2;
1546 mtx_assert(&umtx_lock, MA_OWNED);
1548 if (umtx_pi_check_loop(pi))
1550 while (pi != NULL && pi->pi_owner != NULL) {
1552 uq_owner = pi->pi_owner->td_umtxq;
1554 TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
1555 uq = TAILQ_FIRST(&pi2->pi_blocked);
1557 if (pri > UPRI(uq->uq_thread))
1558 pri = UPRI(uq->uq_thread);
1562 if (pri > uq_owner->uq_inherited_pri)
1563 pri = uq_owner->uq_inherited_pri;
1564 thread_lock(pi->pi_owner);
1565 sched_lend_user_prio(pi->pi_owner, pri);
1566 thread_unlock(pi->pi_owner);
1567 if ((pi = uq_owner->uq_pi_blocked) != NULL)
1568 umtx_pi_adjust_thread(pi, uq_owner->uq_thread);
1573	 * Insert a PI mutex into the owned list.
1576 umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
1578 struct umtx_q *uq_owner;
1580 uq_owner = owner->td_umtxq;
1581 mtx_assert(&umtx_lock, MA_OWNED);
1582 MPASS(pi->pi_owner == NULL);
1583 pi->pi_owner = owner;
1584 TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);
1589 * Disown a PI mutex, and remove it from the owned list.
1592 umtx_pi_disown(struct umtx_pi *pi)
1595 mtx_assert(&umtx_lock, MA_OWNED);
1596 TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested, pi, pi_link);
1597 pi->pi_owner = NULL;
1601 * Claim ownership of a PI mutex.
1604 umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
1609 mtx_lock(&umtx_lock);
1610 if (pi->pi_owner == owner) {
1611 mtx_unlock(&umtx_lock);
1615 if (pi->pi_owner != NULL) {
1617	 * Userland may have already messed up the mutex, sigh.
1619 mtx_unlock(&umtx_lock);
1622 umtx_pi_setowner(pi, owner);
1623 uq = TAILQ_FIRST(&pi->pi_blocked);
1625 pri = UPRI(uq->uq_thread);
1627 if (pri < UPRI(owner))
1628 sched_lend_user_prio(owner, pri);
1629 thread_unlock(owner);
1631 mtx_unlock(&umtx_lock);
1636	 * Adjust a thread's position in the queue of the PI mutex it is
1637	 * blocked on; this may trigger a new round of priority propagation.
1640 umtx_pi_adjust(struct thread *td, u_char oldpri)
1646 mtx_lock(&umtx_lock);
1648 * Pick up the lock that td is blocked on.
1650 pi = uq->uq_pi_blocked;
1652 umtx_pi_adjust_thread(pi, td);
1653 umtx_repropagate_priority(pi);
1655 mtx_unlock(&umtx_lock);
1659 * Sleep on a PI mutex.
1662 umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner,
1663 const char *wmesg, struct abs_timeout *timo, bool shared)
1665 struct umtxq_chain *uc;
1666 struct thread *td, *td1;
1672 KASSERT(td == curthread, ("inconsistent uq_thread"));
1673 uc = umtxq_getchain(&uq->uq_key);
1674 UMTXQ_LOCKED_ASSERT(uc);
1675 KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));
1677 mtx_lock(&umtx_lock);
1678 if (pi->pi_owner == NULL) {
1679 mtx_unlock(&umtx_lock);
1680 td1 = tdfind(owner, shared ? -1 : td->td_proc->p_pid);
1681 mtx_lock(&umtx_lock);
1683 if (pi->pi_owner == NULL)
1684 umtx_pi_setowner(pi, td1);
1685 PROC_UNLOCK(td1->td_proc);
1689 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1690 pri = UPRI(uq1->uq_thread);
1696 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1698 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1700 uq->uq_pi_blocked = pi;
1702 td->td_flags |= TDF_UPIBLOCKED;
1704 umtx_propagate_priority(td);
1705 mtx_unlock(&umtx_lock);
1706 umtxq_unbusy(&uq->uq_key);
1708 error = umtxq_sleep(uq, wmesg, timo);
1711 mtx_lock(&umtx_lock);
1712 uq->uq_pi_blocked = NULL;
1714 td->td_flags &= ~TDF_UPIBLOCKED;
1716 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1717 umtx_repropagate_priority(pi);
1718 mtx_unlock(&umtx_lock);
1719 umtxq_unlock(&uq->uq_key);
1725	 * Increment the reference count of a PI mutex.
1728 umtx_pi_ref(struct umtx_pi *pi)
1730 struct umtxq_chain *uc;
1732 uc = umtxq_getchain(&pi->pi_key);
1733 UMTXQ_LOCKED_ASSERT(uc);
1738	 * Decrement the reference count of a PI mutex; when the count
1739	 * drops to zero, its memory is freed.
1742 umtx_pi_unref(struct umtx_pi *pi)
1744 struct umtxq_chain *uc;
1746 uc = umtxq_getchain(&pi->pi_key);
1747 UMTXQ_LOCKED_ASSERT(uc);
1748 KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
1749 if (--pi->pi_refcount == 0) {
1750 mtx_lock(&umtx_lock);
1751 if (pi->pi_owner != NULL)
1753 KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
1754 ("blocked queue not empty"));
1755 mtx_unlock(&umtx_lock);
1756 TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
1762	 * Find a PI mutex in the hash table.
1764 static struct umtx_pi *
1765 umtx_pi_lookup(struct umtx_key *key)
1767 struct umtxq_chain *uc;
1770 uc = umtxq_getchain(key);
1771 UMTXQ_LOCKED_ASSERT(uc);
1773 TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
1774 if (umtx_key_match(&pi->pi_key, key)) {
1782	 * Insert a PI mutex into the hash table.
1785 umtx_pi_insert(struct umtx_pi *pi)
1787 struct umtxq_chain *uc;
1789 uc = umtxq_getchain(&pi->pi_key);
1790 UMTXQ_LOCKED_ASSERT(uc);
1791 TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
1798 do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags,
1799 struct _umtx_time *timeout, int try)
1801 struct abs_timeout timo;
1803 struct umtx_pi *pi, *new_pi;
1804 uint32_t id, old_owner, owner, old;
1810 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
1811 TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
1815 if (timeout != NULL)
1816 abs_timeout_init2(&timo, timeout);
1818 umtxq_lock(&uq->uq_key);
1819 pi = umtx_pi_lookup(&uq->uq_key);
1821 new_pi = umtx_pi_alloc(M_NOWAIT);
1822 if (new_pi == NULL) {
1823 umtxq_unlock(&uq->uq_key);
1824 new_pi = umtx_pi_alloc(M_WAITOK);
1825 umtxq_lock(&uq->uq_key);
1826 pi = umtx_pi_lookup(&uq->uq_key);
1828 umtx_pi_free(new_pi);
1832 if (new_pi != NULL) {
1833 new_pi->pi_key = uq->uq_key;
1834 umtx_pi_insert(new_pi);
1839 umtxq_unlock(&uq->uq_key);
1842	 * Care must be exercised when dealing with the umtx structure; it
1843	 * can fault on any access.
1847 * Try the uncontested case. This should be done in userland.
1849 rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, &owner, id);
1850 /* The address was invalid. */
1856 /* The acquire succeeded. */
1857 if (owner == UMUTEX_UNOWNED) {
1862 if (owner == UMUTEX_RB_NOTRECOV) {
1863 error = ENOTRECOVERABLE;
1867	/* If no one owns it but it is contested, try to acquire it. */
1868 if (owner == UMUTEX_CONTESTED || owner == UMUTEX_RB_OWNERDEAD) {
1870 rv = casueword32(&m->m_owner, owner, &owner,
1871 id | UMUTEX_CONTESTED);
1872 /* The address was invalid. */
1878 if (owner == old_owner) {
1879 umtxq_lock(&uq->uq_key);
1880 umtxq_busy(&uq->uq_key);
1881 error = umtx_pi_claim(pi, td);
1882 umtxq_unbusy(&uq->uq_key);
1883 umtxq_unlock(&uq->uq_key);
1886 * Since we're going to return an
1887 * error, restore the m_owner to its
1888 * previous, unowned state to avoid
1889 * compounding the problem.
1891 (void)casuword32(&m->m_owner,
1892 id | UMUTEX_CONTESTED,
1896 old_owner == UMUTEX_RB_OWNERDEAD)
1901 error = umtxq_check_susp(td);
1905 /* If this failed the lock has changed, restart. */
1909 if ((owner & ~UMUTEX_CONTESTED) == id) {
1920	 * If we caught a signal, we have retried and now we exit immediately.
1926 umtxq_lock(&uq->uq_key);
1927 umtxq_busy(&uq->uq_key);
1928 umtxq_unlock(&uq->uq_key);
1931 * Set the contested bit so that a release in user space
1932 * knows to use the system call for unlock. If this fails
1933	 * either someone else has acquired the lock or it has been released.
1936 rv = casueword32(&m->m_owner, owner, &old, owner |
1939 /* The address was invalid. */
1941 umtxq_unbusy_unlocked(&uq->uq_key);
1946 umtxq_lock(&uq->uq_key);
1948	 * If we set the contested bit, sleep; otherwise the lock changed
1949	 * and we need to retry, or we lost a race to the thread
1950	 * unlocking the umtx.  Note that the UMUTEX_RB_OWNERDEAD
1951	 * value for owner is impossible here.
1954 error = umtxq_sleep_pi(uq, pi,
1955 owner & ~UMUTEX_CONTESTED,
1956 "umtxpi", timeout == NULL ? NULL : &timo,
1957 (flags & USYNC_PROCESS_SHARED) != 0);
1961 umtxq_unbusy(&uq->uq_key);
1962 umtxq_unlock(&uq->uq_key);
1965 error = umtxq_check_susp(td);
1970 umtxq_lock(&uq->uq_key);
1972 umtxq_unlock(&uq->uq_key);
1974 umtx_key_release(&uq->uq_key);
1979 * Unlock a PI mutex.
1982 do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
1984 struct umtx_key key;
1985 struct umtx_q *uq_first, *uq_first2, *uq_me;
1986 struct umtx_pi *pi, *pi2;
1987 uint32_t id, new_owner, old, owner;
1988 int count, error, pri;
1992 * Make sure we own this mtx.
1994 error = fueword32(&m->m_owner, &owner);
1998 if ((owner & ~UMUTEX_CONTESTED) != id)
2001 new_owner = umtx_unlock_val(flags, rb);
2003 /* This should be done in userland */
2004 if ((owner & UMUTEX_CONTESTED) == 0) {
2005 error = casueword32(&m->m_owner, owner, &old, new_owner);
2013 /* We should only ever be in here for contested locks */
2014 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
2015 TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
2021 count = umtxq_count_pi(&key, &uq_first);
2022 if (uq_first != NULL) {
2023 mtx_lock(&umtx_lock);
2024 pi = uq_first->uq_pi_blocked;
2025 KASSERT(pi != NULL, ("pi == NULL?"));
2026 if (pi->pi_owner != td && !(rb && pi->pi_owner == NULL)) {
2027 mtx_unlock(&umtx_lock);
2030 umtx_key_release(&key);
2031		/* userland messed up the mutex */
2034 uq_me = td->td_umtxq;
2035 if (pi->pi_owner == td)
2037	/* Get the highest-priority thread that is still sleeping. */
2038 uq_first = TAILQ_FIRST(&pi->pi_blocked);
2039 while (uq_first != NULL &&
2040 (uq_first->uq_flags & UQF_UMTXQ) == 0) {
2041 uq_first = TAILQ_NEXT(uq_first, uq_lockq);
2044 TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
2045 uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
2046 if (uq_first2 != NULL) {
2047 if (pri > UPRI(uq_first2->uq_thread))
2048 pri = UPRI(uq_first2->uq_thread);
2052 sched_lend_user_prio(td, pri);
2054 mtx_unlock(&umtx_lock);
2056 umtxq_signal_thread(uq_first);
2058 pi = umtx_pi_lookup(&key);
2060 * A umtx_pi can exist if a signal or timeout removed the
2061 * last waiter from the umtxq, but there is still
2062 * a thread in do_lock_pi() holding the umtx_pi.
2066 * The umtx_pi can be unowned, such as when a thread
2067 * has just entered do_lock_pi(), allocated the
2068 * umtx_pi, and unlocked the umtxq.
2069 * If the current thread owns it, it must disown it.
2071 mtx_lock(&umtx_lock);
2072 if (pi->pi_owner == td)
2074 mtx_unlock(&umtx_lock);
2080	 * When unlocking the umtx, it must be marked as unowned if
2081	 * zero or one thread is waiting for it.
2082	 * Otherwise, it must be marked as contested.
2086 new_owner |= UMUTEX_CONTESTED;
2087 error = casueword32(&m->m_owner, owner, &old, new_owner);
2089 umtxq_unbusy_unlocked(&key);
2090 umtx_key_release(&key);
2102 do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
2103 struct _umtx_time *timeout, int try)
2105 struct abs_timeout timo;
2106 struct umtx_q *uq, *uq2;
2110 int error, pri, old_inherited_pri, su, rv;
2114 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
2115 TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
2119 if (timeout != NULL)
2120 abs_timeout_init2(&timo, timeout);
2122 su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2124 old_inherited_pri = uq->uq_inherited_pri;
2125 umtxq_lock(&uq->uq_key);
2126 umtxq_busy(&uq->uq_key);
2127 umtxq_unlock(&uq->uq_key);
2129 rv = fueword32(&m->m_ceilings[0], &ceiling);
2134 ceiling = RTP_PRIO_MAX - ceiling;
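	/*
	 * Editorial worked example, assuming RTP_PRIO_MAX == 31: a
	 * userland ceiling of 31 (the strongest) becomes 0 here and
	 * yields PRI_MIN_REALTIME + 0, the strongest real-time kernel
	 * priority, while a ceiling of 0 becomes PRI_MIN_REALTIME + 31,
	 * the weakest.
	 */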
2135 if (ceiling > RTP_PRIO_MAX) {
2140 mtx_lock(&umtx_lock);
2141 if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
2142 mtx_unlock(&umtx_lock);
2146 if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
2147 uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
2149 if (uq->uq_inherited_pri < UPRI(td))
2150 sched_lend_user_prio(td, uq->uq_inherited_pri);
2153 mtx_unlock(&umtx_lock);
2155 rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
2156 id | UMUTEX_CONTESTED);
2157 /* The address was invalid. */
2163 if (owner == UMUTEX_CONTESTED) {
2166 } else if (owner == UMUTEX_RB_OWNERDEAD) {
2167 rv = casueword32(&m->m_owner, UMUTEX_RB_OWNERDEAD,
2168 &owner, id | UMUTEX_CONTESTED);
2173 if (owner == UMUTEX_RB_OWNERDEAD) {
2174 error = EOWNERDEAD; /* success */
2178 } else if (owner == UMUTEX_RB_NOTRECOV) {
2179 error = ENOTRECOVERABLE;
2189	 * If we caught a signal, we have retried and now we exit immediately.
2195 umtxq_lock(&uq->uq_key);
2197 umtxq_unbusy(&uq->uq_key);
2198 error = umtxq_sleep(uq, "umtxpp", timeout == NULL ?
2201 umtxq_unlock(&uq->uq_key);
2203 mtx_lock(&umtx_lock);
2204 uq->uq_inherited_pri = old_inherited_pri;
2206 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2207 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2209 if (pri > UPRI(uq2->uq_thread))
2210 pri = UPRI(uq2->uq_thread);
2213 if (pri > uq->uq_inherited_pri)
2214 pri = uq->uq_inherited_pri;
2216 sched_lend_user_prio(td, pri);
2218 mtx_unlock(&umtx_lock);
2221 if (error != 0 && error != EOWNERDEAD) {
2222 mtx_lock(&umtx_lock);
2223 uq->uq_inherited_pri = old_inherited_pri;
2225 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2226 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2228 if (pri > UPRI(uq2->uq_thread))
2229 pri = UPRI(uq2->uq_thread);
2232 if (pri > uq->uq_inherited_pri)
2233 pri = uq->uq_inherited_pri;
2235 sched_lend_user_prio(td, pri);
2237 mtx_unlock(&umtx_lock);
2241 umtxq_unbusy_unlocked(&uq->uq_key);
2242 umtx_key_release(&uq->uq_key);
2247 * Unlock a PP mutex.
2250 do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
2252 struct umtx_key key;
2253 struct umtx_q *uq, *uq2;
2255 uint32_t id, owner, rceiling;
2256 int error, pri, new_inherited_pri, su;
2260 su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2263 * Make sure we own this mtx.
2265 error = fueword32(&m->m_owner, &owner);
2269 if ((owner & ~UMUTEX_CONTESTED) != id)
2272 error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
2277 new_inherited_pri = PRI_MAX;
2279 rceiling = RTP_PRIO_MAX - rceiling;
2280 if (rceiling > RTP_PRIO_MAX)
2282 new_inherited_pri = PRI_MIN_REALTIME + rceiling;
2285 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
2286 TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
2293	 * For a priority-protected mutex, always set the unlocked state
2294	 * to UMUTEX_CONTESTED so that userland always enters the kernel
2295	 * to lock the mutex.  This is necessary because the thread
2296	 * priority has to be adjusted for such mutexes.
2298 error = suword32(&m->m_owner, umtx_unlock_val(flags, rb) |
2303 umtxq_signal(&key, 1);
2310 mtx_lock(&umtx_lock);
2312 uq->uq_inherited_pri = new_inherited_pri;
2314 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2315 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2317 if (pri > UPRI(uq2->uq_thread))
2318 pri = UPRI(uq2->uq_thread);
2321 if (pri > uq->uq_inherited_pri)
2322 pri = uq->uq_inherited_pri;
2324 sched_lend_user_prio(td, pri);
2326 mtx_unlock(&umtx_lock);
2328 umtx_key_release(&key);
2333 do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
2334 uint32_t *old_ceiling)
2337 uint32_t flags, id, owner, save_ceiling;
2340 error = fueword32(&m->m_flags, &flags);
2343 if ((flags & UMUTEX_PRIO_PROTECT) == 0)
2345 if (ceiling > RTP_PRIO_MAX)
2349 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
2350 TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
2354 umtxq_lock(&uq->uq_key);
2355 umtxq_busy(&uq->uq_key);
2356 umtxq_unlock(&uq->uq_key);
2358 rv = fueword32(&m->m_ceilings[0], &save_ceiling);
2364 rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
2365 id | UMUTEX_CONTESTED);
2371 if (owner == UMUTEX_CONTESTED) {
2372 rv = suword32(&m->m_ceilings[0], ceiling);
2373 rv1 = suword32(&m->m_owner, UMUTEX_CONTESTED);
2374 error = (rv == 0 && rv1 == 0) ? 0: EFAULT;
2378 if ((owner & ~UMUTEX_CONTESTED) == id) {
2379 rv = suword32(&m->m_ceilings[0], ceiling);
2380 error = rv == 0 ? 0 : EFAULT;
2384 if (owner == UMUTEX_RB_OWNERDEAD) {
2387 } else if (owner == UMUTEX_RB_NOTRECOV) {
2388 error = ENOTRECOVERABLE;
2393	 * If we caught a signal, we have retried and now we exit immediately.
2400	 * If we set the contested bit, sleep; otherwise the lock changed
2401	 * and we need to retry, or we lost a race to the thread
2402	 * unlocking the umtx.
2404 umtxq_lock(&uq->uq_key);
2406 umtxq_unbusy(&uq->uq_key);
2407 error = umtxq_sleep(uq, "umtxpp", NULL);
2409 umtxq_unlock(&uq->uq_key);
2411 umtxq_lock(&uq->uq_key);
2413 umtxq_signal(&uq->uq_key, INT_MAX);
2414 umtxq_unbusy(&uq->uq_key);
2415 umtxq_unlock(&uq->uq_key);
2416 umtx_key_release(&uq->uq_key);
2417 if (error == 0 && old_ceiling != NULL) {
2418 rv = suword32(old_ceiling, save_ceiling);
2419 error = rv == 0 ? 0 : EFAULT;
2425 * Lock a userland POSIX mutex.
2428 do_lock_umutex(struct thread *td, struct umutex *m,
2429 struct _umtx_time *timeout, int mode)
2434 error = fueword32(&m->m_flags, &flags);
2438 switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2440 error = do_lock_normal(td, m, flags, timeout, mode);
2442 case UMUTEX_PRIO_INHERIT:
2443 error = do_lock_pi(td, m, flags, timeout, mode);
2445 case UMUTEX_PRIO_PROTECT:
2446 error = do_lock_pp(td, m, flags, timeout, mode);
2451 if (timeout == NULL) {
2452 if (error == EINTR && mode != _UMUTEX_WAIT)
2455 /* Timed-locking is not restarted. */
2456 if (error == ERESTART)
2463 * Unlock a userland POSIX mutex.
2466 do_unlock_umutex(struct thread *td, struct umutex *m, bool rb)
2471 error = fueword32(&m->m_flags, &flags);
2475 switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2477 return (do_unlock_normal(td, m, flags, rb));
2478 case UMUTEX_PRIO_INHERIT:
2479 return (do_unlock_pi(td, m, flags, rb));
2480 case UMUTEX_PRIO_PROTECT:
2481 return (do_unlock_pp(td, m, flags, rb));
2488 do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
2489 struct timespec *timeout, u_long wflags)
2491 struct abs_timeout timo;
2493 uint32_t flags, clockid, hasw;
2497 error = fueword32(&cv->c_flags, &flags);
2500 error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);
2504 if ((wflags & CVWAIT_CLOCKID) != 0) {
2505 error = fueword32(&cv->c_clockid, &clockid);
2507 umtx_key_release(&uq->uq_key);
2510 if (clockid < CLOCK_REALTIME ||
2511 clockid >= CLOCK_THREAD_CPUTIME_ID) {
2512 /* hmm, only HW clock id will work. */
2513 umtx_key_release(&uq->uq_key);
2517 clockid = CLOCK_REALTIME;
2520 umtxq_lock(&uq->uq_key);
2521 umtxq_busy(&uq->uq_key);
2523 umtxq_unlock(&uq->uq_key);
2526	 * Set c_has_waiters to 1 before releasing the user mutex; also,
2527	 * avoid modifying the cache line when unnecessary.
2529 error = fueword32(&cv->c_has_waiters, &hasw);
2530 if (error == 0 && hasw == 0)
2531 suword32(&cv->c_has_waiters, 1);
2533 umtxq_unbusy_unlocked(&uq->uq_key);
2535 error = do_unlock_umutex(td, m, false);
2537 if (timeout != NULL)
2538 abs_timeout_init(&timo, clockid, (wflags & CVWAIT_ABSTIME) != 0,
2541 umtxq_lock(&uq->uq_key);
2543 error = umtxq_sleep(uq, "ucond", timeout == NULL ?
2547 if ((uq->uq_flags & UQF_UMTXQ) == 0)
2551	 * This must be a timeout, an interruption by a signal, or a
2552	 * spurious wakeup; clear the c_has_waiters flag when necessary.
2555 umtxq_busy(&uq->uq_key);
2556 if ((uq->uq_flags & UQF_UMTXQ) != 0) {
2557 int oldlen = uq->uq_cur_queue->length;
2560 umtxq_unlock(&uq->uq_key);
2561 suword32(&cv->c_has_waiters, 0);
2562 umtxq_lock(&uq->uq_key);
2565 umtxq_unbusy(&uq->uq_key);
2566 if (error == ERESTART)
2570 umtxq_unlock(&uq->uq_key);
2571 umtx_key_release(&uq->uq_key);
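/*
 * Editorial sketch of the userspace pairing, with arguments as consumed
 * by do_cv_wait() and do_cv_signal() (details are assumptions).
 */
#if 0	/* example only */
	/*
	 * Waiter, called with the umutex 'm' held; the kernel releases
	 * it atomically with going to sleep on 'cv'.
	 */
	_umtx_op(cv, UMTX_OP_CV_WAIT, 0, m, NULL);

	/* Signaller: wake one waiter. */
	_umtx_op(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL);
#endif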
2576 * Signal a userland condition variable.
2579 do_cv_signal(struct thread *td, struct ucond *cv)
2581 struct umtx_key key;
2582 int error, cnt, nwake;
2585 error = fueword32(&cv->c_flags, &flags);
2588 if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
2592 cnt = umtxq_count(&key);
2593 nwake = umtxq_signal(&key, 1);
2596 error = suword32(&cv->c_has_waiters, 0);
2603 umtx_key_release(&key);
2608 do_cv_broadcast(struct thread *td, struct ucond *cv)
2610 struct umtx_key key;
2614 error = fueword32(&cv->c_flags, &flags);
2617 if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
2622 umtxq_signal(&key, INT_MAX);
2625 error = suword32(&cv->c_has_waiters, 0);
2629 umtxq_unbusy_unlocked(&key);
2631 umtx_key_release(&key);
2636 do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag, struct _umtx_time *timeout)
2638 struct abs_timeout timo;
2640 uint32_t flags, wrflags;
2641 int32_t state, oldstate;
2642 int32_t blocked_readers;
2643 int error, error1, rv;
2646 error = fueword32(&rwlock->rw_flags, &flags);
2649 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2653 if (timeout != NULL)
2654 abs_timeout_init2(&timo, timeout);
2656 wrflags = URWLOCK_WRITE_OWNER;
2657 if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
2658 wrflags |= URWLOCK_WRITE_WAITERS;
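	/*
	 * Editorial note: rw_state packs a reader count into its low
	 * bits and the URWLOCK_WRITE_OWNER, URWLOCK_WRITE_WAITERS and
	 * URWLOCK_READ_WAITERS flags into its high bits, so a single
	 * 32-bit CAS can atomically take a read lock (state + 1) or set
	 * a waiter flag.
	 */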
2661 rv = fueword32(&rwlock->rw_state, &state);
2663 umtx_key_release(&uq->uq_key);
2667 /* try to lock it */
2668 while (!(state & wrflags)) {
2669 if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) {
2670 umtx_key_release(&uq->uq_key);
2673 rv = casueword32(&rwlock->rw_state, state,
2674 &oldstate, state + 1);
2676 umtx_key_release(&uq->uq_key);
2679 if (oldstate == state) {
2680 umtx_key_release(&uq->uq_key);
2683 error = umtxq_check_susp(td);
2692 /* grab monitor lock */
2693 umtxq_lock(&uq->uq_key);
2694 umtxq_busy(&uq->uq_key);
2695 umtxq_unlock(&uq->uq_key);
2698	 * Re-read the state in case it changed between the try-lock above
2699	 * and the check below.
2701 rv = fueword32(&rwlock->rw_state, &state);
2705 /* set read contention bit */
2706 while (error == 0 && (state & wrflags) &&
2707 !(state & URWLOCK_READ_WAITERS)) {
2708 rv = casueword32(&rwlock->rw_state, state,
2709 &oldstate, state | URWLOCK_READ_WAITERS);
2714 if (oldstate == state)
2717 error = umtxq_check_susp(td);
2722 umtxq_unbusy_unlocked(&uq->uq_key);
2726	/* The state changed while setting the flags; restart. */
2727 if (!(state & wrflags)) {
2728 umtxq_unbusy_unlocked(&uq->uq_key);
2729 error = umtxq_check_susp(td);
2736	/* The contention bit is set; before sleeping, increase the read-waiter count. */
2737 rv = fueword32(&rwlock->rw_blocked_readers,
2740 umtxq_unbusy_unlocked(&uq->uq_key);
2744 suword32(&rwlock->rw_blocked_readers, blocked_readers+1);
2746 while (state & wrflags) {
2747 umtxq_lock(&uq->uq_key);
2749 umtxq_unbusy(&uq->uq_key);
2751 error = umtxq_sleep(uq, "urdlck", timeout == NULL ?
2754 umtxq_busy(&uq->uq_key);
2756 umtxq_unlock(&uq->uq_key);
2759 rv = fueword32(&rwlock->rw_state, &state);
2766	/* Decrease the read-waiter count, and possibly clear the read-contention bit. */
2767 rv = fueword32(&rwlock->rw_blocked_readers,
2770 umtxq_unbusy_unlocked(&uq->uq_key);
2774 suword32(&rwlock->rw_blocked_readers, blocked_readers-1);
2775 if (blocked_readers == 1) {
2776 rv = fueword32(&rwlock->rw_state, &state);
2778 umtxq_unbusy_unlocked(&uq->uq_key);
2783 rv = casueword32(&rwlock->rw_state, state,
2784 &oldstate, state & ~URWLOCK_READ_WAITERS);
2789 if (oldstate == state)
2792 error1 = umtxq_check_susp(td);
2801 umtxq_unbusy_unlocked(&uq->uq_key);
2805 umtx_key_release(&uq->uq_key);
2806 if (error == ERESTART)
2812 do_rw_wrlock(struct thread *td, struct urwlock *rwlock, struct _umtx_time *timeout)
2814 struct abs_timeout timo;
2817 int32_t state, oldstate;
2818 int32_t blocked_writers;
2819 int32_t blocked_readers;
2820 int error, error1, rv;
2823 error = fueword32(&rwlock->rw_flags, &flags);
2826 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2830 if (timeout != NULL)
2831 abs_timeout_init2(&timo, timeout);
2833 blocked_readers = 0;
2835 rv = fueword32(&rwlock->rw_state, &state);
2837 umtx_key_release(&uq->uq_key);
2840 while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
2841 rv = casueword32(&rwlock->rw_state, state,
2842 &oldstate, state | URWLOCK_WRITE_OWNER);
2844 umtx_key_release(&uq->uq_key);
2847 if (oldstate == state) {
2848 umtx_key_release(&uq->uq_key);
2852 error = umtxq_check_susp(td);
2858 if (!(state & (URWLOCK_WRITE_OWNER|URWLOCK_WRITE_WAITERS)) &&
2859 blocked_readers != 0) {
2860 umtxq_lock(&uq->uq_key);
2861 umtxq_busy(&uq->uq_key);
2862 umtxq_signal_queue(&uq->uq_key, INT_MAX, UMTX_SHARED_QUEUE);
2863 umtxq_unbusy(&uq->uq_key);
2864 umtxq_unlock(&uq->uq_key);
2870 /* grab monitor lock */
2871 umtxq_lock(&uq->uq_key);
2872 umtxq_busy(&uq->uq_key);
2873 umtxq_unlock(&uq->uq_key);
2876	 * Re-read the state in case it changed between the try-lock above
2877	 * and the check below.
2879 rv = fueword32(&rwlock->rw_state, &state);
2883 while (error == 0 && ((state & URWLOCK_WRITE_OWNER) ||
2884 URWLOCK_READER_COUNT(state) != 0) &&
2885 (state & URWLOCK_WRITE_WAITERS) == 0) {
2886 rv = casueword32(&rwlock->rw_state, state,
2887 &oldstate, state | URWLOCK_WRITE_WAITERS);
2892 if (oldstate == state)
2895 error = umtxq_check_susp(td);
2900 umtxq_unbusy_unlocked(&uq->uq_key);
2904 if (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
2905 umtxq_unbusy_unlocked(&uq->uq_key);
2906 error = umtxq_check_susp(td);
2912 rv = fueword32(&rwlock->rw_blocked_writers,
2915 umtxq_unbusy_unlocked(&uq->uq_key);
2919 suword32(&rwlock->rw_blocked_writers, blocked_writers+1);
2921 while ((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) {
2922 umtxq_lock(&uq->uq_key);
2923 umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
2924 umtxq_unbusy(&uq->uq_key);
2926 error = umtxq_sleep(uq, "uwrlck", timeout == NULL ?
2929 umtxq_busy(&uq->uq_key);
2930 umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
2931 umtxq_unlock(&uq->uq_key);
2934 rv = fueword32(&rwlock->rw_state, &state);
2941 rv = fueword32(&rwlock->rw_blocked_writers,
2944 umtxq_unbusy_unlocked(&uq->uq_key);
2948 suword32(&rwlock->rw_blocked_writers, blocked_writers-1);
2949 if (blocked_writers == 1) {
2950 rv = fueword32(&rwlock->rw_state, &state);
2952 umtxq_unbusy_unlocked(&uq->uq_key);
2957 rv = casueword32(&rwlock->rw_state, state,
2958 &oldstate, state & ~URWLOCK_WRITE_WAITERS);
2963 if (oldstate == state)
2966 error1 = umtxq_check_susp(td);
2968 * We are leaving the URWLOCK_WRITE_WAITERS
2969	 * behind, but this should not harm the correctness.
2978 rv = fueword32(&rwlock->rw_blocked_readers,
2981 umtxq_unbusy_unlocked(&uq->uq_key);
2986 blocked_readers = 0;
2988 umtxq_unbusy_unlocked(&uq->uq_key);
2991 umtx_key_release(&uq->uq_key);
2992 if (error == ERESTART)
2998 do_rw_unlock(struct thread *td, struct urwlock *rwlock)
3002 int32_t state, oldstate;
3003 int error, rv, q, count;
3006 error = fueword32(&rwlock->rw_flags, &flags);
3009 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
3013 error = fueword32(&rwlock->rw_state, &state);
3018 if (state & URWLOCK_WRITE_OWNER) {
3020 rv = casueword32(&rwlock->rw_state, state,
3021 &oldstate, state & ~URWLOCK_WRITE_OWNER);
3026 if (oldstate != state) {
3028 if (!(oldstate & URWLOCK_WRITE_OWNER)) {
3032 error = umtxq_check_susp(td);
3038 } else if (URWLOCK_READER_COUNT(state) != 0) {
3040 rv = casueword32(&rwlock->rw_state, state,
3041 &oldstate, state - 1);
3046 if (oldstate != state) {
3048 if (URWLOCK_READER_COUNT(oldstate) == 0) {
3052 error = umtxq_check_susp(td);
3065 if (!(flags & URWLOCK_PREFER_READER)) {
3066 if (state & URWLOCK_WRITE_WAITERS) {
3068 q = UMTX_EXCLUSIVE_QUEUE;
3069 } else if (state & URWLOCK_READ_WAITERS) {
3071 q = UMTX_SHARED_QUEUE;
3074 if (state & URWLOCK_READ_WAITERS) {
3076 q = UMTX_SHARED_QUEUE;
3077 } else if (state & URWLOCK_WRITE_WAITERS) {
3079 q = UMTX_EXCLUSIVE_QUEUE;
3084 umtxq_lock(&uq->uq_key);
3085 umtxq_busy(&uq->uq_key);
3086 umtxq_signal_queue(&uq->uq_key, count, q);
3087 umtxq_unbusy(&uq->uq_key);
3088 umtxq_unlock(&uq->uq_key);
3091 umtx_key_release(&uq->uq_key);
3095 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
3097 do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
3099 struct abs_timeout timo;
3101 uint32_t flags, count, count1;
3105 error = fueword32(&sem->_flags, &flags);
3108 error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
3112 if (timeout != NULL)
3113 abs_timeout_init2(&timo, timeout);
3115 umtxq_lock(&uq->uq_key);
3116 umtxq_busy(&uq->uq_key);
3118 umtxq_unlock(&uq->uq_key);
3119 rv = casueword32(&sem->_has_waiters, 0, &count1, 1);
3121 rv = fueword32(&sem->_count, &count);
3122 if (rv == -1 || count != 0) {
3123 umtxq_lock(&uq->uq_key);
3124 umtxq_unbusy(&uq->uq_key);
3126 umtxq_unlock(&uq->uq_key);
3127 umtx_key_release(&uq->uq_key);
3128 return (rv == -1 ? EFAULT : 0);
3130 umtxq_lock(&uq->uq_key);
3131 umtxq_unbusy(&uq->uq_key);
3133 error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);
3135 if ((uq->uq_flags & UQF_UMTXQ) == 0)
3139 /* A relative timeout cannot be restarted. */
3140 if (error == ERESTART && timeout != NULL &&
3141 (timeout->_flags & UMTX_ABSTIME) == 0)
3144 umtxq_unlock(&uq->uq_key);
3145 umtx_key_release(&uq->uq_key);
3150 * Signal a userland semaphore.
3153 do_sem_wake(struct thread *td, struct _usem *sem)
3155 struct umtx_key key;
3159 error = fueword32(&sem->_flags, &flags);
3162 if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
3166 cnt = umtxq_count(&key);
3169 * Check if count is greater than 0, this means the memory is
3170 * still being referenced by user code, so we can safely
3171 * update _has_waiters flag.
3175 error = suword32(&sem->_has_waiters, 0);
3180 umtxq_signal(&key, 1);
3184 umtx_key_release(&key);
3190 do_sem2_wait(struct thread *td, struct _usem2 *sem, struct _umtx_time *timeout)
3192 struct abs_timeout timo;
3194 uint32_t count, flags;
3198 flags = fuword32(&sem->_flags);
3199 error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
3203 if (timeout != NULL)
3204 abs_timeout_init2(&timo, timeout);
3206 umtxq_lock(&uq->uq_key);
3207 umtxq_busy(&uq->uq_key);
3209 umtxq_unlock(&uq->uq_key);
3210 rv = fueword32(&sem->_count, &count);
3212 umtxq_lock(&uq->uq_key);
3213 umtxq_unbusy(&uq->uq_key);
3215 umtxq_unlock(&uq->uq_key);
3216 umtx_key_release(&uq->uq_key);
3220 if (USEM_COUNT(count) != 0) {
3221 umtxq_lock(&uq->uq_key);
3222 umtxq_unbusy(&uq->uq_key);
3224 umtxq_unlock(&uq->uq_key);
3225 umtx_key_release(&uq->uq_key);
3228 if (count == USEM_HAS_WAITERS)
3230 rv = casueword32(&sem->_count, 0, &count, USEM_HAS_WAITERS);
3232 umtxq_lock(&uq->uq_key);
3233 umtxq_unbusy(&uq->uq_key);
3235 umtxq_unlock(&uq->uq_key);
3236 umtx_key_release(&uq->uq_key);
3242 umtxq_lock(&uq->uq_key);
3243 umtxq_unbusy(&uq->uq_key);
3245 error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);
3247 if ((uq->uq_flags & UQF_UMTXQ) == 0)
3251 if (timeout != NULL && (timeout->_flags & UMTX_ABSTIME) == 0) {
3252 /* A relative timeout cannot be restarted. */
3253 if (error == ERESTART)
3255 if (error == EINTR) {
3256 abs_timeout_update(&timo);
3257 timeout->_timeout = timo.end;
3258 timespecsub(&timeout->_timeout, &timo.cur);
3262 umtxq_unlock(&uq->uq_key);
3263 umtx_key_release(&uq->uq_key);
3268 * Signal a userland semaphore.
3271 do_sem2_wake(struct thread *td, struct _usem2 *sem)
3273 struct umtx_key key;
3275 uint32_t count, flags;
3277 rv = fueword32(&sem->_flags, &flags);
3280 if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
3284 cnt = umtxq_count(&key);
3287 * If this was the last sleeping thread, clear the waiters
3292 rv = fueword32(&sem->_count, &count);
3293 while (rv != -1 && count & USEM_HAS_WAITERS)
3294 rv = casueword32(&sem->_count, count, &count,
3295 count & ~USEM_HAS_WAITERS);
3301 umtxq_signal(&key, 1);
3305 umtx_key_release(&key);
3310 umtx_copyin_timeout(const void *addr, struct timespec *tsp)
3314 error = copyin(addr, tsp, sizeof(struct timespec));
3316 if (tsp->tv_sec < 0 ||
3317 tsp->tv_nsec >= 1000000000 ||
3325 umtx_copyin_umtx_time(const void *addr, size_t size, struct _umtx_time *tp)
3329 if (size <= sizeof(struct timespec)) {
3330 tp->_clockid = CLOCK_REALTIME;
3332 error = copyin(addr, &tp->_timeout, sizeof(struct timespec));
3334 error = copyin(addr, tp, sizeof(struct _umtx_time));
3337 if (tp->_timeout.tv_sec < 0 ||
3338 tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0)
3344 __umtx_op_unimpl(struct thread *td, struct _umtx_op_args *uap)
3347 return (EOPNOTSUPP);
3351 __umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
3353 struct _umtx_time timeout, *tm_p;
3356 if (uap->uaddr2 == NULL)
3359 error = umtx_copyin_umtx_time(
3360 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3365 return (do_wait(td, uap->obj, uap->val, tm_p, 0, 0));
3369 __umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
3371 struct _umtx_time timeout, *tm_p;
3374 if (uap->uaddr2 == NULL)
3377 error = umtx_copyin_umtx_time(
3378 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3383 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
3387 __umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
3389 struct _umtx_time *tm_p, timeout;
3392 if (uap->uaddr2 == NULL)
3395 error = umtx_copyin_umtx_time(
3396 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3401 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
3405 __umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
3408 return (kern_umtx_wake(td, uap->obj, uap->val, 0));
3411 #define BATCH_SIZE 128
3413 __umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap)
3415 char *uaddrs[BATCH_SIZE], **upp;
3416 int count, error, i, pos, tocopy;
3418 upp = (char **)uap->obj;
3420 for (count = uap->val, pos = 0; count > 0; count -= tocopy,
3422 tocopy = MIN(count, BATCH_SIZE);
3423 error = copyin(upp + pos, uaddrs, tocopy * sizeof(char *));
3426 for (i = 0; i < tocopy; ++i)
3427 kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
3434 __umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
3437 return (kern_umtx_wake(td, uap->obj, uap->val, 1));
3441 __umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
3443 struct _umtx_time *tm_p, timeout;
3446 /* Allow a null timespec (wait forever). */
3447 if (uap->uaddr2 == NULL)
3450 error = umtx_copyin_umtx_time(
3451 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3456 return (do_lock_umutex(td, uap->obj, tm_p, 0));
3460 __umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
3463 return (do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY));
3467 __umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
3469 struct _umtx_time *tm_p, timeout;
3472 /* Allow a null timespec (wait forever). */
3473 if (uap->uaddr2 == NULL)
3476 error = umtx_copyin_umtx_time(
3477 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3482 return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
3486 __umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap)
3489 return (do_wake_umutex(td, uap->obj));
3493 __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
3496 return (do_unlock_umutex(td, uap->obj, false));
3500 __umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap)
3503 return (do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1));
3507 __umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
3509 struct timespec *ts, timeout;
3512 /* Allow a null timespec (wait forever). */
3513 if (uap->uaddr2 == NULL)
3516 error = umtx_copyin_timeout(uap->uaddr2, &timeout);
3521 return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
3525 __umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap)
3528 return (do_cv_signal(td, uap->obj));
3532 __umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap)
3535 return (do_cv_broadcast(td, uap->obj));
3539 __umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
3541 struct _umtx_time timeout;
3544 /* Allow a null timespec (wait forever). */
3545 if (uap->uaddr2 == NULL) {
3546 error = do_rw_rdlock(td, uap->obj, uap->val, 0);
3548 error = umtx_copyin_umtx_time(uap->uaddr2,
3549 (size_t)uap->uaddr1, &timeout);
3552 error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
3558 __umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
3560 struct _umtx_time timeout;
3563 /* Allow a null timespec (wait forever). */
3564 if (uap->uaddr2 == NULL) {
3565 error = do_rw_wrlock(td, uap->obj, 0);
3567 error = umtx_copyin_umtx_time(uap->uaddr2,
3568 (size_t)uap->uaddr1, &timeout);
3572 error = do_rw_wrlock(td, uap->obj, &timeout);
3578 __umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
3581 return (do_rw_unlock(td, uap->obj));
3584 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
3586 __umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap)
3588 struct _umtx_time *tm_p, timeout;
3591 /* Allow a null timespec (wait forever). */
3592 if (uap->uaddr2 == NULL)
3595 error = umtx_copyin_umtx_time(
3596 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3601 return (do_sem_wait(td, uap->obj, tm_p));
3605 __umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap)
3608 return (do_sem_wake(td, uap->obj));
3613 __umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap)
3616 return (do_wake2_umutex(td, uap->obj, uap->val));
3620 __umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap)
3622 struct _umtx_time *tm_p, timeout;
3626 /* Allow a null timespec (wait forever). */
3627 if (uap->uaddr2 == NULL) {
3631 uasize = (size_t)uap->uaddr1;
3632 error = umtx_copyin_umtx_time(uap->uaddr2, uasize, &timeout);
3637 error = do_sem2_wait(td, uap->obj, tm_p);
3638 if (error == EINTR && uap->uaddr2 != NULL &&
3639 (timeout._flags & UMTX_ABSTIME) == 0 &&
3640 uasize >= sizeof(struct _umtx_time) + sizeof(struct timespec)) {
3641 error = copyout(&timeout._timeout,
3642 (struct _umtx_time *)uap->uaddr2 + 1,
3643 sizeof(struct timespec));
3653 __umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap)
3656 return (do_sem2_wake(td, uap->obj));
3659 #define USHM_OBJ_UMTX(o) \
3660 ((struct umtx_shm_obj_list *)(&(o)->umtx_data))
3662 #define USHMF_REG_LINKED 0x0001
3663 #define USHMF_OBJ_LINKED 0x0002
3664 struct umtx_shm_reg {
3665 TAILQ_ENTRY(umtx_shm_reg) ushm_reg_link;
3666 LIST_ENTRY(umtx_shm_reg) ushm_obj_link;
3667 struct umtx_key ushm_key;
3668 struct ucred *ushm_cred;
3669 struct shmfd *ushm_obj;
3674 LIST_HEAD(umtx_shm_obj_list, umtx_shm_reg);
3675 TAILQ_HEAD(umtx_shm_reg_head, umtx_shm_reg);
3677 static uma_zone_t umtx_shm_reg_zone;
3678 static struct umtx_shm_reg_head umtx_shm_registry[UMTX_CHAINS];
3679 static struct mtx umtx_shm_lock;
3680 static struct umtx_shm_reg_head umtx_shm_reg_delfree =
3681 TAILQ_HEAD_INITIALIZER(umtx_shm_reg_delfree);
3683 static void umtx_shm_free_reg(struct umtx_shm_reg *reg);
3686 umtx_shm_reg_delfree_tq(void *context __unused, int pending __unused)
3688 struct umtx_shm_reg_head d;
3689 struct umtx_shm_reg *reg, *reg1;
3692 mtx_lock(&umtx_shm_lock);
3693 TAILQ_CONCAT(&d, &umtx_shm_reg_delfree, ushm_reg_link);
3694 mtx_unlock(&umtx_shm_lock);
3695 TAILQ_FOREACH_SAFE(reg, &d, ushm_reg_link, reg1) {
3696 TAILQ_REMOVE(&d, reg, ushm_reg_link);
3697 umtx_shm_free_reg(reg);
3701 static struct task umtx_shm_reg_delfree_task =
3702 TASK_INITIALIZER(0, umtx_shm_reg_delfree_tq, NULL);
3704 static struct umtx_shm_reg *
3705 umtx_shm_find_reg_locked(const struct umtx_key *key)
3707 struct umtx_shm_reg *reg;
3708 struct umtx_shm_reg_head *reg_head;
3710 KASSERT(key->shared, ("umtx_p_find_rg: private key"));
3711 mtx_assert(&umtx_shm_lock, MA_OWNED);
3712 reg_head = &umtx_shm_registry[key->hash];
3713 TAILQ_FOREACH(reg, reg_head, ushm_reg_link) {
3714 KASSERT(reg->ushm_key.shared,
3715 ("non-shared key on reg %p %d", reg, reg->ushm_key.shared));
3716 if (reg->ushm_key.info.shared.object ==
3717 key->info.shared.object &&
3718 reg->ushm_key.info.shared.offset ==
3719 key->info.shared.offset) {
3720 KASSERT(reg->ushm_key.type == TYPE_SHM, ("TYPE_USHM"));
3721 KASSERT(reg->ushm_refcnt > 0,
3722 ("reg %p refcnt 0 onlist", reg));
3723 KASSERT((reg->ushm_flags & USHMF_REG_LINKED) != 0,
3724 ("reg %p not linked", reg));
3732 static struct umtx_shm_reg *
3733 umtx_shm_find_reg(const struct umtx_key *key)
3735 struct umtx_shm_reg *reg;
3737 mtx_lock(&umtx_shm_lock);
3738 reg = umtx_shm_find_reg_locked(key);
3739 mtx_unlock(&umtx_shm_lock);
3744 umtx_shm_free_reg(struct umtx_shm_reg *reg)
3747 chgumtxcnt(reg->ushm_cred->cr_ruidinfo, -1, 0);
3748 crfree(reg->ushm_cred);
3749 shm_drop(reg->ushm_obj);
3750 uma_zfree(umtx_shm_reg_zone, reg);
3754 umtx_shm_unref_reg_locked(struct umtx_shm_reg *reg, bool force)
3758 mtx_assert(&umtx_shm_lock, MA_OWNED);
3759 KASSERT(reg->ushm_refcnt > 0, ("ushm_reg %p refcnt 0", reg));
3761 res = reg->ushm_refcnt == 0;
3763 if ((reg->ushm_flags & USHMF_REG_LINKED) != 0) {
3764 TAILQ_REMOVE(&umtx_shm_registry[reg->ushm_key.hash],
3765 reg, ushm_reg_link);
3766 reg->ushm_flags &= ~USHMF_REG_LINKED;
3768 if ((reg->ushm_flags & USHMF_OBJ_LINKED) != 0) {
3769 LIST_REMOVE(reg, ushm_obj_link);
3770 reg->ushm_flags &= ~USHMF_OBJ_LINKED;
3777 umtx_shm_unref_reg(struct umtx_shm_reg *reg, bool force)
3783 object = reg->ushm_obj->shm_object;
3784 VM_OBJECT_WLOCK(object);
3785 object->flags |= OBJ_UMTXDEAD;
3786 VM_OBJECT_WUNLOCK(object);
3788 mtx_lock(&umtx_shm_lock);
3789 dofree = umtx_shm_unref_reg_locked(reg, force);
3790 mtx_unlock(&umtx_shm_lock);
3792 umtx_shm_free_reg(reg);
3796 umtx_shm_object_init(vm_object_t object)
3799 LIST_INIT(USHM_OBJ_UMTX(object));
3803 umtx_shm_object_terminated(vm_object_t object)
3805 struct umtx_shm_reg *reg, *reg1;
3809 mtx_lock(&umtx_shm_lock);
3810 LIST_FOREACH_SAFE(reg, USHM_OBJ_UMTX(object), ushm_obj_link, reg1) {
3811 if (umtx_shm_unref_reg_locked(reg, true)) {
3812 TAILQ_INSERT_TAIL(&umtx_shm_reg_delfree, reg,
3817 mtx_unlock(&umtx_shm_lock);
3819 taskqueue_enqueue(taskqueue_thread, &umtx_shm_reg_delfree_task);
3823 umtx_shm_create_reg(struct thread *td, const struct umtx_key *key,
3824 struct umtx_shm_reg **res)
3826 struct umtx_shm_reg *reg, *reg1;
3830 reg = umtx_shm_find_reg(key);
3835 cred = td->td_ucred;
3836 if (!chgumtxcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_UMTXP)))
3838 reg = uma_zalloc(umtx_shm_reg_zone, M_WAITOK | M_ZERO);
3839 reg->ushm_refcnt = 1;
3840 bcopy(key, ®->ushm_key, sizeof(*key));
3841 reg->ushm_obj = shm_alloc(td->td_ucred, O_RDWR);
3842 reg->ushm_cred = crhold(cred);
3843 error = shm_dotruncate(reg->ushm_obj, PAGE_SIZE);
3845 umtx_shm_free_reg(reg);
3848 mtx_lock(&umtx_shm_lock);
3849 reg1 = umtx_shm_find_reg_locked(key);
3851 mtx_unlock(&umtx_shm_lock);
3852 umtx_shm_free_reg(reg);
3857 TAILQ_INSERT_TAIL(&umtx_shm_registry[key->hash], reg, ushm_reg_link);
3858 LIST_INSERT_HEAD(USHM_OBJ_UMTX(key->info.shared.object), reg,
3860 reg->ushm_flags = USHMF_REG_LINKED | USHMF_OBJ_LINKED;
3861 mtx_unlock(&umtx_shm_lock);
3867 umtx_shm_alive(struct thread *td, void *addr)
3870 vm_map_entry_t entry;
3877 map = &td->td_proc->p_vmspace->vm_map;
3878 res = vm_map_lookup(&map, (uintptr_t)addr, VM_PROT_READ, &entry,
3879 &object, &pindex, &prot, &wired);
3880 if (res != KERN_SUCCESS)
3885 ret = (object->flags & OBJ_UMTXDEAD) != 0 ? ENOTTY : 0;
3886 vm_map_lookup_done(map, entry);
3895 umtx_shm_reg_zone = uma_zcreate("umtx_shm", sizeof(struct umtx_shm_reg),
3896 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
3897 mtx_init(&umtx_shm_lock, "umtxshm", NULL, MTX_DEF);
3898 for (i = 0; i < nitems(umtx_shm_registry); i++)
3899 TAILQ_INIT(&umtx_shm_registry[i]);
3903 umtx_shm(struct thread *td, void *addr, u_int flags)
3905 struct umtx_key key;
3906 struct umtx_shm_reg *reg;
3910 if (__bitcount(flags & (UMTX_SHM_CREAT | UMTX_SHM_LOOKUP |
3911 UMTX_SHM_DESTROY| UMTX_SHM_ALIVE)) != 1)
3913 if ((flags & UMTX_SHM_ALIVE) != 0)
3914 return (umtx_shm_alive(td, addr));
3915 error = umtx_key_get(addr, TYPE_SHM, PROCESS_SHARE, &key);
3918 KASSERT(key.shared == 1, ("non-shared key"));
3919 if ((flags & UMTX_SHM_CREAT) != 0) {
3920 error = umtx_shm_create_reg(td, &key, ®);
3922 reg = umtx_shm_find_reg(&key);
3926 umtx_key_release(&key);
3929 KASSERT(reg != NULL, ("no reg"));
3930 if ((flags & UMTX_SHM_DESTROY) != 0) {
3931 umtx_shm_unref_reg(reg, true);
3935 error = mac_posixshm_check_open(td->td_ucred,
3936 reg->ushm_obj, FFLAGS(O_RDWR));
3939 error = shm_access(reg->ushm_obj, td->td_ucred,
3943 error = falloc_caps(td, &fp, &fd, O_CLOEXEC, NULL);
3945 shm_hold(reg->ushm_obj);
3946 finit(fp, FFLAGS(O_RDWR), DTYPE_SHM, reg->ushm_obj,
3948 td->td_retval[0] = fd;
3952 umtx_shm_unref_reg(reg, false);
3957 __umtx_op_shm(struct thread *td, struct _umtx_op_args *uap)
3960 return (umtx_shm(td, uap->uaddr1, uap->val));
3964 umtx_robust_lists(struct thread *td, struct umtx_robust_lists_params *rbp)
3967 td->td_rb_list = rbp->robust_list_offset;
3968 td->td_rbp_list = rbp->robust_priv_list_offset;
3969 td->td_rb_inact = rbp->robust_inact_offset;
3974 __umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap)
3976 struct umtx_robust_lists_params rb;
3979 if (uap->val > sizeof(rb))
3981 bzero(&rb, sizeof(rb));
3982 error = copyin(uap->uaddr1, &rb, uap->val);
3985 return (umtx_robust_lists(td, &rb));
3988 typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap);
3990 static const _umtx_op_func op_table[] = {
3991 [UMTX_OP_RESERVED0] = __umtx_op_unimpl,
3992 [UMTX_OP_RESERVED1] = __umtx_op_unimpl,
3993 [UMTX_OP_WAIT] = __umtx_op_wait,
3994 [UMTX_OP_WAKE] = __umtx_op_wake,
3995 [UMTX_OP_MUTEX_TRYLOCK] = __umtx_op_trylock_umutex,
3996 [UMTX_OP_MUTEX_LOCK] = __umtx_op_lock_umutex,
3997 [UMTX_OP_MUTEX_UNLOCK] = __umtx_op_unlock_umutex,
3998 [UMTX_OP_SET_CEILING] = __umtx_op_set_ceiling,
3999 [UMTX_OP_CV_WAIT] = __umtx_op_cv_wait,
4000 [UMTX_OP_CV_SIGNAL] = __umtx_op_cv_signal,
4001 [UMTX_OP_CV_BROADCAST] = __umtx_op_cv_broadcast,
4002 [UMTX_OP_WAIT_UINT] = __umtx_op_wait_uint,
4003 [UMTX_OP_RW_RDLOCK] = __umtx_op_rw_rdlock,
4004 [UMTX_OP_RW_WRLOCK] = __umtx_op_rw_wrlock,
4005 [UMTX_OP_RW_UNLOCK] = __umtx_op_rw_unlock,
4006 [UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private,
4007 [UMTX_OP_WAKE_PRIVATE] = __umtx_op_wake_private,
4008 [UMTX_OP_MUTEX_WAIT] = __umtx_op_wait_umutex,
4009 [UMTX_OP_MUTEX_WAKE] = __umtx_op_wake_umutex,
4010 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
4011 [UMTX_OP_SEM_WAIT] = __umtx_op_sem_wait,
4012 [UMTX_OP_SEM_WAKE] = __umtx_op_sem_wake,
4014 [UMTX_OP_SEM_WAIT] = __umtx_op_unimpl,
4015 [UMTX_OP_SEM_WAKE] = __umtx_op_unimpl,
4017 [UMTX_OP_NWAKE_PRIVATE] = __umtx_op_nwake_private,
4018 [UMTX_OP_MUTEX_WAKE2] = __umtx_op_wake2_umutex,
4019 [UMTX_OP_SEM2_WAIT] = __umtx_op_sem2_wait,
4020 [UMTX_OP_SEM2_WAKE] = __umtx_op_sem2_wake,
4021 [UMTX_OP_SHM] = __umtx_op_shm,
4022 [UMTX_OP_ROBUST_LISTS] = __umtx_op_robust_lists,
4026 sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
4029 if ((unsigned)uap->op < nitems(op_table))
4030 return (*op_table[uap->op])(td, uap);
4034 #ifdef COMPAT_FREEBSD32
4041 struct umtx_time32 {
4042 struct timespec32 timeout;
4048 umtx_copyin_timeout32(void *addr, struct timespec *tsp)
4050 struct timespec32 ts32;
4053 error = copyin(addr, &ts32, sizeof(struct timespec32));
4055 if (ts32.tv_sec < 0 ||
4056 ts32.tv_nsec >= 1000000000 ||
4060 tsp->tv_sec = ts32.tv_sec;
4061 tsp->tv_nsec = ts32.tv_nsec;
4068 umtx_copyin_umtx_time32(const void *addr, size_t size, struct _umtx_time *tp)
4070 struct umtx_time32 t32;
4073 t32.clockid = CLOCK_REALTIME;
4075 if (size <= sizeof(struct timespec32))
4076 error = copyin(addr, &t32.timeout, sizeof(struct timespec32));
4078 error = copyin(addr, &t32, sizeof(struct umtx_time32));
4081 if (t32.timeout.tv_sec < 0 ||
4082 t32.timeout.tv_nsec >= 1000000000 || t32.timeout.tv_nsec < 0)
4084 tp->_timeout.tv_sec = t32.timeout.tv_sec;
4085 tp->_timeout.tv_nsec = t32.timeout.tv_nsec;
4086 tp->_flags = t32.flags;
4087 tp->_clockid = t32.clockid;
4092 __umtx_op_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
4094 struct _umtx_time *tm_p, timeout;
4097 if (uap->uaddr2 == NULL)
4100 error = umtx_copyin_umtx_time32(uap->uaddr2,
4101 (size_t)uap->uaddr1, &timeout);
4106 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
4110 __umtx_op_lock_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
4112 struct _umtx_time *tm_p, timeout;
4115 /* Allow a null timespec (wait forever). */
4116 if (uap->uaddr2 == NULL)
4119 error = umtx_copyin_umtx_time(uap->uaddr2,
4120 (size_t)uap->uaddr1, &timeout);
4125 return (do_lock_umutex(td, uap->obj, tm_p, 0));
4129 __umtx_op_wait_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
4131 struct _umtx_time *tm_p, timeout;
4134 /* Allow a null timespec (wait forever). */
4135 if (uap->uaddr2 == NULL)
4138 error = umtx_copyin_umtx_time32(uap->uaddr2,
4139 (size_t)uap->uaddr1, &timeout);
4144 return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
4148 __umtx_op_cv_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
4150 struct timespec *ts, timeout;
4153 /* Allow a null timespec (wait forever). */
4154 if (uap->uaddr2 == NULL)
4157 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
4162 return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
4166 __umtx_op_rw_rdlock_compat32(struct thread *td, struct _umtx_op_args *uap)
4168 struct _umtx_time timeout;
4171 /* Allow a null timespec (wait forever). */
4172 if (uap->uaddr2 == NULL) {
4173 error = do_rw_rdlock(td, uap->obj, uap->val, 0);
4175 error = umtx_copyin_umtx_time32(uap->uaddr2,
4176 (size_t)uap->uaddr1, &timeout);
4179 error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
4185 __umtx_op_rw_wrlock_compat32(struct thread *td, struct _umtx_op_args *uap)
4187 struct _umtx_time timeout;
4190 /* Allow a null timespec (wait forever). */
4191 if (uap->uaddr2 == NULL) {
4192 error = do_rw_wrlock(td, uap->obj, 0);
4194 error = umtx_copyin_umtx_time32(uap->uaddr2,
4195 (size_t)uap->uaddr1, &timeout);
4198 error = do_rw_wrlock(td, uap->obj, &timeout);
4204 __umtx_op_wait_uint_private_compat32(struct thread *td, struct _umtx_op_args *uap)
4206 struct _umtx_time *tm_p, timeout;
4209 if (uap->uaddr2 == NULL)
4212 error = umtx_copyin_umtx_time32(
4213 uap->uaddr2, (size_t)uap->uaddr1,&timeout);
4218 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
4221 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
4223 __umtx_op_sem_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
4225 struct _umtx_time *tm_p, timeout;
4228 /* Allow a null timespec (wait forever). */
4229 if (uap->uaddr2 == NULL)
4232 error = umtx_copyin_umtx_time32(uap->uaddr2,
4233 (size_t)uap->uaddr1, &timeout);
4238 return (do_sem_wait(td, uap->obj, tm_p));
4243 __umtx_op_sem2_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
4245 struct _umtx_time *tm_p, timeout;
4249 /* Allow a null timespec (wait forever). */
4250 if (uap->uaddr2 == NULL) {
4254 uasize = (size_t)uap->uaddr1;
4255 error = umtx_copyin_umtx_time32(uap->uaddr2, uasize, &timeout);
4260 error = do_sem2_wait(td, uap->obj, tm_p);
4261 if (error == EINTR && uap->uaddr2 != NULL &&
4262 (timeout._flags & UMTX_ABSTIME) == 0 &&
4263 uasize >= sizeof(struct umtx_time32) + sizeof(struct timespec32)) {
4264 struct timespec32 remain32 = {
4265 .tv_sec = timeout._timeout.tv_sec,
4266 .tv_nsec = timeout._timeout.tv_nsec
4268 error = copyout(&remain32,
4269 (struct umtx_time32 *)uap->uaddr2 + 1,
4270 sizeof(struct timespec32));
4280 __umtx_op_nwake_private32(struct thread *td, struct _umtx_op_args *uap)
4282 uint32_t uaddrs[BATCH_SIZE], **upp;
4283 int count, error, i, pos, tocopy;
4285 upp = (uint32_t **)uap->obj;
4287 for (count = uap->val, pos = 0; count > 0; count -= tocopy,
4289 tocopy = MIN(count, BATCH_SIZE);
4290 error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t));
4293 for (i = 0; i < tocopy; ++i)
4294 kern_umtx_wake(td, (void *)(intptr_t)uaddrs[i],
4301 struct umtx_robust_lists_params_compat32 {
4302 uint32_t robust_list_offset;
4303 uint32_t robust_priv_list_offset;
4304 uint32_t robust_inact_offset;
4308 __umtx_op_robust_lists_compat32(struct thread *td, struct _umtx_op_args *uap)
4310 struct umtx_robust_lists_params rb;
4311 struct umtx_robust_lists_params_compat32 rb32;
4314 if (uap->val > sizeof(rb32))
4316 bzero(&rb, sizeof(rb));
4317 bzero(&rb32, sizeof(rb32));
4318 error = copyin(uap->uaddr1, &rb32, uap->val);
4321 rb.robust_list_offset = rb32.robust_list_offset;
4322 rb.robust_priv_list_offset = rb32.robust_priv_list_offset;
4323 rb.robust_inact_offset = rb32.robust_inact_offset;
4324 return (umtx_robust_lists(td, &rb));
4327 static const _umtx_op_func op_table_compat32[] = {
4328 [UMTX_OP_RESERVED0] = __umtx_op_unimpl,
4329 [UMTX_OP_RESERVED1] = __umtx_op_unimpl,
4330 [UMTX_OP_WAIT] = __umtx_op_wait_compat32,
4331 [UMTX_OP_WAKE] = __umtx_op_wake,
4332 [UMTX_OP_MUTEX_TRYLOCK] = __umtx_op_trylock_umutex,
4333 [UMTX_OP_MUTEX_LOCK] = __umtx_op_lock_umutex_compat32,
4334 [UMTX_OP_MUTEX_UNLOCK] = __umtx_op_unlock_umutex,
4335 [UMTX_OP_SET_CEILING] = __umtx_op_set_ceiling,
4336 [UMTX_OP_CV_WAIT] = __umtx_op_cv_wait_compat32,
4337 [UMTX_OP_CV_SIGNAL] = __umtx_op_cv_signal,
4338 [UMTX_OP_CV_BROADCAST] = __umtx_op_cv_broadcast,
4339 [UMTX_OP_WAIT_UINT] = __umtx_op_wait_compat32,
4340 [UMTX_OP_RW_RDLOCK] = __umtx_op_rw_rdlock_compat32,
4341 [UMTX_OP_RW_WRLOCK] = __umtx_op_rw_wrlock_compat32,
4342 [UMTX_OP_RW_UNLOCK] = __umtx_op_rw_unlock,
4343 [UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private_compat32,
4344 [UMTX_OP_WAKE_PRIVATE] = __umtx_op_wake_private,
4345 [UMTX_OP_MUTEX_WAIT] = __umtx_op_wait_umutex_compat32,
4346 [UMTX_OP_MUTEX_WAKE] = __umtx_op_wake_umutex,
4347 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
4348 [UMTX_OP_SEM_WAIT] = __umtx_op_sem_wait_compat32,
4349 [UMTX_OP_SEM_WAKE] = __umtx_op_sem_wake,
4351 [UMTX_OP_SEM_WAIT] = __umtx_op_unimpl,
4352 [UMTX_OP_SEM_WAKE] = __umtx_op_unimpl,
4354 [UMTX_OP_NWAKE_PRIVATE] = __umtx_op_nwake_private32,
4355 [UMTX_OP_MUTEX_WAKE2] = __umtx_op_wake2_umutex,
4356 [UMTX_OP_SEM2_WAIT] = __umtx_op_sem2_wait_compat32,
4357 [UMTX_OP_SEM2_WAKE] = __umtx_op_sem2_wake,
4358 [UMTX_OP_SHM] = __umtx_op_shm,
4359 [UMTX_OP_ROBUST_LISTS] = __umtx_op_robust_lists_compat32,
4363 freebsd32_umtx_op(struct thread *td, struct freebsd32_umtx_op_args *uap)
4366 if ((unsigned)uap->op < nitems(op_table_compat32)) {
4367 return (*op_table_compat32[uap->op])(td,
4368 (struct _umtx_op_args *)uap);
4375 umtx_thread_init(struct thread *td)
4378 td->td_umtxq = umtxq_alloc();
4379 td->td_umtxq->uq_thread = td;
4383 umtx_thread_fini(struct thread *td)
4386 umtxq_free(td->td_umtxq);
4390 * It will be called when new thread is created, e.g fork().
4393 umtx_thread_alloc(struct thread *td)
4398 uq->uq_inherited_pri = PRI_MAX;
4400 KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
4401 KASSERT(uq->uq_thread == td, ("uq_thread != td"));
4402 KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
4403 KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
4409 * Clear robust lists for all process' threads, not delaying the
4410 * cleanup to thread_exit hook, since the relevant address space is
4411 * destroyed right now.
4414 umtx_exec_hook(void *arg __unused, struct proc *p,
4415 struct image_params *imgp __unused)
4419 KASSERT(p == curproc, ("need curproc"));
4421 KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
4422 (p->p_flag & P_STOPPED_SINGLE) != 0,
4423 ("curproc must be single-threaded"));
4424 FOREACH_THREAD_IN_PROC(p, td) {
4425 KASSERT(td == curthread ||
4426 ((td->td_flags & TDF_BOUNDARY) != 0 && TD_IS_SUSPENDED(td)),
4427 ("running thread %p %p", p, td));
4429 umtx_thread_cleanup(td);
4431 td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
4437 * thread_exit() hook.
4440 umtx_thread_exit(struct thread *td)
4443 umtx_thread_cleanup(td);
4447 umtx_read_uptr(struct thread *td, uintptr_t ptr, uintptr_t *res)
4450 #ifdef COMPAT_FREEBSD32
4455 #ifdef COMPAT_FREEBSD32
4456 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
4457 error = fueword32((void *)ptr, &res32);
4463 error = fueword((void *)ptr, &res1);
4473 umtx_read_rb_list(struct thread *td, struct umutex *m, uintptr_t *rb_list)
4475 #ifdef COMPAT_FREEBSD32
4476 struct umutex32 m32;
4478 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
4479 memcpy(&m32, m, sizeof(m32));
4480 *rb_list = m32.m_rb_lnk;
4483 *rb_list = m->m_rb_lnk;
4487 umtx_handle_rb(struct thread *td, uintptr_t rbp, uintptr_t *rb_list, bool inact)
4492 KASSERT(td->td_proc == curproc, ("need current vmspace"));
4493 error = copyin((void *)rbp, &m, sizeof(m));
4496 if (rb_list != NULL)
4497 umtx_read_rb_list(td, &m, rb_list);
4498 if ((m.m_flags & UMUTEX_ROBUST) == 0)
4500 if ((m.m_owner & ~UMUTEX_CONTESTED) != td->td_tid)
4501 /* inact is cleared after unlock, allow the inconsistency */
4502 return (inact ? 0 : EINVAL);
4503 return (do_unlock_umutex(td, (struct umutex *)rbp, true));
4507 umtx_cleanup_rb_list(struct thread *td, uintptr_t rb_list, uintptr_t *rb_inact,
4516 error = umtx_read_uptr(td, rb_list, &rbp);
4517 for (i = 0; error == 0 && rbp != 0 && i < umtx_max_rb; i++) {
4518 if (rbp == *rb_inact) {
4523 error = umtx_handle_rb(td, rbp, &rbp, inact);
4525 if (i == umtx_max_rb && umtx_verbose_rb) {
4526 uprintf("comm %s pid %d: reached umtx %smax rb %d\n",
4527 td->td_proc->p_comm, td->td_proc->p_pid, name, umtx_max_rb);
4529 if (error != 0 && umtx_verbose_rb) {
4530 uprintf("comm %s pid %d: handling %srb error %d\n",
4531 td->td_proc->p_comm, td->td_proc->p_pid, name, error);
4536 * Clean up umtx data.
4539 umtx_thread_cleanup(struct thread *td)
4546 * Disown pi mutexes.
4550 mtx_lock(&umtx_lock);
4551 uq->uq_inherited_pri = PRI_MAX;
4552 while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
4553 pi->pi_owner = NULL;
4554 TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
4556 mtx_unlock(&umtx_lock);
4558 sched_lend_user_prio(td, PRI_MAX);
4563 * Handle terminated robust mutexes. Must be done after
4564 * robust pi disown, otherwise unlock could see unowned
4567 rb_inact = td->td_rb_inact;
4569 (void)umtx_read_uptr(td, rb_inact, &rb_inact);
4570 umtx_cleanup_rb_list(td, td->td_rb_list, &rb_inact, "");
4571 umtx_cleanup_rb_list(td, td->td_rbp_list, &rb_inact, "priv ");
4573 (void)umtx_handle_rb(td, rb_inact, NULL, true);