/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015, 2016 The FreeBSD Foundation
 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_umtx_profiling.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/eventhandler.h>
#include <sys/umtx.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <machine/atomic.h>
#include <machine/cpu.h>

#ifdef COMPAT_FREEBSD32
#include <compat/freebsd32/freebsd32_proto.h>
#endif
#define	_UMUTEX_TRY		1
#define	_UMUTEX_WAIT		2

#ifdef UMTX_PROFILING
#define	UPROF_PERC_BIGGER(w, f, sw, sf)					\
	(((w) > (sw)) || ((w) == (sw) && (f) > (sf)))
#endif
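/*
 * UPROF_PERC_BIGGER(w, f, sw, sf) compares two percentages stored as
 * (whole, fraction) pairs: (w, f) is larger when its whole part is
 * larger, or when the whole parts tie and its fraction is larger.
 * The "peaks" sysctl handler below uses it to rank the five longest
 * hash chains.
 */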
/* Priority inheritance mutex info. */
struct umtx_pi {
	/* Owner thread */
	struct thread		*pi_owner;

	/* Reference count */
	int			pi_refcount;

	/* List entry linking PI mutexes held by a thread */
	TAILQ_ENTRY(umtx_pi)	pi_link;

	/* List entry in hash */
	TAILQ_ENTRY(umtx_pi)	pi_hashlink;

	/* List for waiters */
	TAILQ_HEAD(,umtx_q)	pi_blocked;

	/* Identify a userland lock object */
	struct umtx_key		pi_key;
};

/* A userland synchronization object user. */
struct umtx_q {
	/* Linked list for the hash. */
	TAILQ_ENTRY(umtx_q)	uq_link;

	/* Umtx key. */
	struct umtx_key		uq_key;

	/* Umtx flags. */
	int			uq_flags;
#define UQF_UMTXQ	0x0001

	/* The waiting thread. */
	struct thread		*uq_thread;

	/*
	 * Blocked on PI mutex.  Reads can use either the chain lock
	 * or umtx_lock; writes must hold both the chain lock and
	 * umtx_lock.
	 */
	struct umtx_pi		*uq_pi_blocked;

	/* On blocked list */
	TAILQ_ENTRY(umtx_q)	uq_lockq;

	/* PI mutexes we own that other threads contend on */
	TAILQ_HEAD(,umtx_pi)	uq_pi_contested;

	/* Inherited priority from PP mutex */
	u_char			uq_inherited_pri;

	/* Spare queue ready to be reused */
	struct umtxq_queue	*uq_spare_queue;

	/* The queue we are on */
	struct umtxq_queue	*uq_cur_queue;
};
TAILQ_HEAD(umtxq_head, umtx_q);

/* Per-key wait-queue */
struct umtxq_queue {
	struct umtxq_head	head;
	struct umtx_key		key;
	LIST_ENTRY(umtxq_queue)	link;
	int			length;
};

LIST_HEAD(umtxq_list, umtxq_queue);

/* Userland lock object's wait-queue chain */
struct umtxq_chain {
	/* Lock for this chain. */
	struct mtx		uc_lock;

	/* List of sleep queues. */
	struct umtxq_list	uc_queue[2];
#define UMTX_SHARED_QUEUE	0
#define UMTX_EXCLUSIVE_QUEUE	1

	LIST_HEAD(, umtxq_queue) uc_spare_queue;

	/* Busy flag */
	char			uc_busy;

	/* Chain lock waiters */
	int			uc_waiters;

	/* All PI mutexes in this chain */
	TAILQ_HEAD(,umtx_pi)	uc_pi_list;

#ifdef UMTX_PROFILING
	u_int			length;
	u_int			max_length;
#endif
};

#define	UMTXQ_LOCKED_ASSERT(uc)		mtx_assert(&(uc)->uc_lock, MA_OWNED)
/*
 * Don't propagate time-sharing priority; there is a security reason.
 * A user could introduce a PI mutex, let thread A lock it, and let
 * another thread B block on it.  Because B is sleeping, its priority
 * would be boosted, which in turn would boost A's priority through
 * propagation, and A's priority would never be lowered even while it
 * is using 100% CPU.  That would be unfair to other processes.
 */
#define UPRI(td)	(((td)->td_user_pri >= PRI_MIN_TIMESHARE &&	\
			  (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?	\
			 PRI_MAX_TIMESHARE : (td)->td_user_pri)

#define	GOLDEN_RATIO_PRIME	2654404609U
#ifndef	UMTX_CHAINS
#define	UMTX_CHAINS		512
#endif
#define	UMTX_SHIFTS		(__WORD_BIT - 9)

#define	GET_SHARE(flags)	\
    (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)

#define BUSY_SPINS		200
struct abs_timeout {
	int clockid;
	bool is_abs_real;	/* TIMER_ABSTIME && CLOCK_REALTIME* */
	struct timespec cur;
	struct timespec end;
};
#ifdef COMPAT_FREEBSD32
struct umutex32 {
	volatile __lwpid_t	m_owner;	/* Owner of the mutex */
	__uint32_t		m_flags;	/* Flags of the mutex */
	__uint32_t		m_ceilings[2];	/* Priority protect ceiling */
	__uint32_t		m_rb_lnk;	/* Robust linkage */
	__uint32_t		m_pad;
	__uint32_t		m_spare[2];
};

_Static_assert(sizeof(struct umutex) == sizeof(struct umutex32), "umutex32");
_Static_assert(__offsetof(struct umutex, m_spare[0]) ==
    __offsetof(struct umutex32, m_spare[0]), "m_spare32");
#endif
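/*
 * The asserts above pin down the compat layout: struct umutex32 must
 * be the same size as the native struct umutex, with m_spare at the
 * same offset, so the same kernel code can operate on mutex words of
 * 32-bit processes without translation.
 */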
int umtx_shm_vnobj_persistent = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_vnode_persistent, CTLFLAG_RWTUN,
    &umtx_shm_vnobj_persistent, 0,
    "False forces destruction of umtx attached to file, on last close");
static int umtx_max_rb = 1000;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_max_robust, CTLFLAG_RWTUN,
    &umtx_max_rb, 0,
    "Maximum number of robust mutexes allowed for each thread");

static uma_zone_t		umtx_pi_zone;
static struct umtxq_chain	umtxq_chains[2][UMTX_CHAINS];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
static int			umtx_pi_allocated;

static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx debug");
SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
    &umtx_pi_allocated, 0, "Allocated umtx_pi");
static int umtx_verbose_rb = 1;
SYSCTL_INT(_debug_umtx, OID_AUTO, robust_faults_verbose, CTLFLAG_RWTUN,
    &umtx_verbose_rb, 0,
    "");

#ifdef UMTX_PROFILING
static long max_length;
SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length");
static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD, 0, "umtx chain stats");
#endif
static void abs_timeout_update(struct abs_timeout *timo);

static void umtx_shm_init(void);
static void umtxq_sysinit(void *);
static void umtxq_hash(struct umtx_key *key);
static struct umtxq_chain *umtxq_getchain(struct umtx_key *key);
static void umtxq_lock(struct umtx_key *key);
static void umtxq_unlock(struct umtx_key *key);
static void umtxq_busy(struct umtx_key *key);
static void umtxq_unbusy(struct umtx_key *key);
static void umtxq_insert_queue(struct umtx_q *uq, int q);
static void umtxq_remove_queue(struct umtx_q *uq, int q);
static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *);
static int umtxq_count(struct umtx_key *key);
static struct umtx_pi *umtx_pi_alloc(int);
static void umtx_pi_free(struct umtx_pi *pi);
static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    bool rb);
static void umtx_thread_cleanup(struct thread *td);
static void umtx_exec_hook(void *arg __unused, struct proc *p __unused,
    struct image_params *imgp __unused);
SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);
#define umtxq_signal(key, nwake)	umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
#define umtxq_insert(uq)	umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
#define umtxq_remove(uq)	umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)

static struct mtx umtx_lock;
#ifdef UMTX_PROFILING
static void
umtx_init_profiling(void)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < UMTX_CHAINS; ++i) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "umtx hash stats");
		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL);
		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL);
	}
}
static int
sysctl_debug_umtx_chains_peaks(SYSCTL_HANDLER_ARGS)
{
	char buf[512];
	struct sbuf sb;
	struct umtxq_chain *uc;
	u_int fract, i, j, tot, whole;
	u_int sf0, sf1, sf2, sf3, sf4;
	u_int si0, si1, si2, si3, si4;
	u_int sw0, sw1, sw2, sw3, sw4;

	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
	for (i = 0; i < 2; i++) {
		tot = 0;
		for (j = 0; j < UMTX_CHAINS; ++j) {
			uc = &umtxq_chains[i][j];
			mtx_lock(&uc->uc_lock);
			tot += uc->max_length;
			mtx_unlock(&uc->uc_lock);
		}
		if (tot == 0)
			sbuf_printf(&sb, "%u) Empty ", i);
		else {
			sf0 = sf1 = sf2 = sf3 = sf4 = 0;
			si0 = si1 = si2 = si3 = si4 = 0;
			sw0 = sw1 = sw2 = sw3 = sw4 = 0;
			for (j = 0; j < UMTX_CHAINS; j++) {
				uc = &umtxq_chains[i][j];
				mtx_lock(&uc->uc_lock);
				whole = uc->max_length * 100;
				mtx_unlock(&uc->uc_lock);
				fract = (whole % tot) * 100;
				if (UPROF_PERC_BIGGER(whole, fract, sw0, sf0)) {
					sf0 = fract;
					si0 = j;
					sw0 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw1,
				    sf1)) {
					sf1 = fract;
					si1 = j;
					sw1 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw2,
				    sf2)) {
					sf2 = fract;
					si2 = j;
					sw2 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw3,
				    sf3)) {
					sf3 = fract;
					si3 = j;
					sw3 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw4,
				    sf4)) {
					sf4 = fract;
					si4 = j;
					sw4 = whole;
				}
			}
			sbuf_printf(&sb, "queue %u:\n", i);
			sbuf_printf(&sb, "1st: %u.%u%% idx: %u\n", sw0 / tot,
			    sf0 / tot, si0);
			sbuf_printf(&sb, "2nd: %u.%u%% idx: %u\n", sw1 / tot,
			    sf1 / tot, si1);
			sbuf_printf(&sb, "3rd: %u.%u%% idx: %u\n", sw2 / tot,
			    sf2 / tot, si2);
			sbuf_printf(&sb, "4th: %u.%u%% idx: %u\n", sw3 / tot,
			    sf3 / tot, si3);
			sbuf_printf(&sb, "5th: %u.%u%% idx: %u\n", sw4 / tot,
			    sf4 / tot, si4);
		}
	}
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);
	return (0);
}
static int
sysctl_debug_umtx_chains_clear(SYSCTL_HANDLER_ARGS)
{
	struct umtxq_chain *uc;
	u_int i, j;
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (clear != 0) {
		for (i = 0; i < 2; ++i) {
			for (j = 0; j < UMTX_CHAINS; ++j) {
				uc = &umtxq_chains[i][j];
				mtx_lock(&uc->uc_lock);
				uc->length = 0;
				uc->max_length = 0;
				mtx_unlock(&uc->uc_lock);
			}
		}
	}
	return (0);
}

SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, clear,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_clear, "I", "Clear umtx chains statistics");
SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, peaks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_peaks, "A", "Highest peaks in chains max length");
#endif
static void
umtxq_sysinit(void *arg __unused)
{
	int i, j;

	umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	for (i = 0; i < 2; ++i) {
		for (j = 0; j < UMTX_CHAINS; ++j) {
			mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
			    MTX_DEF | MTX_DUPOK);
			LIST_INIT(&umtxq_chains[i][j].uc_queue[0]);
			LIST_INIT(&umtxq_chains[i][j].uc_queue[1]);
			LIST_INIT(&umtxq_chains[i][j].uc_spare_queue);
			TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
			umtxq_chains[i][j].uc_busy = 0;
			umtxq_chains[i][j].uc_waiters = 0;
#ifdef UMTX_PROFILING
			umtxq_chains[i][j].length = 0;
			umtxq_chains[i][j].max_length = 0;
#endif
		}
	}
#ifdef UMTX_PROFILING
	umtx_init_profiling();
#endif
	mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);
	EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
	    EVENTHANDLER_PRI_ANY);
	umtx_shm_init();
}
struct umtx_q *
umtxq_alloc(void)
{
	struct umtx_q *uq;

	uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
	uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX,
	    M_WAITOK | M_ZERO);
	TAILQ_INIT(&uq->uq_spare_queue->head);
	TAILQ_INIT(&uq->uq_pi_contested);
	uq->uq_inherited_pri = PRI_MAX;
	return (uq);
}

void
umtxq_free(struct umtx_q *uq)
{

	MPASS(uq->uq_spare_queue != NULL);
	free(uq->uq_spare_queue, M_UMTX);
	free(uq, M_UMTX);
}
static inline void
umtxq_hash(struct umtx_key *key)
{
	unsigned n;

	n = (uintptr_t)key->info.both.a + key->info.both.b;
	key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
}
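/*
 * This is multiplicative (Fibonacci) hashing: the key cookie is
 * multiplied by a prime near 2^32/phi and the high-order nine bits of
 * the product (UMTX_SHIFTS == __WORD_BIT - 9) select one of the
 * UMTX_CHAINS (512 == 2^9) buckets, which spreads nearby userspace
 * addresses evenly across chains.
 */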
static inline struct umtxq_chain *
umtxq_getchain(struct umtx_key *key)
{

	if (key->type <= TYPE_SEM)
		return (&umtxq_chains[1][key->hash]);
	return (&umtxq_chains[0][key->hash]);
}

/*
 * Lock a chain.
 */
static inline void
umtxq_lock(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_lock(&uc->uc_lock);
}

/*
 * Unlock a chain.
 */
static inline void
umtxq_unlock(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_unlock(&uc->uc_lock);
}
/*
 * Set the chain to the busy state when a following operation may
 * block (a kernel mutex cannot be held across it).
 */
static void
umtxq_busy(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_assert(&uc->uc_lock, MA_OWNED);
	if (uc->uc_busy) {
#ifdef SMP
		if (smp_cpus > 1) {
			int count = BUSY_SPINS;
			if (count > 0) {
				umtxq_unlock(key);
				while (uc->uc_busy && --count > 0)
					cpu_spinwait();
				umtxq_lock(key);
			}
		}
#endif
		while (uc->uc_busy) {
			uc->uc_waiters++;
			msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
			uc->uc_waiters--;
		}
	}
	uc->uc_busy = 1;
}
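/*
 * The busy protocol is two-phase: on SMP, spin up to BUSY_SPINS times
 * with the chain unlocked in the hope that the current holder clears
 * uc_busy quickly, then fall back to msleep() on the chain.  This
 * keeps short critical sections cheap without burning CPU when the
 * holder itself sleeps.
 */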
/*
 * Unbusy a chain.
 */
static void
umtxq_unbusy(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_assert(&uc->uc_lock, MA_OWNED);
	KASSERT(uc->uc_busy != 0, ("not busy"));
	uc->uc_busy = 0;
	if (uc->uc_waiters)
		wakeup_one(uc);
}

static void
umtxq_unbusy_unlocked(struct umtx_key *key)
{

	umtxq_lock(key);
	umtxq_unbusy(key);
	umtxq_unlock(key);
}
static struct umtxq_queue *
umtxq_queue_lookup(struct umtx_key *key, int q)
{
	struct umtxq_queue *uh;
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);
	LIST_FOREACH(uh, &uc->uc_queue[q], link) {
		if (umtx_key_match(&uh->key, key))
			return (uh);
	}

	return (NULL);
}
static inline void
umtxq_insert_queue(struct umtx_q *uq, int q)
{
	struct umtxq_queue *uh;
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue"));
	uh = umtxq_queue_lookup(&uq->uq_key, q);
	if (uh != NULL) {
		LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
	} else {
		uh = uq->uq_spare_queue;
		uh->key = uq->uq_key;
		LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
#ifdef UMTX_PROFILING
		uc->length++;
		if (uc->length > uc->max_length) {
			uc->max_length = uc->length;
			if (uc->max_length > max_length)
				max_length = uc->max_length;
		}
#endif
	}
	uq->uq_spare_queue = NULL;

	TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
	uh->length++;
	uq->uq_flags |= UQF_UMTXQ;
	uq->uq_cur_queue = uh;
}
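/*
 * Note the spare-queue trick above: every thread donates one
 * umtxq_queue structure (uq_spare_queue).  The first waiter's spare
 * becomes the per-key queue head; later waiters park their spares on
 * uc_spare_queue.  On removal a thread takes a spare back, so queue
 * heads never have to be allocated or freed while a chain is locked.
 */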
static inline void
umtxq_remove_queue(struct umtx_q *uq, int q)
{
	struct umtxq_chain *uc;
	struct umtxq_queue *uh;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	if (uq->uq_flags & UQF_UMTXQ) {
		uh = uq->uq_cur_queue;
		TAILQ_REMOVE(&uh->head, uq, uq_link);
		uh->length--;
		uq->uq_flags &= ~UQF_UMTXQ;
		if (TAILQ_EMPTY(&uh->head)) {
			KASSERT(uh->length == 0,
			    ("inconsistent umtxq_queue length"));
#ifdef UMTX_PROFILING
			uc->length--;
#endif
			LIST_REMOVE(uh, link);
		} else {
			uh = LIST_FIRST(&uc->uc_spare_queue);
			KASSERT(uh != NULL, ("uc_spare_queue is empty"));
			LIST_REMOVE(uh, link);
		}
		uq->uq_spare_queue = uh;
		uq->uq_cur_queue = NULL;
	}
}
/*
 * Return the number of waiters on the shared queue for the key.
 */
static int
umtxq_count(struct umtx_key *key)
{
	struct umtxq_chain *uc;
	struct umtxq_queue *uh;

	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
	if (uh != NULL)
		return (uh->length);
	return (0);
}
/*
 * Return the number of PI waiters and, through *first, the first
 * waiter.
 */
static int
umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
{
	struct umtxq_chain *uc;
	struct umtxq_queue *uh;

	*first = NULL;
	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
	if (uh != NULL) {
		*first = TAILQ_FIRST(&uh->head);
		return (uh->length);
	}
	return (0);
}
static int
umtxq_check_susp(struct thread *td)
{
	struct proc *p;
	int error;

	/*
	 * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
	 * eventually break the lockstep loop.
	 */
	if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
		return (0);
	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	if (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) {
		if (p->p_flag & P_SINGLE_EXIT)
			error = EINTR;
		else
			error = ERESTART;
	}
	PROC_UNLOCK(p);
	return (error);
}
/*
 * Wake up threads waiting on a userland object.
 */

static int
umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
{
	struct umtxq_chain *uc;
	struct umtxq_queue *uh;
	struct umtx_q *uq;
	int ret;

	ret = 0;
	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);
	uh = umtxq_queue_lookup(key, q);
	if (uh != NULL) {
		while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
			umtxq_remove_queue(uq, q);
			wakeup(uq);
			if (++ret >= n_wake)
				return (ret);
		}
	}
	return (ret);
}
/*
 * Wake up the specified thread.
 */
static inline void
umtxq_signal_thread(struct umtx_q *uq)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	umtxq_remove(uq);
	wakeup(uq);
}
static inline int
tstohz(const struct timespec *tsp)
{
	struct timeval tv;

	TIMESPEC_TO_TIMEVAL(&tv, tsp);
	return tvtohz(&tv);
}
static void
abs_timeout_init(struct abs_timeout *timo, int clockid, int absolute,
    const struct timespec *timeout)
{

	timo->clockid = clockid;
	if (!absolute) {
		timo->is_abs_real = false;
		abs_timeout_update(timo);
		timo->end = timo->cur;
		timespecadd(&timo->end, timeout);
	} else {
		timo->end = *timeout;
		timo->is_abs_real = clockid == CLOCK_REALTIME ||
		    clockid == CLOCK_REALTIME_FAST ||
		    clockid == CLOCK_REALTIME_PRECISE;
		/*
		 * If is_abs_real, umtxq_sleep will read the clock
		 * after setting td_rtcgen; otherwise, read it here.
		 */
		if (!timo->is_abs_real) {
			abs_timeout_update(timo);
		}
	}
}
static void
abs_timeout_init2(struct abs_timeout *timo, const struct _umtx_time *umtxtime)
{

	abs_timeout_init(timo, umtxtime->_clockid,
	    (umtxtime->_flags & UMTX_ABSTIME) != 0, &umtxtime->_timeout);
}

static inline void
abs_timeout_update(struct abs_timeout *timo)
{

	kern_clock_gettime(curthread, timo->clockid, &timo->cur);
}

static int
abs_timeout_gethz(struct abs_timeout *timo)
{
	struct timespec tts;

	if (timespeccmp(&timo->end, &timo->cur, <=))
		return (-1);
	tts = timo->end;
	timespecsub(&tts, &timo->cur);
	return (tstohz(&tts));
}
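/*
 * Taken together, the helpers above implement kernel-side timeouts
 * for all umtx sleeps.  A typical caller does, in sketch form:
 *
 *	struct abs_timeout timo;
 *
 *	abs_timeout_init2(&timo, umtxtime);	// once, before sleeping
 *	...
 *	hz_left = abs_timeout_gethz(&timo);	// -1 means already expired
 *	error = msleep(..., hz_left);
 *	abs_timeout_update(&timo);		// re-read clock, then loop
 *
 * so a wait that is woken spuriously recomputes the remaining ticks
 * instead of restarting the full interval.
 */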
static uint32_t
umtx_unlock_val(uint32_t flags, bool rb)
{

	if (rb)
		return (UMUTEX_RB_OWNERDEAD);
	else if ((flags & UMUTEX_NONCONSISTENT) != 0)
		return (UMUTEX_RB_NOTRECOV);
	else
		return (UMUTEX_UNOWNED);
}
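/*
 * The value chosen above becomes the new m_owner word: a robust mutex
 * whose owner died is left as UMUTEX_RB_OWNERDEAD so the next locker
 * gets EOWNERDEAD, a mutex marked non-consistent is left as
 * UMUTEX_RB_NOTRECOV so lockers get ENOTRECOVERABLE, and everything
 * else unlocks to plain UMUTEX_UNOWNED.
 */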
/*
 * Put the thread into a sleep state; before sleeping, check whether
 * the thread was removed from the umtx queue.
 */
static inline int
umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *abstime)
{
	struct umtxq_chain *uc;
	int error, timo;

	if (abstime != NULL && abstime->is_abs_real) {
		curthread->td_rtcgen = atomic_load_acq_int(&rtc_generation);
		abs_timeout_update(abstime);
	}

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	for (;;) {
		if (!(uq->uq_flags & UQF_UMTXQ)) {
			error = 0;
			break;
		}
		if (abstime != NULL) {
			timo = abs_timeout_gethz(abstime);
			if (timo < 0) {
				error = ETIMEDOUT;
				break;
			}
		} else
			timo = 0;
		error = msleep(uq, &uc->uc_lock, PCATCH | PDROP, wmesg, timo);
		if (error == EINTR || error == ERESTART) {
			umtxq_lock(&uq->uq_key);
			break;
		}
		if (abstime != NULL) {
			if (abstime->is_abs_real)
				curthread->td_rtcgen =
				    atomic_load_acq_int(&rtc_generation);
			abs_timeout_update(abstime);
		}
		umtxq_lock(&uq->uq_key);
	}

	curthread->td_rtcgen = 0;
	return (error);
}
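/*
 * The td_rtcgen handshake above is what makes absolute CLOCK_REALTIME
 * sleeps safe against the wall clock being stepped: the generation
 * number is captured before the deadline is (re)checked, and stepping
 * the clock bumps rtc_generation and wakes sleepers holding a stale
 * generation so they recompute their timeout against the new time.
 */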
/*
 * Convert a userspace address into a unique logical address.
 */
int
umtx_key_get(const void *addr, int type, int share, struct umtx_key *key)
{
	struct thread *td = curthread;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;

	key->type = type;
	if (share == THREAD_SHARE) {
		key->shared = 0;
		key->info.private.vs = td->td_proc->p_vmspace;
		key->info.private.addr = (uintptr_t)addr;
	} else {
		MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
		map = &td->td_proc->p_vmspace->vm_map;
		if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
		    &entry, &key->info.shared.object, &pindex, &prot,
		    &wired) != KERN_SUCCESS) {
			return (EFAULT);
		}

		if ((share == PROCESS_SHARE) ||
		    (share == AUTO_SHARE &&
		     VM_INHERIT_SHARE == entry->inheritance)) {
			key->shared = 1;
			key->info.shared.offset = (vm_offset_t)addr -
			    entry->start + entry->offset;
			vm_object_reference(key->info.shared.object);
		} else {
			key->shared = 0;
			key->info.private.vs = td->td_proc->p_vmspace;
			key->info.private.addr = (uintptr_t)addr;
		}
		vm_map_lookup_done(map, entry);
	}

	umtxq_hash(key);
	return (0);
}
/*
 * Release key.
 */
void
umtx_key_release(struct umtx_key *key)
{

	if (key->shared)
		vm_object_deallocate(key->info.shared.object);
}
/*
 * Fetch and compare value; sleep on the address if the value is
 * unchanged.
 */
static int
do_wait(struct thread *td, void *addr, u_long id,
    struct _umtx_time *timeout, int compat32, int is_private)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	u_long tmp;
	uint32_t tmp32;
	int error = 0;

	uq = td->td_umtxq;
	if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
	    is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	if (compat32 == 0) {
		error = fueword(addr, &tmp);
		if (error != 0)
			error = EFAULT;
	} else {
		error = fueword32(addr, &tmp32);
		if (error == 0)
			tmp = tmp32;
		else
			error = EFAULT;
	}
	umtxq_lock(&uq->uq_key);
	if (error == 0) {
		if (tmp == id)
			error = umtxq_sleep(uq, "uwait", timeout == NULL ?
			    NULL : &timo);
		if ((uq->uq_flags & UQF_UMTXQ) == 0)
			error = 0;
		else
			umtxq_remove(uq);
	} else if ((uq->uq_flags & UQF_UMTXQ) != 0) {
		umtxq_remove(uq);
	}
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
/*
 * Wake up threads sleeping on the specified address.
 */
int
kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
{
	struct umtx_key key;
	int ret;

	if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
	    is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
		return (ret);
	umtxq_lock(&key);
	umtxq_signal(&key, n_wake);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (0);
}
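/*
 * This is the service routine behind the UMTX_OP_WAKE and
 * UMTX_OP_WAKE_PRIVATE operations.  From userland the pairing with
 * do_wait() looks roughly like (illustrative only):
 *
 *	_umtx_op(&word, UMTX_OP_WAIT, expected, NULL, NULL);
 *	...
 *	word = new_value;
 *	_umtx_op(&word, UMTX_OP_WAKE, nwake, NULL, NULL);
 *
 * with the usual futex-style guarantee that a waiter only sleeps if
 * the word still holds the expected value.
 */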
/*
 * Lock a PTHREAD_PRIO_NONE protocol POSIX mutex.
 */
static int
do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int mode)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t owner, old, id;
	int error, rv;

	id = td->td_tid;
	uq = td->td_umtxq;
	error = 0;

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	/*
	 * Care must be exercised when dealing with umtx structure.  It
	 * can fault on any access.
	 */
	for (;;) {
		rv = fueword32(&m->m_owner, &owner);
		if (rv == -1)
			return (EFAULT);
		if (mode == _UMUTEX_WAIT) {
			if (owner == UMUTEX_UNOWNED ||
			    owner == UMUTEX_CONTESTED ||
			    owner == UMUTEX_RB_OWNERDEAD ||
			    owner == UMUTEX_RB_NOTRECOV)
				return (0);
		} else {
			/*
			 * Robust mutex terminated.  Kernel duty is to
			 * return EOWNERDEAD to the userspace.  The
			 * umutex.m_flags UMUTEX_NONCONSISTENT is set
			 * by the common userspace code.
			 */
			if (owner == UMUTEX_RB_OWNERDEAD) {
				rv = casueword32(&m->m_owner,
				    UMUTEX_RB_OWNERDEAD, &owner,
				    id | UMUTEX_CONTESTED);
				if (rv == -1)
					return (EFAULT);
				if (owner == UMUTEX_RB_OWNERDEAD)
					return (EOWNERDEAD); /* success */
				rv = umtxq_check_susp(td);
				if (rv != 0)
					return (rv);
				continue;
			}
			if (owner == UMUTEX_RB_NOTRECOV)
				return (ENOTRECOVERABLE);

			/*
			 * Try the uncontested case.  This should be
			 * done in userland.
			 */
			rv = casueword32(&m->m_owner, UMUTEX_UNOWNED,
			    &owner, id);
			/* The address was invalid. */
			if (rv == -1)
				return (EFAULT);

			/* The acquire succeeded. */
			if (owner == UMUTEX_UNOWNED)
				return (0);

			/*
			 * If no one owns it but it is contested try
			 * to acquire it.
			 */
			if (owner == UMUTEX_CONTESTED) {
				rv = casueword32(&m->m_owner,
				    UMUTEX_CONTESTED, &owner,
				    id | UMUTEX_CONTESTED);
				/* The address was invalid. */
				if (rv == -1)
					return (EFAULT);

				if (owner == UMUTEX_CONTESTED)
					return (0);

				rv = umtxq_check_susp(td);
				if (rv != 0)
					return (rv);

				/*
				 * If this failed the lock has
				 * changed, restart.
				 */
				continue;
			}
		}

		if (mode == _UMUTEX_TRY)
			return (EBUSY);

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			return (error);

		if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
		    GET_SHARE(flags), &uq->uq_key)) != 0)
			return (error);

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		rv = casueword32(&m->m_owner, owner, &old,
		    owner | UMUTEX_CONTESTED);

		/* The address was invalid. */
		if (rv == -1) {
			umtxq_lock(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/*
		 * If we set the contested bit, sleep.  Otherwise the lock
		 * changed and we need to retry, or we lost a race to the
		 * thread unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		if (old == owner)
			error = umtxq_sleep(uq, "umtxn", timeout == NULL ?
			    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);

		if (error == 0)
			error = umtxq_check_susp(td);
	}

	return (0);
}
/*
 * Unlock a PTHREAD_PRIO_NONE protocol POSIX mutex.
 */
static int
do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	uint32_t owner, old, id, newlock;
	int error, count;

	id = td->td_tid;
	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	newlock = umtx_unlock_val(flags, rb);
	if ((owner & UMUTEX_CONTESTED) == 0) {
		error = casueword32(&m->m_owner, owner, &old, newlock);
		if (error == -1)
			return (EFAULT);
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * zero or one thread is waiting on it.  Otherwise, it must be
	 * marked as contested.
	 */
	if (count > 1)
		newlock |= UMUTEX_CONTESTED;
	error = casueword32(&m->m_owner, owner, &old, newlock);
	umtxq_lock(&key);
	umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (error == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}
/*
 * Check if the mutex is available and wake up a waiter; for a
 * simple (non-PI, non-PP) mutex only.
 */
static int
do_wake_umutex(struct thread *td, struct umutex *m)
{
	struct umtx_key key;
	uint32_t owner;
	uint32_t flags;
	int error;
	int count;

	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != 0 && owner != UMUTEX_RB_OWNERDEAD &&
	    owner != UMUTEX_RB_NOTRECOV)
		return (0);

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	if (count <= 1 && owner != UMUTEX_RB_OWNERDEAD &&
	    owner != UMUTEX_RB_NOTRECOV) {
		error = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    UMUTEX_UNOWNED);
		if (error == -1)
			error = EFAULT;
	}

	umtxq_lock(&key);
	if (error == 0 && count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 ||
	    owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV))
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
/*
 * Check whether the mutex has waiters and try to repair the
 * contention bit.
 */
static int
do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
{
	struct umtx_key key;
	uint32_t owner, old;
	int type;
	int error;
	int count;

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT |
	    UMUTEX_ROBUST)) {
	case 0:
	case UMUTEX_ROBUST:
		type = TYPE_NORMAL_UMUTEX;
		break;
	case UMUTEX_PRIO_INHERIT:
		type = TYPE_PI_UMUTEX;
		break;
	case (UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST):
		type = TYPE_PI_ROBUST_UMUTEX;
		break;
	case UMUTEX_PRIO_PROTECT:
		type = TYPE_PP_UMUTEX;
		break;
	case (UMUTEX_PRIO_PROTECT | UMUTEX_ROBUST):
		type = TYPE_PP_ROBUST_UMUTEX;
		break;
	default:
		return (EINVAL);
	}
	if ((error = umtx_key_get(m, type, GET_SHARE(flags), &key)) != 0)
		return (error);

	owner = 0;
	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);
	/*
	 * Only repair the contention bit if there is a waiter; that
	 * means the mutex is still referenced by userland code.
	 * Otherwise, don't update any memory.
	 */
	if (count > 1) {
		error = fueword32(&m->m_owner, &owner);
		if (error == -1)
			error = EFAULT;
		while (error == 0 && (owner & UMUTEX_CONTESTED) == 0) {
			error = casueword32(&m->m_owner, owner, &old,
			    owner | UMUTEX_CONTESTED);
			if (error == -1) {
				error = EFAULT;
				break;
			}
			if (old == owner)
				break;
			owner = old;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}
	} else if (count == 1) {
		error = fueword32(&m->m_owner, &owner);
		if (error == -1)
			error = EFAULT;
		while (error == 0 && (owner & ~UMUTEX_CONTESTED) != 0 &&
		    (owner & UMUTEX_CONTESTED) == 0) {
			error = casueword32(&m->m_owner, owner, &old,
			    owner | UMUTEX_CONTESTED);
			if (error == -1) {
				error = EFAULT;
				break;
			}
			if (old == owner)
				break;
			owner = old;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}
	}
	umtxq_lock(&key);
	if (error == EFAULT) {
		umtxq_signal(&key, INT_MAX);
	} else if (count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 ||
	    owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV))
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
static inline struct umtx_pi *
umtx_pi_alloc(int flags)
{
	struct umtx_pi *pi;

	pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
	TAILQ_INIT(&pi->pi_blocked);
	atomic_add_int(&umtx_pi_allocated, 1);
	return (pi);
}

static inline void
umtx_pi_free(struct umtx_pi *pi)
{
	uma_zfree(umtx_pi_zone, pi);
	atomic_add_int(&umtx_pi_allocated, -1);
}
/*
 * Adjust the thread's position on a pi_state after its priority has been
 * changed.
 */
static int
umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
{
	struct umtx_q *uq, *uq1, *uq2;
	struct thread *td1;

	mtx_assert(&umtx_lock, MA_OWNED);
	if (pi == NULL)
		return (0);

	uq = td->td_umtxq;

	/*
	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if either its priority is lower than
	 * the previous thread or higher than the next thread.
	 */
	uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
	uq2 = TAILQ_NEXT(uq, uq_lockq);
	if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
	    (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved to.
		 */
		TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
		TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
			td1 = uq1->uq_thread;
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (UPRI(td1) > UPRI(td))
				break;
		}

		if (uq1 == NULL)
			TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
		else
			TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
	}
	return (1);
}
static struct umtx_pi *
umtx_pi_next(struct umtx_pi *pi)
{
	struct umtx_q *uq_owner;

	if (pi->pi_owner == NULL)
		return (NULL);
	uq_owner = pi->pi_owner->td_umtxq;
	if (uq_owner == NULL)
		return (NULL);
	return (uq_owner->uq_pi_blocked);
}

/*
 * Floyd's Cycle-Finding Algorithm.
 */
static bool
umtx_pi_check_loop(struct umtx_pi *pi)
{
	struct umtx_pi *pi1;	/* fast iterator */

	mtx_assert(&umtx_lock, MA_OWNED);
	if (pi == NULL)
		return (false);
	pi1 = pi;
	for (;;) {
		pi = umtx_pi_next(pi);
		if (pi == NULL)
			break;
		pi1 = umtx_pi_next(pi1);
		if (pi1 == NULL)
			break;
		pi1 = umtx_pi_next(pi1);
		if (pi1 == NULL)
			break;
		if (pi == pi1)
			return (true);
	}
	return (false);
}
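/*
 * umtx_pi_check_loop() walks the blocked-on/owner chain with two
 * cursors, the slow one advancing one step and the fast one two steps
 * per iteration; if they ever meet, the chain is cyclic.  Cycles
 * cannot arise from correct pthread usage, but userland can scribble
 * arbitrary values into a PI mutex word, and the propagation loops
 * below must not spin forever on such garbage.
 */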
/*
 * Propagate priority when a thread is blocked on a POSIX
 * PI mutex.
 */
static void
umtx_propagate_priority(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;
	int pri;

	mtx_assert(&umtx_lock, MA_OWNED);
	pri = UPRI(td);
	uq = td->td_umtxq;
	pi = uq->uq_pi_blocked;
	if (pi == NULL)
		return;
	if (umtx_pi_check_loop(pi))
		return;

	for (;;) {
		td = pi->pi_owner;
		if (td == NULL || td == curthread)
			return;

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		thread_lock(td);
		if (td->td_lend_user_pri > pri)
			sched_lend_user_prio(td, pri);
		else {
			thread_unlock(td);
			break;
		}
		thread_unlock(td);

		/*
		 * Pick up the lock that td is blocked on.
		 */
		uq = td->td_umtxq;
		pi = uq->uq_pi_blocked;
		if (pi == NULL)
			break;
		/* Resort td on the list if needed. */
		umtx_pi_adjust_thread(pi, td);
	}
}
/*
 * Unpropagate priority for a PI mutex when a thread blocked on it is
 * interrupted by a signal or resumed by other threads.
 */
static void
umtx_repropagate_priority(struct umtx_pi *pi)
{
	struct umtx_q *uq, *uq_owner;
	struct umtx_pi *pi2;
	int pri;

	mtx_assert(&umtx_lock, MA_OWNED);

	if (umtx_pi_check_loop(pi))
		return;
	while (pi != NULL && pi->pi_owner != NULL) {
		pri = PRI_MAX;
		uq_owner = pi->pi_owner->td_umtxq;

		TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
			uq = TAILQ_FIRST(&pi2->pi_blocked);
			if (uq != NULL) {
				if (pri > UPRI(uq->uq_thread))
					pri = UPRI(uq->uq_thread);
			}
		}

		if (pri > uq_owner->uq_inherited_pri)
			pri = uq_owner->uq_inherited_pri;
		thread_lock(pi->pi_owner);
		sched_lend_user_prio(pi->pi_owner, pri);
		thread_unlock(pi->pi_owner);
		if ((pi = uq_owner->uq_pi_blocked) != NULL)
			umtx_pi_adjust_thread(pi, uq_owner->uq_thread);
	}
}
/*
 * Insert a PI mutex into the owned list.
 */
static void
umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
{
	struct umtx_q *uq_owner;

	uq_owner = owner->td_umtxq;
	mtx_assert(&umtx_lock, MA_OWNED);
	MPASS(pi->pi_owner == NULL);
	pi->pi_owner = owner;
	TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);
}

/*
 * Disown a PI mutex, and remove it from the owned list.
 */
static void
umtx_pi_disown(struct umtx_pi *pi)
{

	mtx_assert(&umtx_lock, MA_OWNED);
	TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested, pi, pi_link);
	pi->pi_owner = NULL;
}
/*
 * Claim ownership of a PI mutex.
 */
static int
umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
{
	struct umtx_q *uq;
	int pri;

	mtx_lock(&umtx_lock);
	if (pi->pi_owner == owner) {
		mtx_unlock(&umtx_lock);
		return (0);
	}

	if (pi->pi_owner != NULL) {
		/*
		 * Userland may have already messed with the mutex, sigh.
		 */
		mtx_unlock(&umtx_lock);
		return (EPERM);
	}
	umtx_pi_setowner(pi, owner);
	uq = TAILQ_FIRST(&pi->pi_blocked);
	if (uq != NULL) {
		pri = UPRI(uq->uq_thread);
		thread_lock(owner);
		if (pri < UPRI(owner))
			sched_lend_user_prio(owner, pri);
		thread_unlock(owner);
	}
	mtx_unlock(&umtx_lock);
	return (0);
}
/*
 * Adjust a thread's order position in its blocked PI mutex; this may
 * trigger a new round of priority propagation.
 */
void
umtx_pi_adjust(struct thread *td, u_char oldpri)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;

	uq = td->td_umtxq;
	mtx_lock(&umtx_lock);
	/*
	 * Pick up the lock that td is blocked on.
	 */
	pi = uq->uq_pi_blocked;
	if (pi != NULL) {
		umtx_pi_adjust_thread(pi, td);
		umtx_repropagate_priority(pi);
	}
	mtx_unlock(&umtx_lock);
}
/*
 * Sleep on a PI mutex.
 */
static int
umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner,
    const char *wmesg, struct abs_timeout *timo, bool shared)
{
	struct umtxq_chain *uc;
	struct thread *td, *td1;
	struct umtx_q *uq1;
	int error, pri;

	td = uq->uq_thread;
	KASSERT(td == curthread, ("inconsistent uq_thread"));
	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));
	umtxq_insert(uq);
	mtx_lock(&umtx_lock);
	if (pi->pi_owner == NULL) {
		mtx_unlock(&umtx_lock);
		td1 = tdfind(owner, shared ? -1 : td->td_proc->p_pid);
		mtx_lock(&umtx_lock);
		if (td1 != NULL) {
			if (pi->pi_owner == NULL)
				umtx_pi_setowner(pi, td1);
			PROC_UNLOCK(td1->td_proc);
		}
	}

	TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
		pri = UPRI(uq1->uq_thread);
		if (pri > UPRI(td))
			break;
	}

	if (uq1 != NULL)
		TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
	else
		TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);

	uq->uq_pi_blocked = pi;
	thread_lock(td);
	td->td_flags |= TDF_UPIBLOCKED;
	thread_unlock(td);
	umtx_propagate_priority(td);
	mtx_unlock(&umtx_lock);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, wmesg, timo);
	umtxq_remove(uq);

	mtx_lock(&umtx_lock);
	uq->uq_pi_blocked = NULL;
	thread_lock(td);
	td->td_flags &= ~TDF_UPIBLOCKED;
	thread_unlock(td);
	TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
	umtx_repropagate_priority(pi);
	mtx_unlock(&umtx_lock);
	umtxq_unlock(&uq->uq_key);

	return (error);
}
/*
 * Add a reference count for a PI mutex.
 */
static void
umtx_pi_ref(struct umtx_pi *pi)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	UMTXQ_LOCKED_ASSERT(uc);
	pi->pi_refcount++;
}

/*
 * Decrease the reference count of a PI mutex; when the counter drops
 * to zero, its memory is freed.
 */
static void
umtx_pi_unref(struct umtx_pi *pi)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	UMTXQ_LOCKED_ASSERT(uc);
	KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
	if (--pi->pi_refcount == 0) {
		mtx_lock(&umtx_lock);
		if (pi->pi_owner != NULL)
			umtx_pi_disown(pi);
		KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
		    ("blocked queue not empty"));
		mtx_unlock(&umtx_lock);
		TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
		umtx_pi_free(pi);
	}
}
/*
 * Find a PI mutex in the hash table.
 */
static struct umtx_pi *
umtx_pi_lookup(struct umtx_key *key)
{
	struct umtxq_chain *uc;
	struct umtx_pi *pi;

	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);

	TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
		if (umtx_key_match(&pi->pi_key, key)) {
			return (pi);
		}
	}
	return (NULL);
}

/*
 * Insert a PI mutex into the hash table.
 */
static inline void
umtx_pi_insert(struct umtx_pi *pi)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	UMTXQ_LOCKED_ASSERT(uc);
	TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
}
/*
 * Lock a PI mutex.
 */
static int
do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	struct umtx_pi *pi, *new_pi;
	uint32_t id, old_owner, owner, old;
	int error, rv;

	id = td->td_tid;
	uq = td->td_umtxq;

	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	pi = umtx_pi_lookup(&uq->uq_key);
	if (pi == NULL) {
		new_pi = umtx_pi_alloc(M_NOWAIT);
		if (new_pi == NULL) {
			umtxq_unlock(&uq->uq_key);
			new_pi = umtx_pi_alloc(M_WAITOK);
			umtxq_lock(&uq->uq_key);
			pi = umtx_pi_lookup(&uq->uq_key);
			if (pi != NULL) {
				umtx_pi_free(new_pi);
				new_pi = NULL;
			}
		}
		if (new_pi != NULL) {
			new_pi->pi_key = uq->uq_key;
			umtx_pi_insert(new_pi);
			pi = new_pi;
		}
	}
	umtx_pi_ref(pi);
	umtxq_unlock(&uq->uq_key);

	/*
	 * Care must be exercised when dealing with umtx structure.  It
	 * can fault on any access.
	 */
	for (;;) {
		/*
		 * Try the uncontested case.  This should be done in userland.
		 */
		rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, &owner, id);
		/* The address was invalid. */
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		/* The acquire succeeded. */
		if (owner == UMUTEX_UNOWNED) {
			error = 0;
			break;
		}

		if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
			break;
		}

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMUTEX_CONTESTED || owner == UMUTEX_RB_OWNERDEAD) {
			old_owner = owner;
			rv = casueword32(&m->m_owner, owner, &owner,
			    id | UMUTEX_CONTESTED);
			/* The address was invalid. */
			if (rv == -1) {
				error = EFAULT;
				break;
			}

			if (owner == old_owner) {
				umtxq_lock(&uq->uq_key);
				umtxq_busy(&uq->uq_key);
				error = umtx_pi_claim(pi, td);
				umtxq_unbusy(&uq->uq_key);
				umtxq_unlock(&uq->uq_key);
				if (error != 0) {
					/*
					 * Since we're going to return an
					 * error, restore the m_owner to its
					 * previous, unowned state to avoid
					 * compounding the problem.
					 */
					(void)casuword32(&m->m_owner,
					    id | UMUTEX_CONTESTED,
					    old_owner);
				}
				if (error == 0 &&
				    old_owner == UMUTEX_RB_OWNERDEAD)
					error = EOWNERDEAD;
				break;
			}

			error = umtxq_check_susp(td);
			if (error != 0)
				break;

			/* If this failed the lock has changed, restart. */
			continue;
		}

		if ((owner & ~UMUTEX_CONTESTED) == id) {
			error = EDEADLK;
			break;
		}

		if (try != 0) {
			error = EBUSY;
			break;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		rv = casueword32(&m->m_owner, owner, &old, owner |
		    UMUTEX_CONTESTED);

		/* The address was invalid. */
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}

		umtxq_lock(&uq->uq_key);
		/*
		 * If we set the contested bit, sleep.  Otherwise the lock
		 * changed and we need to retry, or we lost a race to the
		 * thread unlocking the umtx.  Note that the
		 * UMUTEX_RB_OWNERDEAD value for owner is impossible here.
		 */
		if (old == owner) {
			error = umtxq_sleep_pi(uq, pi,
			    owner & ~UMUTEX_CONTESTED,
			    "umtxpi", timeout == NULL ? NULL : &timo,
			    (flags & USYNC_PROCESS_SHARED) != 0);
			if (error != 0)
				continue;
		} else {
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
		}

		error = umtxq_check_susp(td);
		if (error != 0)
			break;
	}

	umtxq_lock(&uq->uq_key);
	umtx_pi_unref(pi);
	umtxq_unlock(&uq->uq_key);

	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Unlock a PI mutex.
 */
static int
do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	struct umtx_q *uq_first, *uq_first2, *uq_me;
	struct umtx_pi *pi, *pi2;
	uint32_t id, new_owner, old, owner;
	int count, error, pri;

	id = td->td_tid;
	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	new_owner = umtx_unlock_val(flags, rb);

	/* This should be done in userland */
	if ((owner & UMUTEX_CONTESTED) == 0) {
		error = casueword32(&m->m_owner, owner, &old, new_owner);
		if (error == -1)
			return (EFAULT);
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count_pi(&key, &uq_first);
	if (uq_first != NULL) {
		mtx_lock(&umtx_lock);
		pi = uq_first->uq_pi_blocked;
		KASSERT(pi != NULL, ("pi == NULL?"));
		if (pi->pi_owner != td && !(rb && pi->pi_owner == NULL)) {
			mtx_unlock(&umtx_lock);
			umtxq_unbusy(&key);
			umtxq_unlock(&key);
			umtx_key_release(&key);
			/* userland messed with the mutex */
			return (EPERM);
		}
		uq_me = td->td_umtxq;
		if (pi->pi_owner == td)
			umtx_pi_disown(pi);
		/* Find the highest-priority thread that is still sleeping. */
		uq_first = TAILQ_FIRST(&pi->pi_blocked);
		while (uq_first != NULL &&
		    (uq_first->uq_flags & UQF_UMTXQ) == 0) {
			uq_first = TAILQ_NEXT(uq_first, uq_lockq);
		}
		pri = PRI_MAX;
		TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
			uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
			if (uq_first2 != NULL) {
				if (pri > UPRI(uq_first2->uq_thread))
					pri = UPRI(uq_first2->uq_thread);
			}
		}
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
		if (uq_first)
			umtxq_signal_thread(uq_first);
	} else {
		pi = umtx_pi_lookup(&key);
		/*
		 * A umtx_pi can exist if a signal or timeout removed the
		 * last waiter from the umtxq, but there is still
		 * a thread in do_lock_pi() holding the umtx_pi.
		 */
		if (pi != NULL) {
			/*
			 * The umtx_pi can be unowned, such as when a thread
			 * has just entered do_lock_pi(), allocated the
			 * umtx_pi, and unlocked the umtxq.
			 * If the current thread owns it, it must disown it.
			 */
			mtx_lock(&umtx_lock);
			if (pi->pi_owner == td)
				umtx_pi_disown(pi);
			mtx_unlock(&umtx_lock);
		}
	}
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * zero or one thread is waiting on it.  Otherwise, it must be
	 * marked as contested.
	 */

	if (count > 1)
		new_owner |= UMUTEX_CONTESTED;
	error = casueword32(&m->m_owner, owner, &old, new_owner);

	umtxq_unbusy_unlocked(&key);
	umtx_key_release(&key);
	if (error == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}
/*
 * Lock a PP mutex.
 */
static int
do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
{
	struct abs_timeout timo;
	struct umtx_q *uq, *uq2;
	struct umtx_pi *pi;
	uint32_t ceiling;
	uint32_t owner, id;
	int error, pri, old_inherited_pri, su, rv;

	id = td->td_tid;
	uq = td->td_umtxq;
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
	for (;;) {
		old_inherited_pri = uq->uq_inherited_pri;
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		rv = fueword32(&m->m_ceilings[0], &ceiling);
		if (rv == -1) {
			error = EFAULT;
			goto out;
		}
		ceiling = RTP_PRIO_MAX - ceiling;
		if (ceiling > RTP_PRIO_MAX) {
			error = EINVAL;
			goto out;
		}

		mtx_lock(&umtx_lock);
		if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
			mtx_unlock(&umtx_lock);
			error = EINVAL;
			goto out;
		}
		if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
			uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
			thread_lock(td);
			if (uq->uq_inherited_pri < UPRI(td))
				sched_lend_user_prio(td, uq->uq_inherited_pri);
			thread_unlock(td);
		}
		mtx_unlock(&umtx_lock);

		rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    id | UMUTEX_CONTESTED);
		/* The address was invalid. */
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		if (owner == UMUTEX_CONTESTED) {
			error = 0;
			break;
		} else if (owner == UMUTEX_RB_OWNERDEAD) {
			rv = casueword32(&m->m_owner, UMUTEX_RB_OWNERDEAD,
			    &owner, id | UMUTEX_CONTESTED);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (owner == UMUTEX_RB_OWNERDEAD) {
				error = EOWNERDEAD; /* success */
				break;
			}
			error = 0;
		} else if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
			break;
		}

		if (try != 0) {
			error = EBUSY;
			break;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		umtxq_lock(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		error = umtxq_sleep(uq, "umtxpp", timeout == NULL ?
		    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);

		mtx_lock(&umtx_lock);
		uq->uq_inherited_pri = old_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}

	if (error != 0 && error != EOWNERDEAD) {
		mtx_lock(&umtx_lock);
		uq->uq_inherited_pri = old_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}

out:
	umtxq_unbusy_unlocked(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Unlock a PP mutex.
 */
static int
do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	struct umtx_q *uq, *uq2;
	struct umtx_pi *pi;
	uint32_t id, owner, rceiling;
	int error, pri, new_inherited_pri, su;

	id = td->td_tid;
	uq = td->td_umtxq;
	su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);

	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
	if (error != 0)
		return (error);

	if (rceiling == -1)
		new_inherited_pri = PRI_MAX;
	else {
		rceiling = RTP_PRIO_MAX - rceiling;
		if (rceiling > RTP_PRIO_MAX)
			return (EINVAL);
		new_inherited_pri = PRI_MIN_REALTIME + rceiling;
	}

	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	umtxq_unlock(&key);
	/*
	 * For a priority protected mutex, always set the unlocked state
	 * to UMUTEX_CONTESTED, so that userland always enters the kernel
	 * to lock the mutex; this is necessary because thread priority
	 * must be adjusted for such mutexes.
	 */
	error = suword32(&m->m_owner, umtx_unlock_val(flags, rb) |
	    UMUTEX_CONTESTED);

	umtxq_lock(&key);
	if (error == 0)
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);

	if (error == -1)
		error = EFAULT;
	else {
		mtx_lock(&umtx_lock);
		if (su != 0)
			uq->uq_inherited_pri = new_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}
	umtx_key_release(&key);
	return (error);
}
static int
do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
    uint32_t *old_ceiling)
{
	struct umtx_q *uq;
	uint32_t flags, id, owner, save_ceiling;
	int error, rv, rv1;

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);
	if (ceiling > RTP_PRIO_MAX)
		return (EINVAL);
	id = td->td_tid;
	uq = td->td_umtxq;
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);
	for (;;) {
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		rv = fueword32(&m->m_ceilings[0], &save_ceiling);
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    id | UMUTEX_CONTESTED);
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		if (owner == UMUTEX_CONTESTED) {
			rv = suword32(&m->m_ceilings[0], ceiling);
			rv1 = suword32(&m->m_owner, UMUTEX_CONTESTED);
			error = (rv == 0 && rv1 == 0) ? 0: EFAULT;
			break;
		}

		if ((owner & ~UMUTEX_CONTESTED) == id) {
			rv = suword32(&m->m_ceilings[0], ceiling);
			error = rv == 0 ? 0 : EFAULT;
			break;
		}

		if (owner == UMUTEX_RB_OWNERDEAD) {
			error = EOWNERDEAD;
			break;
		} else if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
			break;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		/*
		 * If we set the contested bit, sleep.  Otherwise the lock
		 * changed and we need to retry, or we lost a race to the
		 * thread unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		error = umtxq_sleep(uq, "umtxpp", NULL);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
	}
	umtxq_lock(&uq->uq_key);
	if (error == 0)
		umtxq_signal(&uq->uq_key, INT_MAX);
	umtxq_unbusy(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	if (error == 0 && old_ceiling != NULL) {
		rv = suword32(old_ceiling, save_ceiling);
		error = rv == 0 ? 0 : EFAULT;
	}
	return (error);
}
/*
 * Lock a userland POSIX mutex.
 */
static int
do_lock_umutex(struct thread *td, struct umutex *m,
    struct _umtx_time *timeout, int mode)
{
	uint32_t flags;
	int error;

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
	case 0:
		error = do_lock_normal(td, m, flags, timeout, mode);
		break;
	case UMUTEX_PRIO_INHERIT:
		error = do_lock_pi(td, m, flags, timeout, mode);
		break;
	case UMUTEX_PRIO_PROTECT:
		error = do_lock_pp(td, m, flags, timeout, mode);
		break;
	default:
		return (EINVAL);
	}
	if (timeout == NULL) {
		if (error == EINTR && mode != _UMUTEX_WAIT)
			error = ERESTART;
	} else {
		/* Timed-locking is not restarted. */
		if (error == ERESTART)
			error = EINTR;
	}
	return (error);
}
/*
 * Unlock a userland POSIX mutex.
 */
static int
do_unlock_umutex(struct thread *td, struct umutex *m, bool rb)
{
	uint32_t flags;
	int error;

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
	case 0:
		return (do_unlock_normal(td, m, flags, rb));
	case UMUTEX_PRIO_INHERIT:
		return (do_unlock_pi(td, m, flags, rb));
	case UMUTEX_PRIO_PROTECT:
		return (do_unlock_pp(td, m, flags, rb));
	}

	return (EINVAL);
}
static int
do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
    struct timespec *timeout, u_long wflags)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, clockid, hasw;
	int error;

	uq = td->td_umtxq;
	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if ((wflags & CVWAIT_CLOCKID) != 0) {
		error = fueword32(&cv->c_clockid, &clockid);
		if (error == -1) {
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}
		if (clockid < CLOCK_REALTIME ||
		    clockid >= CLOCK_THREAD_CPUTIME_ID) {
			/* hmm, only HW clock id will work. */
			umtx_key_release(&uq->uq_key);
			return (EINVAL);
		}
	} else {
		clockid = CLOCK_REALTIME;
	}

	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);

	/*
	 * Set c_has_waiters to 1 before releasing the user mutex, and
	 * avoid touching the cache line when unnecessary.
	 */
	error = fueword32(&cv->c_has_waiters, &hasw);
	if (error == 0 && hasw == 0)
		suword32(&cv->c_has_waiters, 1);

	umtxq_unbusy_unlocked(&uq->uq_key);

	error = do_unlock_umutex(td, m, false);

	if (timeout != NULL)
		abs_timeout_init(&timo, clockid, (wflags & CVWAIT_ABSTIME) != 0,
		    timeout);

	umtxq_lock(&uq->uq_key);
	if (error == 0) {
		error = umtxq_sleep(uq, "ucond", timeout == NULL ?
		    NULL : &timo);
	}

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		/*
		 * This must be a timeout, an interrupt by a signal, or a
		 * spurious wakeup; clear the c_has_waiters flag when
		 * necessary.
		 */
		umtxq_busy(&uq->uq_key);
		if ((uq->uq_flags & UQF_UMTXQ) != 0) {
			int oldlen = uq->uq_cur_queue->length;
			umtxq_remove(uq);
			if (oldlen == 1) {
				umtxq_unlock(&uq->uq_key);
				suword32(&cv->c_has_waiters, 0);
				umtxq_lock(&uq->uq_key);
			}
		}
		umtxq_unbusy(&uq->uq_key);
		if (error == ERESTART)
			error = EINTR;
	}

	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Signal a userland condition variable.
 */
static int
do_cv_signal(struct thread *td, struct ucond *cv)
{
	struct umtx_key key;
	int error, cnt, nwake;
	uint32_t flags;

	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	nwake = umtxq_signal(&key, 1);
	if (cnt <= nwake) {
		umtxq_unlock(&key);
		error = suword32(&cv->c_has_waiters, 0);
		if (error == -1)
			error = EFAULT;
		umtxq_lock(&key);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
static int
do_cv_broadcast(struct thread *td, struct ucond *cv)
{
	struct umtx_key key;
	int error;
	uint32_t flags;

	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	umtxq_signal(&key, INT_MAX);
	umtxq_unlock(&key);

	error = suword32(&cv->c_has_waiters, 0);
	if (error == -1)
		error = EFAULT;

	umtxq_unbusy_unlocked(&key);

	umtx_key_release(&key);
	return (error);
}
static int
do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag,
    struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, wrflags;
	int32_t state, oldstate;
	int32_t blocked_readers;
	int error, error1, rv;

	uq = td->td_umtxq;
	error = fueword32(&rwlock->rw_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	wrflags = URWLOCK_WRITE_OWNER;
	if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
		wrflags |= URWLOCK_WRITE_WAITERS;

	for (;;) {
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1) {
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/* try to lock it */
		while (!(state & wrflags)) {
			if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) {
				umtx_key_release(&uq->uq_key);
				return (EAGAIN);
			}
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state + 1);
			if (rv == -1) {
				umtx_key_release(&uq->uq_key);
				return (EFAULT);
			}
			if (oldstate == state) {
				umtx_key_release(&uq->uq_key);
				return (0);
			}
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
			state = oldstate;
		}

		if (error)
			break;

		/* grab monitor lock */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Re-read the state, in case it changed between the
		 * try-lock above and the check below.
		 */
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1)
			error = EFAULT;

		/* set read contention bit */
		while (error == 0 && (state & wrflags) &&
		    !(state & URWLOCK_READ_WAITERS)) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_READ_WAITERS);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (oldstate == state)
				goto sleep;
			state = oldstate;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}
		if (error != 0) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			break;
		}

		/* state is changed while setting flags, restart */
		if (!(state & wrflags)) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
			continue;
		}

sleep:
		/*
		 * The contention bit is set; before sleeping, increase
		 * the read waiter count.
		 */
		rv = fueword32(&rwlock->rw_blocked_readers,
		    &blocked_readers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_readers, blocked_readers+1);

		while (state & wrflags) {
			umtxq_lock(&uq->uq_key);
			umtxq_insert(uq);
			umtxq_unbusy(&uq->uq_key);

			error = umtxq_sleep(uq, "urdlck", timeout == NULL ?
			    NULL : &timo);

			umtxq_busy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			if (error)
				break;
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
		}

		/* decrease read waiter count, and may clear read contention bit */
		rv = fueword32(&rwlock->rw_blocked_readers,
		    &blocked_readers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_readers, blocked_readers-1);
		if (blocked_readers == 1) {
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				umtxq_unbusy_unlocked(&uq->uq_key);
				error = EFAULT;
				break;
			}
			for (;;) {
				rv = casueword32(&rwlock->rw_state, state,
				    &oldstate, state & ~URWLOCK_READ_WAITERS);
				if (rv == -1) {
					error = EFAULT;
					break;
				}
				if (oldstate == state)
					break;
				state = oldstate;
				error1 = umtxq_check_susp(td);
				if (error1 != 0) {
					if (error == 0)
						error = error1;
					break;
				}
			}
		}

		umtxq_unbusy_unlocked(&uq->uq_key);
		if (error != 0)
			break;
	}
	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
static int
do_rw_wrlock(struct thread *td, struct urwlock *rwlock, struct _umtx_time *timeout)
{
	struct abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags;
	int32_t state, oldstate;
	int32_t blocked_writers;
	int32_t blocked_readers;
	int error, error1, rv;

	uq = td->td_umtxq;
	error = fueword32(&rwlock->rw_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		abs_timeout_init2(&timo, timeout);

	blocked_readers = 0;
	for (;;) {
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1) {
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}
		while (!(state & URWLOCK_WRITE_OWNER) &&
		    URWLOCK_READER_COUNT(state) == 0) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_WRITE_OWNER);
			if (rv == -1) {
				umtx_key_release(&uq->uq_key);
				return (EFAULT);
			}
			if (oldstate == state) {
				umtx_key_release(&uq->uq_key);
				return (0);
			}
			state = oldstate;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}

		if (error) {
			if (!(state & (URWLOCK_WRITE_OWNER|URWLOCK_WRITE_WAITERS)) &&
			    blocked_readers != 0) {
				umtxq_lock(&uq->uq_key);
				umtxq_busy(&uq->uq_key);
				umtxq_signal_queue(&uq->uq_key, INT_MAX, UMTX_SHARED_QUEUE);
				umtxq_unbusy(&uq->uq_key);
				umtxq_unlock(&uq->uq_key);
			}

			break;
		}

		/* grab monitor lock */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Re-read the state, in case it changed between the
		 * try-lock above and the check below.
		 */
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1)
			error = EFAULT;

		while (error == 0 && ((state & URWLOCK_WRITE_OWNER) ||
		    URWLOCK_READER_COUNT(state) != 0) &&
		    (state & URWLOCK_WRITE_WAITERS) == 0) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_WRITE_WAITERS);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (oldstate == state)
				goto sleep;
			state = oldstate;
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
		}
		if (error != 0) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			break;
		}

		if (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = umtxq_check_susp(td);
			if (error != 0)
				break;
			continue;
		}
sleep:
		rv = fueword32(&rwlock->rw_blocked_writers,
		    &blocked_writers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_writers, blocked_writers+1);

		while ((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
			umtxq_unbusy(&uq->uq_key);

			error = umtxq_sleep(uq, "uwrlck", timeout == NULL ?
			    NULL : &timo);

			umtxq_busy(&uq->uq_key);
			umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
			umtxq_unlock(&uq->uq_key);
			if (error)
				break;
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
		}

		rv = fueword32(&rwlock->rw_blocked_writers,
		    &blocked_writers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_writers, blocked_writers-1);
		if (blocked_writers == 1) {
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				umtxq_unbusy_unlocked(&uq->uq_key);
				error = EFAULT;
				break;
			}
			for (;;) {
				rv = casueword32(&rwlock->rw_state, state,
				    &oldstate, state & ~URWLOCK_WRITE_WAITERS);
				if (rv == -1) {
					error = EFAULT;
					break;
				}
				if (oldstate == state)
					break;
				state = oldstate;
				error1 = umtxq_check_susp(td);
				/*
				 * We are leaving the URWLOCK_WRITE_WAITERS
				 * bit behind, but this should not harm
				 * correctness.
				 */
				if (error1 != 0) {
					if (error == 0)
						error = error1;
					break;
				}
			}
			rv = fueword32(&rwlock->rw_blocked_readers,
			    &blocked_readers);
			if (rv == -1) {
				umtxq_unbusy_unlocked(&uq->uq_key);
				error = EFAULT;
				break;
			}
		} else
			blocked_readers = 0;

		umtxq_unbusy_unlocked(&uq->uq_key);
	}

	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
2999 do_rw_unlock(struct thread *td, struct urwlock *rwlock)
3003 int32_t state, oldstate;
3004 int error, rv, q, count;
3007 error = fueword32(&rwlock->rw_flags, &flags);
3010 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
3014 error = fueword32(&rwlock->rw_state, &state);
3019 if (state & URWLOCK_WRITE_OWNER) {
3021 rv = casueword32(&rwlock->rw_state, state,
3022 &oldstate, state & ~URWLOCK_WRITE_OWNER);
3027 if (oldstate != state) {
3029 if (!(oldstate & URWLOCK_WRITE_OWNER)) {
3033 error = umtxq_check_susp(td);
3039 } else if (URWLOCK_READER_COUNT(state) != 0) {
3041 rv = casueword32(&rwlock->rw_state, state,
3042 &oldstate, state - 1);
3047 if (oldstate != state) {
3049 if (URWLOCK_READER_COUNT(oldstate) == 0) {
3053 error = umtxq_check_susp(td);
3066 if (!(flags & URWLOCK_PREFER_READER)) {
3067 if (state & URWLOCK_WRITE_WAITERS) {
3069 q = UMTX_EXCLUSIVE_QUEUE;
3070 } else if (state & URWLOCK_READ_WAITERS) {
3072 q = UMTX_SHARED_QUEUE;
3075 if (state & URWLOCK_READ_WAITERS) {
3077 q = UMTX_SHARED_QUEUE;
3078 } else if (state & URWLOCK_WRITE_WAITERS) {
3080 q = UMTX_EXCLUSIVE_QUEUE;
3085 umtxq_lock(&uq->uq_key);
3086 umtxq_busy(&uq->uq_key);
3087 umtxq_signal_queue(&uq->uq_key, count, q);
3088 umtxq_unbusy(&uq->uq_key);
3089 umtxq_unlock(&uq->uq_key);
3092 umtx_key_release(&uq->uq_key);
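/*
 * Editorial note: in the elided branches above, choosing the exclusive
 * queue pairs with count = 1 (wake a single writer), while choosing
 * the shared queue pairs with count = INT_MAX (wake all readers), so
 * URWLOCK_PREFER_READER simply flips which class is checked first.
 * A hypothetical userland release after a failed fast-path unlock:
 *
 *	_umtx_op(rw, UMTX_OP_RW_UNLOCK, 0, NULL, NULL);
 */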
3096 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
3098 do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
3100 struct abs_timeout timo;
3102 uint32_t flags, count, count1;
3106 error = fueword32(&sem->_flags, &flags);
3109 error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
3113 if (timeout != NULL)
3114 abs_timeout_init2(&timo, timeout);
3116 umtxq_lock(&uq->uq_key);
3117 umtxq_busy(&uq->uq_key);
3119 umtxq_unlock(&uq->uq_key);
3120 rv = casueword32(&sem->_has_waiters, 0, &count1, 1);
3122 rv = fueword32(&sem->_count, &count);
3123 if (rv == -1 || count != 0) {
3124 umtxq_lock(&uq->uq_key);
3125 umtxq_unbusy(&uq->uq_key);
3127 umtxq_unlock(&uq->uq_key);
3128 umtx_key_release(&uq->uq_key);
3129 return (rv == -1 ? EFAULT : 0);
3131 umtxq_lock(&uq->uq_key);
3132 umtxq_unbusy(&uq->uq_key);
3134 error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);
3136 if ((uq->uq_flags & UQF_UMTXQ) == 0)
3140 /* A relative timeout cannot be restarted. */
3141 if (error == ERESTART && timeout != NULL &&
3142 (timeout->_flags & UMTX_ABSTIME) == 0)
3145 umtxq_unlock(&uq->uq_key);
3146 umtx_key_release(&uq->uq_key);
3151 * Signal a userland semaphore.
3154 do_sem_wake(struct thread *td, struct _usem *sem)
3156 struct umtx_key key;
3160 error = fueword32(&sem->_flags, &flags);
3163 if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
3167 cnt = umtxq_count(&key);
3170 * If the count is greater than 0, the memory is still being
3171 * referenced by user code, so the _has_waiters flag can be
3172 * updated safely.
3176 error = suword32(&sem->_has_waiters, 0);
3181 umtxq_signal(&key, 1);
3185 umtx_key_release(&key);
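/*
 * Editorial sketch (not from the original file): the wake side pairs
 * with a userland post for the legacy _usem that enters the kernel
 * only when a waiter was advertised:
 *
 *	atomic_fetchadd_int(&sem->_count, 1);
 *	if (sem->_has_waiters)
 *		_umtx_op(sem, UMTX_OP_SEM_WAKE, 0, NULL, NULL);
 */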
3191 do_sem2_wait(struct thread *td, struct _usem2 *sem, struct _umtx_time *timeout)
3193 struct abs_timeout timo;
3195 uint32_t count, flags;
3199 rv = fueword32(&sem->_flags, &flags);
if (rv == -1)
return (EFAULT);
3200 error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
3204 if (timeout != NULL)
3205 abs_timeout_init2(&timo, timeout);
3207 umtxq_lock(&uq->uq_key);
3208 umtxq_busy(&uq->uq_key);
3210 umtxq_unlock(&uq->uq_key);
3211 rv = fueword32(&sem->_count, &count);
3213 umtxq_lock(&uq->uq_key);
3214 umtxq_unbusy(&uq->uq_key);
3216 umtxq_unlock(&uq->uq_key);
3217 umtx_key_release(&uq->uq_key);
3221 if (USEM_COUNT(count) != 0) {
3222 umtxq_lock(&uq->uq_key);
3223 umtxq_unbusy(&uq->uq_key);
3225 umtxq_unlock(&uq->uq_key);
3226 umtx_key_release(&uq->uq_key);
3229 if (count == USEM_HAS_WAITERS)
3231 rv = casueword32(&sem->_count, 0, &count, USEM_HAS_WAITERS);
3233 umtxq_lock(&uq->uq_key);
3234 umtxq_unbusy(&uq->uq_key);
3236 umtxq_unlock(&uq->uq_key);
3237 umtx_key_release(&uq->uq_key);
3243 umtxq_lock(&uq->uq_key);
3244 umtxq_unbusy(&uq->uq_key);
3246 error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);
3248 if ((uq->uq_flags & UQF_UMTXQ) == 0)
3252 if (timeout != NULL && (timeout->_flags & UMTX_ABSTIME) == 0) {
3253 /* A relative timeout cannot be restarted. */
3254 if (error == ERESTART)
3256 if (error == EINTR) {
3257 abs_timeout_update(&timo);
3258 timeout->_timeout = timo.end;
3259 timespecsub(&timeout->_timeout, &timo.cur);
3263 umtxq_unlock(&uq->uq_key);
3264 umtx_key_release(&uq->uq_key);
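/*
 * Illustrative sketch (editorial): the matching userland fast path for
 * _usem2, assuming the standard USEM_* macros from <sys/umtx.h>; only
 * the contended case reaches do_sem2_wait() above.
 *
 *	int
 *	my_sem2_wait(struct _usem2 *sem)	// hypothetical helper
 *	{
 *		uint32_t count;
 *
 *		count = atomic_load_acq_32(&sem->_count);
 *		for (;;) {
 *			// Grab a token without entering the kernel.
 *			if (USEM_COUNT(count) == 0)
 *				break;
 *			if (atomic_fcmpset_acq_32(&sem->_count, &count,
 *			    count - 1))
 *				return (0);
 *		}
 *		// Contended: sleep until do_sem2_wake() signals us.
 *		return (_umtx_op(sem, UMTX_OP_SEM2_WAIT, 0, NULL, NULL));
 *	}
 */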
3269 * Signal a userland semaphore.
3272 do_sem2_wake(struct thread *td, struct _usem2 *sem)
3274 struct umtx_key key;
3276 uint32_t count, flags;
3278 rv = fueword32(&sem->_flags, &flags);
3281 if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
3285 cnt = umtxq_count(&key);
3288 * If this was the last sleeping thread, clear the waiters flag in _count.
3293 rv = fueword32(&sem->_count, &count);
3294 while (rv != -1 && count & USEM_HAS_WAITERS)
3295 rv = casueword32(&sem->_count, count, &count,
3296 count & ~USEM_HAS_WAITERS);
3302 umtxq_signal(&key, 1);
3306 umtx_key_release(&key);
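/*
 * Editorial sketch: the flag-clearing loop above pairs with a userland
 * post of this shape (hypothetical, standard <sys/umtx.h> definitions
 * assumed); the kernel is entered only when USEM_HAS_WAITERS is set:
 *
 *	uint32_t c = atomic_fetchadd_32(&sem->_count, 1);
 *	if ((c & USEM_HAS_WAITERS) != 0)
 *		_umtx_op(sem, UMTX_OP_SEM2_WAKE, 0, NULL, NULL);
 */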
3311 umtx_copyin_timeout(const void *addr, struct timespec *tsp)
3315 error = copyin(addr, tsp, sizeof(struct timespec));
3317 if (tsp->tv_sec < 0 ||
3318 tsp->tv_nsec >= 1000000000 ||
3326 umtx_copyin_umtx_time(const void *addr, size_t size, struct _umtx_time *tp)
3330 if (size <= sizeof(struct timespec)) {
3331 tp->_clockid = CLOCK_REALTIME;
3333 error = copyin(addr, &tp->_timeout, sizeof(struct timespec));
3335 error = copyin(addr, tp, sizeof(struct _umtx_time));
3338 if (tp->_timeout.tv_sec < 0 ||
3339 tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0)
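/*
 * For illustration (editorial): callers encode the size of the timeout
 * structure in the uaddr1 syscall argument, which is how this routine
 * distinguishes a bare timespec from a full _umtx_time.  A
 * hypothetical absolute-time wait ("word", "expected" and "deadline"
 * are placeholders):
 *
 *	struct _umtx_time to = {
 *		._timeout = deadline,		// absolute timespec
 *		._flags = UMTX_ABSTIME,
 *		._clockid = CLOCK_MONOTONIC,
 *	};
 *	_umtx_op(&word, UMTX_OP_WAIT_UINT, expected,
 *	    (void *)sizeof(to), &to);
 */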
3345 __umtx_op_unimpl(struct thread *td, struct _umtx_op_args *uap)
3348 return (EOPNOTSUPP);
3352 __umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
3354 struct _umtx_time timeout, *tm_p;
3357 if (uap->uaddr2 == NULL)
3360 error = umtx_copyin_umtx_time(
3361 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3366 return (do_wait(td, uap->obj, uap->val, tm_p, 0, 0));
3370 __umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
3372 struct _umtx_time timeout, *tm_p;
3375 if (uap->uaddr2 == NULL)
3378 error = umtx_copyin_umtx_time(
3379 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3384 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
3388 __umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
3390 struct _umtx_time *tm_p, timeout;
3393 if (uap->uaddr2 == NULL)
3396 error = umtx_copyin_umtx_time(
3397 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3402 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
3406 __umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
3409 return (kern_umtx_wake(td, uap->obj, uap->val, 0));
3412 #define BATCH_SIZE 128
3414 __umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap)
3416 char *uaddrs[BATCH_SIZE], **upp;
3417 int count, error, i, pos, tocopy;
3419 upp = (char **)uap->obj;
3421 for (count = uap->val, pos = 0; count > 0; count -= tocopy,
3423 tocopy = MIN(count, BATCH_SIZE);
3424 error = copyin(upp + pos, uaddrs, tocopy * sizeof(char *));
3427 for (i = 0; i < tocopy; ++i)
3428 kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
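/*
 * Editorial note: obj points at a userland array of uap->val wait
 * addresses; it is drained in BATCH_SIZE chunks so only a small, fixed
 * kernel buffer is needed regardless of count.  Hypothetical usage:
 *
 *	void *addrs[3] = { &w0, &w1, &w2 };
 *	_umtx_op(addrs, UMTX_OP_NWAKE_PRIVATE, 3, NULL, NULL);
 */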
3435 __umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
3438 return (kern_umtx_wake(td, uap->obj, uap->val, 1));
3442 __umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
3444 struct _umtx_time *tm_p, timeout;
3447 /* Allow a null timespec (wait forever). */
3448 if (uap->uaddr2 == NULL)
3451 error = umtx_copyin_umtx_time(
3452 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3457 return (do_lock_umutex(td, uap->obj, tm_p, 0));
3461 __umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
3464 return (do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY));
3468 __umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
3470 struct _umtx_time *tm_p, timeout;
3473 /* Allow a null timespec (wait forever). */
3474 if (uap->uaddr2 == NULL)
3477 error = umtx_copyin_umtx_time(
3478 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3483 return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
3487 __umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap)
3490 return (do_wake_umutex(td, uap->obj));
3494 __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
3497 return (do_unlock_umutex(td, uap->obj, false));
3501 __umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap)
3504 return (do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1));
3508 __umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
3510 struct timespec *ts, timeout;
3513 /* Allow a null timespec (wait forever). */
3514 if (uap->uaddr2 == NULL)
3517 error = umtx_copyin_timeout(uap->uaddr2, &timeout);
3522 return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
3526 __umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap)
3529 return (do_cv_signal(td, uap->obj));
3533 __umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap)
3536 return (do_cv_broadcast(td, uap->obj));
3540 __umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
3542 struct _umtx_time timeout;
3545 /* Allow a null timespec (wait forever). */
3546 if (uap->uaddr2 == NULL) {
3547 error = do_rw_rdlock(td, uap->obj, uap->val, 0);
3549 error = umtx_copyin_umtx_time(uap->uaddr2,
3550 (size_t)uap->uaddr1, &timeout);
3553 error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
3559 __umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
3561 struct _umtx_time timeout;
3564 /* Allow a null timespec (wait forever). */
3565 if (uap->uaddr2 == NULL) {
3566 error = do_rw_wrlock(td, uap->obj, 0);
3568 error = umtx_copyin_umtx_time(uap->uaddr2,
3569 (size_t)uap->uaddr1, &timeout);
3573 error = do_rw_wrlock(td, uap->obj, &timeout);
3579 __umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
3582 return (do_rw_unlock(td, uap->obj));
3585 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
3587 __umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap)
3589 struct _umtx_time *tm_p, timeout;
3592 /* Allow a null timespec (wait forever). */
3593 if (uap->uaddr2 == NULL)
3596 error = umtx_copyin_umtx_time(
3597 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3602 return (do_sem_wait(td, uap->obj, tm_p));
3606 __umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap)
3609 return (do_sem_wake(td, uap->obj));
3614 __umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap)
3617 return (do_wake2_umutex(td, uap->obj, uap->val));
3621 __umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap)
3623 struct _umtx_time *tm_p, timeout;
3627 /* Allow a null timespec (wait forever). */
3628 if (uap->uaddr2 == NULL) {
3632 uasize = (size_t)uap->uaddr1;
3633 error = umtx_copyin_umtx_time(uap->uaddr2, uasize, &timeout);
3638 error = do_sem2_wait(td, uap->obj, tm_p);
3639 if (error == EINTR && uap->uaddr2 != NULL &&
3640 (timeout._flags & UMTX_ABSTIME) == 0 &&
3641 uasize >= sizeof(struct _umtx_time) + sizeof(struct timespec)) {
3642 error = copyout(&timeout._timeout,
3643 (struct _umtx_time *)uap->uaddr2 + 1,
3644 sizeof(struct timespec));
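/*
 * Editorial sketch: on EINTR with a relative timeout, the time left is
 * copied out immediately after the _umtx_time structure, provided the
 * size passed in uaddr1 shows the caller reserved room for it:
 *
 *	struct {				// hypothetical layout
 *		struct _umtx_time to;		// the request
 *		struct timespec remain;		// filled in on EINTR
 *	} arg;
 *	_umtx_op(sem, UMTX_OP_SEM2_WAIT, 0, (void *)sizeof(arg), &arg);
 */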
3654 __umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap)
3657 return (do_sem2_wake(td, uap->obj));
3660 #define USHM_OBJ_UMTX(o) \
3661 ((struct umtx_shm_obj_list *)(&(o)->umtx_data))
3663 #define USHMF_REG_LINKED 0x0001
3664 #define USHMF_OBJ_LINKED 0x0002
3665 struct umtx_shm_reg {
3666 TAILQ_ENTRY(umtx_shm_reg) ushm_reg_link;
3667 LIST_ENTRY(umtx_shm_reg) ushm_obj_link;
3668 struct umtx_key ushm_key;
3669 struct ucred *ushm_cred;
3670 struct shmfd *ushm_obj;
3675 LIST_HEAD(umtx_shm_obj_list, umtx_shm_reg);
3676 TAILQ_HEAD(umtx_shm_reg_head, umtx_shm_reg);
3678 static uma_zone_t umtx_shm_reg_zone;
3679 static struct umtx_shm_reg_head umtx_shm_registry[UMTX_CHAINS];
3680 static struct mtx umtx_shm_lock;
3681 static struct umtx_shm_reg_head umtx_shm_reg_delfree =
3682 TAILQ_HEAD_INITIALIZER(umtx_shm_reg_delfree);
3684 static void umtx_shm_free_reg(struct umtx_shm_reg *reg);
3687 umtx_shm_reg_delfree_tq(void *context __unused, int pending __unused)
3689 struct umtx_shm_reg_head d;
3690 struct umtx_shm_reg *reg, *reg1;
3693 mtx_lock(&umtx_shm_lock);
3694 TAILQ_CONCAT(&d, &umtx_shm_reg_delfree, ushm_reg_link);
3695 mtx_unlock(&umtx_shm_lock);
3696 TAILQ_FOREACH_SAFE(reg, &d, ushm_reg_link, reg1) {
3697 TAILQ_REMOVE(&d, reg, ushm_reg_link);
3698 umtx_shm_free_reg(reg);
3702 static struct task umtx_shm_reg_delfree_task =
3703 TASK_INITIALIZER(0, umtx_shm_reg_delfree_tq, NULL);
3705 static struct umtx_shm_reg *
3706 umtx_shm_find_reg_locked(const struct umtx_key *key)
3708 struct umtx_shm_reg *reg;
3709 struct umtx_shm_reg_head *reg_head;
3711 KASSERT(key->shared, ("umtx_shm_find_reg_locked: private key"));
3712 mtx_assert(&umtx_shm_lock, MA_OWNED);
3713 reg_head = &umtx_shm_registry[key->hash];
3714 TAILQ_FOREACH(reg, reg_head, ushm_reg_link) {
3715 KASSERT(reg->ushm_key.shared,
3716 ("non-shared key on reg %p %d", reg, reg->ushm_key.shared));
3717 if (reg->ushm_key.info.shared.object ==
3718 key->info.shared.object &&
3719 reg->ushm_key.info.shared.offset ==
3720 key->info.shared.offset) {
3721 KASSERT(reg->ushm_key.type == TYPE_SHM, ("TYPE_USHM"));
3722 KASSERT(reg->ushm_refcnt > 0,
3723 ("reg %p refcnt 0 onlist", reg));
3724 KASSERT((reg->ushm_flags & USHMF_REG_LINKED) != 0,
3725 ("reg %p not linked", reg));
3733 static struct umtx_shm_reg *
3734 umtx_shm_find_reg(const struct umtx_key *key)
3736 struct umtx_shm_reg *reg;
3738 mtx_lock(&umtx_shm_lock);
3739 reg = umtx_shm_find_reg_locked(key);
3740 mtx_unlock(&umtx_shm_lock);
3745 umtx_shm_free_reg(struct umtx_shm_reg *reg)
3748 chgumtxcnt(reg->ushm_cred->cr_ruidinfo, -1, 0);
3749 crfree(reg->ushm_cred);
3750 shm_drop(reg->ushm_obj);
3751 uma_zfree(umtx_shm_reg_zone, reg);
3755 umtx_shm_unref_reg_locked(struct umtx_shm_reg *reg, bool force)
3759 mtx_assert(&umtx_shm_lock, MA_OWNED);
3760 KASSERT(reg->ushm_refcnt > 0, ("ushm_reg %p refcnt 0", reg));
3762 res = reg->ushm_refcnt == 0;
3764 if ((reg->ushm_flags & USHMF_REG_LINKED) != 0) {
3765 TAILQ_REMOVE(&umtx_shm_registry[reg->ushm_key.hash],
3766 reg, ushm_reg_link);
3767 reg->ushm_flags &= ~USHMF_REG_LINKED;
3769 if ((reg->ushm_flags & USHMF_OBJ_LINKED) != 0) {
3770 LIST_REMOVE(reg, ushm_obj_link);
3771 reg->ushm_flags &= ~USHMF_OBJ_LINKED;
3778 umtx_shm_unref_reg(struct umtx_shm_reg *reg, bool force)
3784 object = reg->ushm_obj->shm_object;
3785 VM_OBJECT_WLOCK(object);
3786 object->flags |= OBJ_UMTXDEAD;
3787 VM_OBJECT_WUNLOCK(object);
3789 mtx_lock(&umtx_shm_lock);
3790 dofree = umtx_shm_unref_reg_locked(reg, force);
3791 mtx_unlock(&umtx_shm_lock);
3793 umtx_shm_free_reg(reg);
3797 umtx_shm_object_init(vm_object_t object)
3800 LIST_INIT(USHM_OBJ_UMTX(object));
3804 umtx_shm_object_terminated(vm_object_t object)
3806 struct umtx_shm_reg *reg, *reg1;
3810 mtx_lock(&umtx_shm_lock);
3811 LIST_FOREACH_SAFE(reg, USHM_OBJ_UMTX(object), ushm_obj_link, reg1) {
3812 if (umtx_shm_unref_reg_locked(reg, true)) {
3813 TAILQ_INSERT_TAIL(&umtx_shm_reg_delfree, reg,
3818 mtx_unlock(&umtx_shm_lock);
3820 taskqueue_enqueue(taskqueue_thread, &umtx_shm_reg_delfree_task);
3824 umtx_shm_create_reg(struct thread *td, const struct umtx_key *key,
3825 struct umtx_shm_reg **res)
3827 struct umtx_shm_reg *reg, *reg1;
3831 reg = umtx_shm_find_reg(key);
3836 cred = td->td_ucred;
3837 if (!chgumtxcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_UMTXP)))
3839 reg = uma_zalloc(umtx_shm_reg_zone, M_WAITOK | M_ZERO);
3840 reg->ushm_refcnt = 1;
3841 bcopy(key, &reg->ushm_key, sizeof(*key));
3842 reg->ushm_obj = shm_alloc(td->td_ucred, O_RDWR);
3843 reg->ushm_cred = crhold(cred);
3844 error = shm_dotruncate(reg->ushm_obj, PAGE_SIZE);
3846 umtx_shm_free_reg(reg);
3849 mtx_lock(&umtx_shm_lock);
3850 reg1 = umtx_shm_find_reg_locked(key);
3852 mtx_unlock(&umtx_shm_lock);
3853 umtx_shm_free_reg(reg);
3858 TAILQ_INSERT_TAIL(&umtx_shm_registry[key->hash], reg, ushm_reg_link);
3859 LIST_INSERT_HEAD(USHM_OBJ_UMTX(key->info.shared.object), reg,
3861 reg->ushm_flags = USHMF_REG_LINKED | USHMF_OBJ_LINKED;
3862 mtx_unlock(&umtx_shm_lock);
3868 umtx_shm_alive(struct thread *td, void *addr)
3871 vm_map_entry_t entry;
3878 map = &td->td_proc->p_vmspace->vm_map;
3879 res = vm_map_lookup(&map, (uintptr_t)addr, VM_PROT_READ, &entry,
3880 &object, &pindex, &prot, &wired);
3881 if (res != KERN_SUCCESS)
3886 ret = (object->flags & OBJ_UMTXDEAD) != 0 ? ENOTTY : 0;
3887 vm_map_lookup_done(map, entry);
3896 umtx_shm_reg_zone = uma_zcreate("umtx_shm", sizeof(struct umtx_shm_reg),
3897 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
3898 mtx_init(&umtx_shm_lock, "umtxshm", NULL, MTX_DEF);
3899 for (i = 0; i < nitems(umtx_shm_registry); i++)
3900 TAILQ_INIT(&umtx_shm_registry[i]);
3904 umtx_shm(struct thread *td, void *addr, u_int flags)
3906 struct umtx_key key;
3907 struct umtx_shm_reg *reg;
3911 if (__bitcount(flags & (UMTX_SHM_CREAT | UMTX_SHM_LOOKUP |
3912 UMTX_SHM_DESTROY | UMTX_SHM_ALIVE)) != 1)
3914 if ((flags & UMTX_SHM_ALIVE) != 0)
3915 return (umtx_shm_alive(td, addr));
3916 error = umtx_key_get(addr, TYPE_SHM, PROCESS_SHARE, &key);
3919 KASSERT(key.shared == 1, ("non-shared key"));
3920 if ((flags & UMTX_SHM_CREAT) != 0) {
3921 error = umtx_shm_create_reg(td, &key, &reg);
3923 reg = umtx_shm_find_reg(&key);
3927 umtx_key_release(&key);
3930 KASSERT(reg != NULL, ("no reg"));
3931 if ((flags & UMTX_SHM_DESTROY) != 0) {
3932 umtx_shm_unref_reg(reg, true);
3936 error = mac_posixshm_check_open(td->td_ucred,
3937 reg->ushm_obj, FFLAGS(O_RDWR));
3940 error = shm_access(reg->ushm_obj, td->td_ucred,
3944 error = falloc_caps(td, &fp, &fd, O_CLOEXEC, NULL);
3946 shm_hold(reg->ushm_obj);
3947 finit(fp, FFLAGS(O_RDWR), DTYPE_SHM, reg->ushm_obj,
3949 td->td_retval[0] = fd;
3953 umtx_shm_unref_reg(reg, false);
3958 __umtx_op_shm(struct thread *td, struct _umtx_op_args *uap)
3961 return (umtx_shm(td, uap->uaddr1, uap->val));
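/*
 * Illustrative sketch (editorial): UMTX_SHM_CREAT and UMTX_SHM_LOOKUP
 * return, via td_retval[0], a file descriptor for the anonymous page
 * registered for the physical backing of addr; cooperating processes
 * can mmap() it to share a lock word.  Hypothetical usage:
 *
 *	int fd = _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_CREAT, addr, NULL);
 *	void *page = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 */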
3965 umtx_robust_lists(struct thread *td, struct umtx_robust_lists_params *rbp)
3968 td->td_rb_list = rbp->robust_list_offset;
3969 td->td_rbp_list = rbp->robust_priv_list_offset;
3970 td->td_rb_inact = rbp->robust_inact_offset;
3975 __umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap)
3977 struct umtx_robust_lists_params rb;
3980 if (uap->val > sizeof(rb))
3982 bzero(&rb, sizeof(rb));
3983 error = copyin(uap->uaddr1, &rb, uap->val);
3986 return (umtx_robust_lists(td, &rb));
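/*
 * Editorial sketch: the threading library registers its robust-list
 * heads once per thread; uap->val carries the structure size so the
 * interface can grow compatibly.  A hypothetical registration (the
 * thread structure "thr" and its fields are placeholders):
 *
 *	struct umtx_robust_lists_params rb = {
 *		.robust_list_offset = (uintptr_t)&thr->robust_list,
 *		.robust_priv_list_offset = (uintptr_t)&thr->priv_list,
 *		.robust_inact_offset = (uintptr_t)&thr->inact_mtx,
 *	};
 *	_umtx_op(NULL, UMTX_OP_ROBUST_LISTS, sizeof(rb), &rb, NULL);
 */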
3989 typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap);
3991 static const _umtx_op_func op_table[] = {
3992 [UMTX_OP_RESERVED0] = __umtx_op_unimpl,
3993 [UMTX_OP_RESERVED1] = __umtx_op_unimpl,
3994 [UMTX_OP_WAIT] = __umtx_op_wait,
3995 [UMTX_OP_WAKE] = __umtx_op_wake,
3996 [UMTX_OP_MUTEX_TRYLOCK] = __umtx_op_trylock_umutex,
3997 [UMTX_OP_MUTEX_LOCK] = __umtx_op_lock_umutex,
3998 [UMTX_OP_MUTEX_UNLOCK] = __umtx_op_unlock_umutex,
3999 [UMTX_OP_SET_CEILING] = __umtx_op_set_ceiling,
4000 [UMTX_OP_CV_WAIT] = __umtx_op_cv_wait,
4001 [UMTX_OP_CV_SIGNAL] = __umtx_op_cv_signal,
4002 [UMTX_OP_CV_BROADCAST] = __umtx_op_cv_broadcast,
4003 [UMTX_OP_WAIT_UINT] = __umtx_op_wait_uint,
4004 [UMTX_OP_RW_RDLOCK] = __umtx_op_rw_rdlock,
4005 [UMTX_OP_RW_WRLOCK] = __umtx_op_rw_wrlock,
4006 [UMTX_OP_RW_UNLOCK] = __umtx_op_rw_unlock,
4007 [UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private,
4008 [UMTX_OP_WAKE_PRIVATE] = __umtx_op_wake_private,
4009 [UMTX_OP_MUTEX_WAIT] = __umtx_op_wait_umutex,
4010 [UMTX_OP_MUTEX_WAKE] = __umtx_op_wake_umutex,
4011 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
4012 [UMTX_OP_SEM_WAIT] = __umtx_op_sem_wait,
4013 [UMTX_OP_SEM_WAKE] = __umtx_op_sem_wake,
4015 [UMTX_OP_SEM_WAIT] = __umtx_op_unimpl,
4016 [UMTX_OP_SEM_WAKE] = __umtx_op_unimpl,
4018 [UMTX_OP_NWAKE_PRIVATE] = __umtx_op_nwake_private,
4019 [UMTX_OP_MUTEX_WAKE2] = __umtx_op_wake2_umutex,
4020 [UMTX_OP_SEM2_WAIT] = __umtx_op_sem2_wait,
4021 [UMTX_OP_SEM2_WAKE] = __umtx_op_sem2_wake,
4022 [UMTX_OP_SHM] = __umtx_op_shm,
4023 [UMTX_OP_ROBUST_LISTS] = __umtx_op_robust_lists,
4027 sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
4030 if ((unsigned)uap->op < nitems(op_table))
4031 return (*op_table[uap->op])(td, uap);
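/*
 * Editorial example of the canonical wait/wake pair dispatched through
 * op_table above ("word" and "expected" are placeholders):
 *
 *	// Sleep if the word still holds the expected value.
 *	_umtx_op(&word, UMTX_OP_WAIT_UINT_PRIVATE, expected, NULL, NULL);
 *	// Wake every thread sleeping on the word.
 *	_umtx_op(&word, UMTX_OP_WAKE_PRIVATE, INT_MAX, NULL, NULL);
 */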
4035 #ifdef COMPAT_FREEBSD32
4042 struct umtx_time32 {
4043 struct timespec32 timeout;
4049 umtx_copyin_timeout32(void *addr, struct timespec *tsp)
4051 struct timespec32 ts32;
4054 error = copyin(addr, &ts32, sizeof(struct timespec32));
4056 if (ts32.tv_sec < 0 ||
4057 ts32.tv_nsec >= 1000000000 ||
4061 tsp->tv_sec = ts32.tv_sec;
4062 tsp->tv_nsec = ts32.tv_nsec;
4069 umtx_copyin_umtx_time32(const void *addr, size_t size, struct _umtx_time *tp)
4071 struct umtx_time32 t32;
4074 t32.clockid = CLOCK_REALTIME;
4076 if (size <= sizeof(struct timespec32))
4077 error = copyin(addr, &t32.timeout, sizeof(struct timespec32));
4079 error = copyin(addr, &t32, sizeof(struct umtx_time32));
4082 if (t32.timeout.tv_sec < 0 ||
4083 t32.timeout.tv_nsec >= 1000000000 || t32.timeout.tv_nsec < 0)
4085 tp->_timeout.tv_sec = t32.timeout.tv_sec;
4086 tp->_timeout.tv_nsec = t32.timeout.tv_nsec;
4087 tp->_flags = t32.flags;
4088 tp->_clockid = t32.clockid;
4093 __umtx_op_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
4095 struct _umtx_time *tm_p, timeout;
4098 if (uap->uaddr2 == NULL)
4101 error = umtx_copyin_umtx_time32(uap->uaddr2,
4102 (size_t)uap->uaddr1, &timeout);
4107 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
4111 __umtx_op_lock_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
4113 struct _umtx_time *tm_p, timeout;
4116 /* Allow a null timespec (wait forever). */
4117 if (uap->uaddr2 == NULL)
4120 error = umtx_copyin_umtx_time32(uap->uaddr2,
4121 (size_t)uap->uaddr1, &timeout);
4126 return (do_lock_umutex(td, uap->obj, tm_p, 0));
4130 __umtx_op_wait_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
4132 struct _umtx_time *tm_p, timeout;
4135 /* Allow a null timespec (wait forever). */
4136 if (uap->uaddr2 == NULL)
4139 error = umtx_copyin_umtx_time32(uap->uaddr2,
4140 (size_t)uap->uaddr1, &timeout);
4145 return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
4149 __umtx_op_cv_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
4151 struct timespec *ts, timeout;
4154 /* Allow a null timespec (wait forever). */
4155 if (uap->uaddr2 == NULL)
4158 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
4163 return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
4167 __umtx_op_rw_rdlock_compat32(struct thread *td, struct _umtx_op_args *uap)
4169 struct _umtx_time timeout;
4172 /* Allow a null timespec (wait forever). */
4173 if (uap->uaddr2 == NULL) {
4174 error = do_rw_rdlock(td, uap->obj, uap->val, 0);
4176 error = umtx_copyin_umtx_time32(uap->uaddr2,
4177 (size_t)uap->uaddr1, &timeout);
4180 error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
4186 __umtx_op_rw_wrlock_compat32(struct thread *td, struct _umtx_op_args *uap)
4188 struct _umtx_time timeout;
4191 /* Allow a null timespec (wait forever). */
4192 if (uap->uaddr2 == NULL) {
4193 error = do_rw_wrlock(td, uap->obj, 0);
4195 error = umtx_copyin_umtx_time32(uap->uaddr2,
4196 (size_t)uap->uaddr1, &timeout);
4199 error = do_rw_wrlock(td, uap->obj, &timeout);
4205 __umtx_op_wait_uint_private_compat32(struct thread *td, struct _umtx_op_args *uap)
4207 struct _umtx_time *tm_p, timeout;
4210 if (uap->uaddr2 == NULL)
4213 error = umtx_copyin_umtx_time32(
4214 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
4219 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
4222 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
4224 __umtx_op_sem_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
4226 struct _umtx_time *tm_p, timeout;
4229 /* Allow a null timespec (wait forever). */
4230 if (uap->uaddr2 == NULL)
4233 error = umtx_copyin_umtx_time32(uap->uaddr2,
4234 (size_t)uap->uaddr1, &timeout);
4239 return (do_sem_wait(td, uap->obj, tm_p));
4244 __umtx_op_sem2_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
4246 struct _umtx_time *tm_p, timeout;
4250 /* Allow a null timespec (wait forever). */
4251 if (uap->uaddr2 == NULL) {
4255 uasize = (size_t)uap->uaddr1;
4256 error = umtx_copyin_umtx_time32(uap->uaddr2, uasize, &timeout);
4261 error = do_sem2_wait(td, uap->obj, tm_p);
4262 if (error == EINTR && uap->uaddr2 != NULL &&
4263 (timeout._flags & UMTX_ABSTIME) == 0 &&
4264 uasize >= sizeof(struct umtx_time32) + sizeof(struct timespec32)) {
4265 struct timespec32 remain32 = {
4266 .tv_sec = timeout._timeout.tv_sec,
4267 .tv_nsec = timeout._timeout.tv_nsec
4269 error = copyout(&remain32,
4270 (struct umtx_time32 *)uap->uaddr2 + 1,
4271 sizeof(struct timespec32));
4281 __umtx_op_nwake_private32(struct thread *td, struct _umtx_op_args *uap)
4283 uint32_t uaddrs[BATCH_SIZE], **upp;
4284 int count, error, i, pos, tocopy;
4286 upp = (uint32_t **)uap->obj;
4288 for (count = uap->val, pos = 0; count > 0; count -= tocopy,
4290 tocopy = MIN(count, BATCH_SIZE);
4291 error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t));
4294 for (i = 0; i < tocopy; ++i)
4295 kern_umtx_wake(td, (void *)(intptr_t)uaddrs[i],
4302 struct umtx_robust_lists_params_compat32 {
4303 uint32_t robust_list_offset;
4304 uint32_t robust_priv_list_offset;
4305 uint32_t robust_inact_offset;
4309 __umtx_op_robust_lists_compat32(struct thread *td, struct _umtx_op_args *uap)
4311 struct umtx_robust_lists_params rb;
4312 struct umtx_robust_lists_params_compat32 rb32;
4315 if (uap->val > sizeof(rb32))
4317 bzero(&rb, sizeof(rb));
4318 bzero(&rb32, sizeof(rb32));
4319 error = copyin(uap->uaddr1, &rb32, uap->val);
4322 rb.robust_list_offset = rb32.robust_list_offset;
4323 rb.robust_priv_list_offset = rb32.robust_priv_list_offset;
4324 rb.robust_inact_offset = rb32.robust_inact_offset;
4325 return (umtx_robust_lists(td, &rb));
4328 static const _umtx_op_func op_table_compat32[] = {
4329 [UMTX_OP_RESERVED0] = __umtx_op_unimpl,
4330 [UMTX_OP_RESERVED1] = __umtx_op_unimpl,
4331 [UMTX_OP_WAIT] = __umtx_op_wait_compat32,
4332 [UMTX_OP_WAKE] = __umtx_op_wake,
4333 [UMTX_OP_MUTEX_TRYLOCK] = __umtx_op_trylock_umutex,
4334 [UMTX_OP_MUTEX_LOCK] = __umtx_op_lock_umutex_compat32,
4335 [UMTX_OP_MUTEX_UNLOCK] = __umtx_op_unlock_umutex,
4336 [UMTX_OP_SET_CEILING] = __umtx_op_set_ceiling,
4337 [UMTX_OP_CV_WAIT] = __umtx_op_cv_wait_compat32,
4338 [UMTX_OP_CV_SIGNAL] = __umtx_op_cv_signal,
4339 [UMTX_OP_CV_BROADCAST] = __umtx_op_cv_broadcast,
4340 [UMTX_OP_WAIT_UINT] = __umtx_op_wait_compat32,
4341 [UMTX_OP_RW_RDLOCK] = __umtx_op_rw_rdlock_compat32,
4342 [UMTX_OP_RW_WRLOCK] = __umtx_op_rw_wrlock_compat32,
4343 [UMTX_OP_RW_UNLOCK] = __umtx_op_rw_unlock,
4344 [UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private_compat32,
4345 [UMTX_OP_WAKE_PRIVATE] = __umtx_op_wake_private,
4346 [UMTX_OP_MUTEX_WAIT] = __umtx_op_wait_umutex_compat32,
4347 [UMTX_OP_MUTEX_WAKE] = __umtx_op_wake_umutex,
4348 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
4349 [UMTX_OP_SEM_WAIT] = __umtx_op_sem_wait_compat32,
4350 [UMTX_OP_SEM_WAKE] = __umtx_op_sem_wake,
4352 [UMTX_OP_SEM_WAIT] = __umtx_op_unimpl,
4353 [UMTX_OP_SEM_WAKE] = __umtx_op_unimpl,
4355 [UMTX_OP_NWAKE_PRIVATE] = __umtx_op_nwake_private32,
4356 [UMTX_OP_MUTEX_WAKE2] = __umtx_op_wake2_umutex,
4357 [UMTX_OP_SEM2_WAIT] = __umtx_op_sem2_wait_compat32,
4358 [UMTX_OP_SEM2_WAKE] = __umtx_op_sem2_wake,
4359 [UMTX_OP_SHM] = __umtx_op_shm,
4360 [UMTX_OP_ROBUST_LISTS] = __umtx_op_robust_lists_compat32,
4364 freebsd32_umtx_op(struct thread *td, struct freebsd32_umtx_op_args *uap)
4367 if ((unsigned)uap->op < nitems(op_table_compat32)) {
4368 return (*op_table_compat32[uap->op])(td,
4369 (struct _umtx_op_args *)uap);
4376 umtx_thread_init(struct thread *td)
4379 td->td_umtxq = umtxq_alloc();
4380 td->td_umtxq->uq_thread = td;
4384 umtx_thread_fini(struct thread *td)
4387 umtxq_free(td->td_umtxq);
4391 * Called when a new thread is created, e.g. by fork().
4394 umtx_thread_alloc(struct thread *td)
4399 uq->uq_inherited_pri = PRI_MAX;
4401 KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
4402 KASSERT(uq->uq_thread == td, ("uq_thread != td"));
4403 KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
4404 KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
4410 * Clear robust lists for all of the process's threads, not delaying
4411 * the cleanup to the thread_exit() hook, since the relevant address
4412 * space is destroyed right now.
4415 umtx_exec_hook(void *arg __unused, struct proc *p,
4416 struct image_params *imgp __unused)
4420 KASSERT(p == curproc, ("need curproc"));
4422 KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
4423 (p->p_flag & P_STOPPED_SINGLE) != 0,
4424 ("curproc must be single-threaded"));
4425 FOREACH_THREAD_IN_PROC(p, td) {
4426 KASSERT(td == curthread ||
4427 ((td->td_flags & TDF_BOUNDARY) != 0 && TD_IS_SUSPENDED(td)),
4428 ("running thread %p %p", p, td));
4430 umtx_thread_cleanup(td);
4432 td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
4438 * thread_exit() hook.
4441 umtx_thread_exit(struct thread *td)
4444 umtx_thread_cleanup(td);
4448 umtx_read_uptr(struct thread *td, uintptr_t ptr, uintptr_t *res)
4451 #ifdef COMPAT_FREEBSD32
4456 #ifdef COMPAT_FREEBSD32
4457 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
4458 error = fueword32((void *)ptr, &res32);
4464 error = fueword((void *)ptr, &res1);
4474 umtx_read_rb_list(struct thread *td, struct umutex *m, uintptr_t *rb_list)
4476 #ifdef COMPAT_FREEBSD32
4477 struct umutex32 m32;
4479 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
4480 memcpy(&m32, m, sizeof(m32));
4481 *rb_list = m32.m_rb_lnk;
4484 *rb_list = m->m_rb_lnk;
4488 umtx_handle_rb(struct thread *td, uintptr_t rbp, uintptr_t *rb_list, bool inact)
4493 KASSERT(td->td_proc == curproc, ("need current vmspace"));
4494 error = copyin((void *)rbp, &m, sizeof(m));
4497 if (rb_list != NULL)
4498 umtx_read_rb_list(td, &m, rb_list);
4499 if ((m.m_flags & UMUTEX_ROBUST) == 0)
4501 if ((m.m_owner & ~UMUTEX_CONTESTED) != td->td_tid)
4502 /* inact is cleared after unlock; allow the inconsistency. */
4503 return (inact ? 0 : EINVAL);
4504 return (do_unlock_umutex(td, (struct umutex *)rbp, true));
4508 umtx_cleanup_rb_list(struct thread *td, uintptr_t rb_list, uintptr_t *rb_inact,
4517 error = umtx_read_uptr(td, rb_list, &rbp);
4518 for (i = 0; error == 0 && rbp != 0 && i < umtx_max_rb; i++) {
4519 if (rbp == *rb_inact) {
4524 error = umtx_handle_rb(td, rbp, &rbp, inact);
4526 if (i == umtx_max_rb && umtx_verbose_rb) {
4527 uprintf("comm %s pid %d: reached umtx %smax rb %d\n",
4528 td->td_proc->p_comm, td->td_proc->p_pid, name, umtx_max_rb);
4530 if (error != 0 && umtx_verbose_rb) {
4531 uprintf("comm %s pid %d: handling %srb error %d\n",
4532 td->td_proc->p_comm, td->td_proc->p_pid, name, error);
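/*
 * Editorial sketch of the userland layout assumed by the walk above:
 * each robust umutex embeds a link word (m_rb_lnk), and the list head
 * lives at the offset registered via UMTX_OP_ROBUST_LISTS, so on exec
 * or thread exit the kernel can unlock everything the thread owned:
 *
 *	head -> m1.m_rb_lnk -> m2.m_rb_lnk -> ... -> 0
 *
 * The traversal is capped at umtx_max_rb to defend against corrupted
 * or cyclic user lists.
 */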
4537 * Clean up umtx data.
4540 umtx_thread_cleanup(struct thread *td)
4547 * Disown pi mutexes.
4551 mtx_lock(&umtx_lock);
4552 uq->uq_inherited_pri = PRI_MAX;
4553 while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
4554 pi->pi_owner = NULL;
4555 TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
4557 mtx_unlock(&umtx_lock);
4559 sched_lend_user_prio(td, PRI_MAX);
4564 * Handle terminated robust mutexes. Must be done after
4565 * robust pi disown, otherwise unlock could see unowned pi mutexes.
4568 rb_inact = td->td_rb_inact;
4570 (void)umtx_read_uptr(td, rb_inact, &rb_inact);
4571 umtx_cleanup_rb_list(td, td->td_rb_list, &rb_inact, "");
4572 umtx_cleanup_rb_list(td, td->td_rbp_list, &rb_inact, "priv ");
4574 (void)umtx_handle_rb(td, rb_inact, NULL, true);