/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015, 2016 The FreeBSD Foundation
 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_umtx_profiling.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/eventhandler.h>
#include <sys/umtx.h>
#include <sys/umtxvar.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <machine/atomic.h>
#include <machine/cpu.h>

#include <compat/freebsd32/freebsd32.h>
#ifdef COMPAT_FREEBSD32
#include <compat/freebsd32/freebsd32_proto.h>
#endif
#define _UMUTEX_TRY		1
#define _UMUTEX_WAIT		2

#ifdef UMTX_PROFILING
#define UPROF_PERC_BIGGER(w, f, sw, sf)					\
	(((w) > (sw)) || ((w) == (sw) && (f) > (sf)))
#endif
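/*
 * Illustrative note (editorial, not from the original source):
 * UPROF_PERC_BIGGER compares two percentages kept as (whole, fraction)
 * integer pairs, avoiding floating point in the kernel.  For example,
 * 42% with fraction 17 is "bigger" than 42% with fraction 9 because
 * the whole parts tie and 17 > 9 decides the comparison.
 */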
#define UMTXQ_LOCKED_ASSERT(uc)		mtx_assert(&(uc)->uc_lock, MA_OWNED)

#ifdef INVARIANTS
#define UMTXQ_ASSERT_LOCKED_BUSY(key) do {				\
	struct umtxq_chain *uc;						\
									\
	uc = umtxq_getchain(key);					\
	mtx_assert(&uc->uc_lock, MA_OWNED);				\
	KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));		\
} while (0)
#else
#define UMTXQ_ASSERT_LOCKED_BUSY(key) do {} while (0)
#endif
/*
 * Don't propagate time-sharing priority; there is a security reason:
 * a user can simply introduce a PI-mutex, let thread A lock it, and
 * let another thread B block on it.  Because B is sleeping, its
 * priority will be boosted; this boosts A's priority through priority
 * propagation as well, and A's priority would never be lowered even
 * if it is using 100% of the CPU, which is unfair to other processes.
 */

#define UPRI(td)	(((td)->td_user_pri >= PRI_MIN_TIMESHARE &&	\
			  (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?	\
			 PRI_MAX_TIMESHARE : (td)->td_user_pri)
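/*
 * Illustrative note (editorial): with UPRI() above, any time-sharing
 * thread whose user priority falls in [PRI_MIN_TIMESHARE,
 * PRI_MAX_TIMESHARE] is treated as having the worst time-sharing
 * priority for propagation purposes, so blocking on a PI-mutex cannot
 * be used to turn a scheduler-managed priority into a lasting boost.
 */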
#define GOLDEN_RATIO_PRIME	2654404609U
#define UMTX_CHAINS		512
#define UMTX_SHIFTS		(__WORD_BIT - 9)

#define GET_SHARE(flags)	\
    (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)

#define BUSY_SPINS		200

struct umtx_copyops {
	int	(*copyin_timeout)(const void *uaddr, struct timespec *tsp);
	int	(*copyin_umtx_time)(const void *uaddr, size_t size,
	    struct _umtx_time *tp);
	int	(*copyin_robust_lists)(const void *uaddr, size_t size,
	    struct umtx_robust_lists_params *rbp);
	int	(*copyout_timeout)(void *uaddr, size_t size,
	    struct timespec *tsp);
	const size_t	timespec_sz;
	const size_t	umtx_time_sz;
};
_Static_assert(sizeof(struct umutex) == sizeof(struct umutex32), "umutex32");
_Static_assert(__offsetof(struct umutex, m_spare[0]) ==
    __offsetof(struct umutex32, m_spare[0]), "m_spare32");

int umtx_shm_vnobj_persistent = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_vnode_persistent, CTLFLAG_RWTUN,
    &umtx_shm_vnobj_persistent, 0,
    "False forces destruction of umtx attached to file, on last close");
static int umtx_max_rb = 1000;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_max_robust, CTLFLAG_RWTUN,
    &umtx_max_rb, 0,
    "Maximum number of robust mutexes allowed for each thread");

static uma_zone_t		umtx_pi_zone;
static struct umtxq_chain	umtxq_chains[2][UMTX_CHAINS];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
static int			umtx_pi_allocated;

static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "umtx debug");
SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
    &umtx_pi_allocated, 0, "Allocated umtx_pi");
static int umtx_verbose_rb = 1;
SYSCTL_INT(_debug_umtx, OID_AUTO, robust_faults_verbose, CTLFLAG_RWTUN,
    &umtx_verbose_rb, 0, "");

#ifdef UMTX_PROFILING
static long max_length;
SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0,
    "max_length");
static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, "umtx chain stats");
#endif

static inline void umtx_abs_timeout_init2(struct umtx_abs_timeout *timo,
    const struct _umtx_time *umtxtime);

static void umtx_shm_init(void);
static void umtxq_sysinit(void *);
static void umtxq_hash(struct umtx_key *key);
static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    bool rb);
static void umtx_thread_cleanup(struct thread *td);
SYSINIT(umtx, SI_SUB_EVENTHANDLER + 1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);

#define umtxq_signal(key, nwake)	umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
#define umtxq_insert(uq)	umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
#define umtxq_remove(uq)	umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)

static struct mtx umtx_lock;
#ifdef UMTX_PROFILING
static void
umtx_init_profiling(void)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < UMTX_CHAINS; ++i) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "umtx hash stats");
		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL);
		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL);
	}
}

static int
sysctl_debug_umtx_chains_peaks(SYSCTL_HANDLER_ARGS)
{
	char buf[512];
	struct sbuf sb;
	struct umtxq_chain *uc;
	u_int fract, i, j, tot, whole;
	u_int sf0, sf1, sf2, sf3, sf4;
	u_int si0, si1, si2, si3, si4;
	u_int sw0, sw1, sw2, sw3, sw4;

	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
	for (i = 0; i < 2; i++) {
		tot = 0;
		for (j = 0; j < UMTX_CHAINS; ++j) {
			uc = &umtxq_chains[i][j];
			mtx_lock(&uc->uc_lock);
			tot += uc->max_length;
			mtx_unlock(&uc->uc_lock);
		}
		if (tot == 0)
			sbuf_printf(&sb, "%u) Empty ", i);
		else {
			sf0 = sf1 = sf2 = sf3 = sf4 = 0;
			si0 = si1 = si2 = si3 = si4 = 0;
			sw0 = sw1 = sw2 = sw3 = sw4 = 0;
			for (j = 0; j < UMTX_CHAINS; j++) {
				uc = &umtxq_chains[i][j];
				mtx_lock(&uc->uc_lock);
				whole = uc->max_length * 100;
				mtx_unlock(&uc->uc_lock);
				fract = (whole % tot) * 100;
				if (UPROF_PERC_BIGGER(whole, fract, sw0, sf0)) {
					sf0 = fract;
					si0 = j;
					sw0 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw1,
				    sf1)) {
					sf1 = fract;
					si1 = j;
					sw1 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw2,
				    sf2)) {
					sf2 = fract;
					si2 = j;
					sw2 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw3,
				    sf3)) {
					sf3 = fract;
					si3 = j;
					sw3 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw4,
				    sf4)) {
					sf4 = fract;
					si4 = j;
					sw4 = whole;
				}
			}
			sbuf_printf(&sb, "queue %u:\n", i);
			sbuf_printf(&sb, "1st: %u.%u%% idx: %u\n", sw0 / tot,
			    sf0 / tot, si0);
			sbuf_printf(&sb, "2nd: %u.%u%% idx: %u\n", sw1 / tot,
			    sf1 / tot, si1);
			sbuf_printf(&sb, "3rd: %u.%u%% idx: %u\n", sw2 / tot,
			    sf2 / tot, si2);
			sbuf_printf(&sb, "4th: %u.%u%% idx: %u\n", sw3 / tot,
			    sf3 / tot, si3);
			sbuf_printf(&sb, "5th: %u.%u%% idx: %u\n", sw4 / tot,
			    sf4 / tot, si4);
		}
	}
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);
	return (0);
}

static int
sysctl_debug_umtx_chains_clear(SYSCTL_HANDLER_ARGS)
{
	struct umtxq_chain *uc;
	u_int i, j;
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (clear != 0) {
		for (i = 0; i < 2; ++i) {
			for (j = 0; j < UMTX_CHAINS; ++j) {
				uc = &umtxq_chains[i][j];
				mtx_lock(&uc->uc_lock);
				uc->length = 0;
				uc->max_length = 0;
				mtx_unlock(&uc->uc_lock);
			}
		}
	}
	return (0);
}

SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, clear,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_clear, "I",
    "Clear umtx chains statistics");
SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, peaks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_peaks, "A",
    "Highest peaks in chains max length");
#endif
static void
umtxq_sysinit(void *arg __unused)
{
	int i, j;

	umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	for (i = 0; i < 2; ++i) {
		for (j = 0; j < UMTX_CHAINS; ++j) {
			mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
			    MTX_DEF | MTX_DUPOK);
			LIST_INIT(&umtxq_chains[i][j].uc_queue[0]);
			LIST_INIT(&umtxq_chains[i][j].uc_queue[1]);
			LIST_INIT(&umtxq_chains[i][j].uc_spare_queue);
			TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
			umtxq_chains[i][j].uc_busy = 0;
			umtxq_chains[i][j].uc_waiters = 0;
#ifdef UMTX_PROFILING
			umtxq_chains[i][j].length = 0;
			umtxq_chains[i][j].max_length = 0;
#endif
		}
	}
#ifdef UMTX_PROFILING
	umtx_init_profiling();
#endif
	mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);
	umtx_shm_init();
}

struct umtx_q *
umtxq_alloc(void)
{
	struct umtx_q *uq;

	uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
	uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX,
	    M_WAITOK | M_ZERO);
	TAILQ_INIT(&uq->uq_spare_queue->head);
	TAILQ_INIT(&uq->uq_pi_contested);
	uq->uq_inherited_pri = PRI_MAX;
	return (uq);
}

void
umtxq_free(struct umtx_q *uq)
{

	MPASS(uq->uq_spare_queue != NULL);
	free(uq->uq_spare_queue, M_UMTX);
	free(uq, M_UMTX);
}

static inline void
umtxq_hash(struct umtx_key *key)
{
	unsigned n;

	n = (uintptr_t)key->info.both.a + key->info.both.b;
	key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
}
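/*
 * Illustrative note (editorial): the computation above is Fibonacci
 * (multiplicative) hashing.  For a key whose address pair sums to n,
 * the chain index is
 *
 *	((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS
 *
 * so nearby userland addresses scatter across the 512 chains instead
 * of clustering on a few of them.
 */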
static inline struct umtxq_chain *
umtxq_getchain(struct umtx_key *key)
{

	if (key->type <= TYPE_SEM)
		return (&umtxq_chains[1][key->hash]);
	return (&umtxq_chains[0][key->hash]);
}

/*
 * Set the chain to the busy state when a following operation
 * may block (a kernel mutex cannot be used).
 */
static inline void
umtxq_busy(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_assert(&uc->uc_lock, MA_OWNED);
	if (uc->uc_busy) {
#ifdef SMP
		if (smp_cpus > 1) {
			int count = BUSY_SPINS;
			if (count > 0) {
				umtxq_unlock(key);
				while (uc->uc_busy && --count > 0)
					cpu_spinwait();
				umtxq_lock(key);
			}
		}
#endif
		while (uc->uc_busy) {
			uc->uc_waiters++;
			msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
			uc->uc_waiters--;
		}
	}
	uc->uc_busy = 1;
}

/*
 * Unbusy a chain.
 */
static inline void
umtxq_unbusy(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_assert(&uc->uc_lock, MA_OWNED);
	KASSERT(uc->uc_busy != 0, ("not busy"));
	uc->uc_busy = 0;
	if (uc->uc_waiters)
		wakeup_one(uc);
}

void
umtxq_unbusy_unlocked(struct umtx_key *key)
{

	umtxq_lock(key);
	umtxq_unbusy(key);
	umtxq_unlock(key);
}
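/*
 * Illustrative sketch (editorial, inferred from the callers below
 * rather than quoted from the original source) of how an operation
 * that touches faultable userland memory is typically bracketed:
 *
 *	umtxq_lock(&key);
 *	umtxq_busy(&key);		// may sleep on a busy chain
 *	umtxq_unlock(&key);
 *	// ... access userland words, which can fault ...
 *	umtxq_unbusy_unlocked(&key);
 *
 * The busy flag serializes these sections without holding the chain
 * mutex across a possible page fault.
 */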
static struct umtxq_queue *
umtxq_queue_lookup(struct umtx_key *key, int q)
{
	struct umtxq_queue *uh;
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);
	LIST_FOREACH(uh, &uc->uc_queue[q], link) {
		if (umtx_key_match(&uh->key, key))
			return (uh);
	}
	return (NULL);
}

void
umtxq_insert_queue(struct umtx_q *uq, int q)
{
	struct umtxq_queue *uh;
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue"));
	uh = umtxq_queue_lookup(&uq->uq_key, q);
	if (uh != NULL) {
		LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
	} else {
		uh = uq->uq_spare_queue;
		uh->key = uq->uq_key;
		LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
#ifdef UMTX_PROFILING
		uc->length++;
		if (uc->length > uc->max_length) {
			uc->max_length = uc->length;
			if (uc->max_length > max_length)
				max_length = uc->max_length;
		}
#endif
	}
	uq->uq_spare_queue = NULL;

	TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
	uh->length++;
	uq->uq_flags |= UQF_UMTXQ;
	uq->uq_cur_queue = uh;
}

void
umtxq_remove_queue(struct umtx_q *uq, int q)
{
	struct umtxq_chain *uc;
	struct umtxq_queue *uh;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	if (uq->uq_flags & UQF_UMTXQ) {
		uh = uq->uq_cur_queue;
		TAILQ_REMOVE(&uh->head, uq, uq_link);
		uh->length--;
		uq->uq_flags &= ~UQF_UMTXQ;
		if (TAILQ_EMPTY(&uh->head)) {
			KASSERT(uh->length == 0,
			    ("inconsistent umtxq_queue length"));
#ifdef UMTX_PROFILING
			uc->length--;
#endif
			LIST_REMOVE(uh, link);
		} else {
			uh = LIST_FIRST(&uc->uc_spare_queue);
			KASSERT(uh != NULL, ("uc_spare_queue is empty"));
			LIST_REMOVE(uh, link);
		}
		uq->uq_spare_queue = uh;
		uq->uq_cur_queue = NULL;
	}
}

/*
 * Check if there are multiple waiters.
 */
int
umtxq_count(struct umtx_key *key)
{
	struct umtxq_queue *uh;

	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
	if (uh != NULL)
		return (uh->length);
	return (0);
}

/*
 * Check if there are multiple PI waiters and return the first waiter.
 */
static int
umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
{
	struct umtxq_queue *uh;

	*first = NULL;
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
	if (uh != NULL) {
		*first = TAILQ_FIRST(&uh->head);
		return (uh->length);
	}
	return (0);
}

/*
 * Wake up threads waiting on a userland object, selected by a bitset.
 */
int
umtxq_signal_mask(struct umtx_key *key, int n_wake, u_int bitset)
{
	struct umtxq_queue *uh;
	struct umtx_q *uq, *uq_temp;
	int ret;

	ret = 0;
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
	if (uh == NULL)
		return (0);
	TAILQ_FOREACH_SAFE(uq, &uh->head, uq_link, uq_temp) {
		if ((uq->uq_bitset & bitset) == 0)
			continue;
		umtxq_remove_queue(uq, UMTX_SHARED_QUEUE);
		wakeup_one(uq);
		if (++ret >= n_wake)
			break;
	}
	return (ret);
}

/*
 * Wake up threads waiting on a userland object.
 */
static int
umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
{
	struct umtxq_queue *uh;
	struct umtx_q *uq;
	int ret;

	ret = 0;
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	uh = umtxq_queue_lookup(key, q);
	if (uh != NULL) {
		while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
			umtxq_remove_queue(uq, q);
			wakeup(uq);
			if (++ret >= n_wake)
				return (ret);
		}
	}
	return (ret);
}

/*
 * Wake up the specified thread.
 */
static inline void
umtxq_signal_thread(struct umtx_q *uq)
{

	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));
	umtxq_remove(uq);
	wakeup(uq);
}

/*
 * Wake up a maximum of n_wake threads that are waiting on a userland
 * object identified by key.  The remaining threads are removed from the
 * queue identified by key and added to the queue identified by key2
 * (requeued).  n_requeue specifies an upper limit on the number of
 * threads that are requeued to the second queue.
 */
int
umtxq_requeue(struct umtx_key *key, int n_wake, struct umtx_key *key2,
    int n_requeue)
{
	struct umtxq_queue *uh;
	struct umtx_q *uq, *uq_temp;
	int ret;

	ret = 0;
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key2));
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
	if (uh == NULL)
		return (0);
	TAILQ_FOREACH_SAFE(uq, &uh->head, uq_link, uq_temp) {
		if (++ret <= n_wake) {
			umtxq_remove(uq);
			wakeup_one(uq);
		} else {
			umtxq_remove(uq);
			uq->uq_key = *key2;
			umtxq_insert(uq);
			if (ret - n_wake == n_requeue)
				break;
		}
	}
	return (ret);
}
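/*
 * Illustrative note (editorial): requeueing is the kernel half of a
 * "wake one, move the rest" strategy, as used by condition-variable
 * broadcast schemes that transfer the remaining waiters onto the mutex
 * queue instead of letting them all race for the mutex at once (a
 * thundering herd).
 */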
static int
tstohz(const struct timespec *tsp)
{
	struct timeval tv;

	TIMESPEC_TO_TIMEVAL(&tv, tsp);
	return (tvtohz(&tv));
}

void
umtx_abs_timeout_init(struct umtx_abs_timeout *timo, int clockid,
    int absolute, const struct timespec *timeout)
{

	timo->clockid = clockid;
	if (!absolute) {
		timo->is_abs_real = false;
		kern_clock_gettime(curthread, timo->clockid, &timo->cur);
		timespecadd(&timo->cur, timeout, &timo->end);
	} else {
		timo->end = *timeout;
		timo->is_abs_real = clockid == CLOCK_REALTIME ||
		    clockid == CLOCK_REALTIME_FAST ||
		    clockid == CLOCK_REALTIME_PRECISE ||
		    clockid == CLOCK_SECOND;
	}
}

static void
umtx_abs_timeout_init2(struct umtx_abs_timeout *timo,
    const struct _umtx_time *umtxtime)
{

	umtx_abs_timeout_init(timo, umtxtime->_clockid,
	    (umtxtime->_flags & UMTX_ABSTIME) != 0, &umtxtime->_timeout);
}
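/*
 * Illustrative sketch (editorial): a relative 1.5 s timeout on
 * CLOCK_MONOTONIC is converted into an absolute deadline,
 *
 *	end = now + {1, 500000000},
 *
 * while an absolute CLOCK_REALTIME deadline is stored as-is and marked
 * is_abs_real, so the sleep code can re-arm the wait when the wall
 * clock is stepped underneath it.
 */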
static int
umtx_abs_timeout_getsbt(struct umtx_abs_timeout *timo, sbintime_t *sbt,
    int *flags)
{
	struct bintime bt, bbt;
	struct timespec tts;

	switch (timo->clockid) {

	/* Clocks that can be converted into absolute time. */
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_PRECISE:
	case CLOCK_REALTIME_FAST:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_PRECISE:
	case CLOCK_UPTIME_FAST:
	case CLOCK_SECOND:
		timespec2bintime(&timo->end, &bt);
		switch (timo->clockid) {
		case CLOCK_REALTIME:
		case CLOCK_REALTIME_PRECISE:
		case CLOCK_REALTIME_FAST:
		case CLOCK_SECOND:
			getboottimebin(&bbt);
			bintime_sub(&bt, &bbt);
			break;
		}
		if (bt.sec < 0)
			return (ETIMEDOUT);
		if (bt.sec >= (SBT_MAX >> 32)) {
			*sbt = 0;
			*flags = 0;
			return (0);
		}
		*sbt = bttosbt(bt);
		switch (timo->clockid) {
		case CLOCK_REALTIME_FAST:
		case CLOCK_MONOTONIC_FAST:
		case CLOCK_UPTIME_FAST:
			*sbt += tc_tick_sbt;
			break;
		}
		*flags = C_ABSOLUTE;
		break;

	/* Clocks that have to be periodically polled. */
	case CLOCK_VIRTUAL:
	case CLOCK_PROF:
	case CLOCK_THREAD_CPUTIME_ID:
	case CLOCK_PROCESS_CPUTIME_ID:
	default:
		kern_clock_gettime(curthread, timo->clockid, &timo->cur);
		if (timespeccmp(&timo->end, &timo->cur, <=))
			return (ETIMEDOUT);
		timespecsub(&timo->end, &timo->cur, &tts);
		*sbt = tick_sbt * tstohz(&tts);
		*flags = C_HARDCLOCK;
		break;
	}
	return (0);
}

static uint32_t
umtx_unlock_val(uint32_t flags, bool rb)
{

	if (rb)
		return (UMUTEX_RB_OWNERDEAD);
	else if ((flags & UMUTEX_NONCONSISTENT) != 0)
		return (UMUTEX_RB_NOTRECOV);
	else
		return (UMUTEX_UNOWNED);
}
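/*
 * Illustrative note (editorial): the unlock value encodes how the lock
 * ended.  A robust unlock (rb) leaves UMUTEX_RB_OWNERDEAD so the next
 * locker receives EOWNERDEAD; a nonconsistent robust mutex becomes
 * UMUTEX_RB_NOTRECOV (ENOTRECOVERABLE for all later lockers); a plain
 * unlock stores UMUTEX_UNOWNED.
 */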
/*
 * Put the thread into sleep state; before sleeping, check if the
 * thread was removed from the umtx queue.
 */
int
umtxq_sleep(struct umtx_q *uq, const char *wmesg,
    struct umtx_abs_timeout *timo)
{
	struct umtxq_chain *uc;
	sbintime_t sbt = 0;
	int error, flags = 0;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	for (;;) {
		if (!(uq->uq_flags & UQF_UMTXQ)) {
			error = 0;
			break;
		}
		if (timo != NULL) {
			if (timo->is_abs_real)
				curthread->td_rtcgen =
				    atomic_load_acq_int(&rtc_generation);
			error = umtx_abs_timeout_getsbt(timo, &sbt, &flags);
			if (error != 0)
				break;
		}
		error = msleep_sbt(uq, &uc->uc_lock, PCATCH, wmesg,
		    sbt, 0, flags);
		if (error == EINTR || error == ERESTART)
			break;
		if (error == EWOULDBLOCK && (flags & C_ABSOLUTE) != 0) {
			error = ETIMEDOUT;
			break;
		}
	}
	curthread->td_rtcgen = 0;
	return (error);
}

/*
 * Convert a userspace address into a unique logical address.
 */
int
umtx_key_get(const void *addr, int type, int share, struct umtx_key *key)
{
	struct thread *td = curthread;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;

	key->type = type;
	if (share == THREAD_SHARE) {
		key->shared = 0;
		key->info.private.vs = td->td_proc->p_vmspace;
		key->info.private.addr = (uintptr_t)addr;
	} else {
		MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
		map = &td->td_proc->p_vmspace->vm_map;
		if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
		    &entry, &key->info.shared.object, &pindex, &prot,
		    &wired) != KERN_SUCCESS) {
			return (EFAULT);
		}

		if ((share == PROCESS_SHARE) ||
		    (share == AUTO_SHARE &&
		     VM_INHERIT_SHARE == entry->inheritance)) {
			key->shared = 1;
			key->info.shared.offset = (vm_offset_t)addr -
			    entry->start + entry->offset;
			vm_object_reference(key->info.shared.object);
		} else {
			key->shared = 0;
			key->info.private.vs = td->td_proc->p_vmspace;
			key->info.private.addr = (uintptr_t)addr;
		}
		vm_map_lookup_done(map, entry);
	}

	umtxq_hash(key);
	return (0);
}

/*
 * Release key.
 */
void
umtx_key_release(struct umtx_key *key)
{

	if (key->shared)
		vm_object_deallocate(key->info.shared.object);
}

#ifdef COMPAT_FREEBSD10
/*
 * Lock a umtx object.
 */
static int
do_lock_umtx(struct thread *td, struct umtx *umtx, u_long id,
    const struct timespec *timeout)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	u_long owner;
	u_long old;
	int error = 0;

	uq = td->td_umtxq;
	if (timeout != NULL)
		umtx_abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout);

	/*
	 * Care must be exercised when dealing with the umtx structure.  It
	 * can fault on any access.
	 */
	for (;;) {
		/*
		 * Try the uncontested case.  This should be done in userland.
		 */
		owner = casuword(&umtx->u_owner, UMTX_UNOWNED, id);

		/* The acquire succeeded. */
		if (owner == UMTX_UNOWNED)
			return (0);

		/* The address was invalid. */
		if (owner == -1)
			return (EFAULT);

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMTX_CONTESTED) {
			owner = casuword(&umtx->u_owner,
			    UMTX_CONTESTED, id | UMTX_CONTESTED);

			if (owner == UMTX_CONTESTED)
				return (0);

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			error = thread_check_susp(td, false);
			if (error != 0)
				break;

			/* If this failed, the lock has changed; restart. */
			continue;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			return (error);

		if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK,
		    AUTO_SHARE, &uq->uq_key)) != 0)
			return (error);

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		old = casuword(&umtx->u_owner, owner, owner | UMTX_CONTESTED);

		/* The address was invalid. */
		if (old == -1) {
			umtxq_lock(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/*
		 * We set the contested bit, sleep.  Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		if (old == owner)
			error = umtxq_sleep(uq, "umtx", timeout == NULL ? NULL :
			    &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);

		if (error == 0)
			error = thread_check_susp(td, false);
	}

	if (timeout == NULL) {
		/* Mutex locking is restarted if it is interrupted. */
		if (error == EINTR)
			error = ERESTART;
	} else {
		/* Timed-locking is not restarted. */
		if (error == ERESTART)
			error = EINTR;
	}
	return (error);
}

/*
 * Unlock a umtx object.
 */
static int
do_unlock_umtx(struct thread *td, struct umtx *umtx, u_long id)
{
	struct umtx_key key;
	u_long owner;
	u_long old;
	int error;
	int count;

	/*
	 * Make sure we own this mtx.
	 */
	owner = fuword(__DEVOLATILE(u_long *, &umtx->u_owner));
	if (owner == -1)
		return (EFAULT);

	if ((owner & ~UMTX_CONTESTED) != id)
		return (EPERM);

	/* This should be done in userland. */
	if ((owner & UMTX_CONTESTED) == 0) {
		old = casuword(&umtx->u_owner, owner, UMTX_UNOWNED);
		if (old == -1)
			return (EFAULT);
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks. */
	if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK, AUTO_SHARE,
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * zero or one thread is waiting for it; otherwise, it must
	 * be marked as contested.
	 */
	old = casuword(&umtx->u_owner, owner,
	    count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
	umtxq_lock(&key);
	umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
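	if (old == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}

/*
 * Illustrative sketch (editorial, not part of this file): the fast
 * paths above assume userland attempts the lock first and only enters
 * the kernel on contention, roughly:
 *
 *	if (atomic_cmpset_acq_long(&umtx->u_owner, UMTX_UNOWNED, id))
 *		return (0);		// uncontested acquire, no syscall
 *	_umtx_op(umtx, UMTX_OP_LOCK, id, NULL, NULL);	// slow path
 *
 * The kernel then sets UMTX_CONTESTED so the unlocking thread knows a
 * system call is required to wake waiters.
 */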
#ifdef COMPAT_FREEBSD32
/*
 * Lock a umtx object.
 */
static int
do_lock_umtx32(struct thread *td, uint32_t *m, uint32_t id,
    const struct timespec *timeout)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	uint32_t owner;
	uint32_t old;
	int error = 0;

	uq = td->td_umtxq;

	if (timeout != NULL)
		umtx_abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout);

	/*
	 * Care must be exercised when dealing with the umtx structure.  It
	 * can fault on any access.
	 */
	for (;;) {
		/*
		 * Try the uncontested case.  This should be done in userland.
		 */
		owner = casuword32(m, UMUTEX_UNOWNED, id);

		/* The acquire succeeded. */
		if (owner == UMUTEX_UNOWNED)
			return (0);

		/* The address was invalid. */
		if (owner == -1)
			return (EFAULT);

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMUTEX_CONTESTED) {
			owner = casuword32(m,
			    UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
			if (owner == UMUTEX_CONTESTED)
				return (0);

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			error = thread_check_susp(td, false);
			if (error != 0)
				break;

			/* If this failed, the lock has changed; restart. */
			continue;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			return (error);

		if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK,
		    AUTO_SHARE, &uq->uq_key)) != 0)
			return (error);

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		old = casuword32(m, owner, owner | UMUTEX_CONTESTED);

		/* The address was invalid. */
		if (old == -1) {
			umtxq_lock(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/*
		 * We set the contested bit, sleep.  Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		if (old == owner)
			error = umtxq_sleep(uq, "umtx", timeout == NULL ?
			    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);

		if (error == 0)
			error = thread_check_susp(td, false);
	}

	if (timeout == NULL) {
		/* Mutex locking is restarted if it is interrupted. */
		if (error == EINTR)
			error = ERESTART;
	} else {
		/* Timed-locking is not restarted. */
		if (error == ERESTART)
			error = EINTR;
	}
	return (error);
}

/*
 * Unlock a umtx object.
 */
static int
do_unlock_umtx32(struct thread *td, uint32_t *m, uint32_t id)
{
	struct umtx_key key;
	uint32_t owner;
	uint32_t old;
	int error;
	int count;

	/*
	 * Make sure we own this mtx.
	 */
	owner = fuword32(m);
	if (owner == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	/* This should be done in userland. */
	if ((owner & UMUTEX_CONTESTED) == 0) {
		old = casuword32(m, owner, UMUTEX_UNOWNED);
		if (old == -1)
			return (EFAULT);
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks. */
	if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK, AUTO_SHARE,
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * zero or one thread is waiting for it; otherwise, it must
	 * be marked as contested.
	 */
	old = casuword32(m, owner,
	    count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
	umtxq_lock(&key);
	umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (old == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}
#endif	/* COMPAT_FREEBSD32 */
#endif	/* COMPAT_FREEBSD10 */
/*
 * Fetch and compare the value; sleep on the address if the value has
 * not changed.
 */
static int
do_wait(struct thread *td, void *addr, u_long id,
    struct _umtx_time *timeout, int compat32, int is_private)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	u_long tmp;
	uint32_t tmp32;
	int error = 0;

	uq = td->td_umtxq;
	if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
	    is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		umtx_abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	if (compat32 == 0) {
		error = fueword(addr, &tmp);
		if (error != 0)
			error = EFAULT;
	} else {
		error = fueword32(addr, &tmp32);
		if (error == 0)
			tmp = tmp32;
		else
			error = EFAULT;
	}
	umtxq_lock(&uq->uq_key);
	if (error == 0) {
		if (tmp == id)
			error = umtxq_sleep(uq, "uwait", timeout == NULL ?
			    NULL : &timo);
		if ((uq->uq_flags & UQF_UMTXQ) == 0)
			error = 0;
		else
			umtxq_remove(uq);
	} else if ((uq->uq_flags & UQF_UMTXQ) != 0) {
		umtxq_remove(uq);
	}
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}

/*
 * Wake up threads sleeping on the specified address.
 */
int
kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
{
	struct umtx_key key;
	int ret;

	if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
	    is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
		return (ret);
	umtxq_lock(&key);
	umtxq_signal(&key, n_wake);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (0);
}
/*
 * Lock a PTHREAD_PRIO_NONE protocol POSIX mutex.
 */
static int
do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int mode)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	uint32_t owner, old, id;
	int error, rv;

	id = td->td_tid;
	uq = td->td_umtxq;
	error = 0;
	if (timeout != NULL)
		umtx_abs_timeout_init2(&timo, timeout);

	/*
	 * Care must be exercised when dealing with the umtx structure.  It
	 * can fault on any access.
	 */
	for (;;) {
		rv = fueword32(&m->m_owner, &owner);
		if (rv == -1)
			return (EFAULT);
		if (mode == _UMUTEX_WAIT) {
			if (owner == UMUTEX_UNOWNED ||
			    owner == UMUTEX_CONTESTED ||
			    owner == UMUTEX_RB_OWNERDEAD ||
			    owner == UMUTEX_RB_NOTRECOV)
				return (0);
		} else {
			/*
			 * Robust mutex terminated.  Kernel duty is to
			 * return EOWNERDEAD to the userspace.  The
			 * umutex.m_flags UMUTEX_NONCONSISTENT is set
			 * by the common userspace code.
			 */
			if (owner == UMUTEX_RB_OWNERDEAD) {
				rv = casueword32(&m->m_owner,
				    UMUTEX_RB_OWNERDEAD, &owner,
				    id | UMUTEX_CONTESTED);
				if (rv == -1)
					return (EFAULT);
				if (rv == 0) {
					MPASS(owner == UMUTEX_RB_OWNERDEAD);
					return (EOWNERDEAD); /* success */
				}
				MPASS(rv == 1);
				rv = thread_check_susp(td, false);
				if (rv != 0)
					return (rv);
				continue;
			}
			if (owner == UMUTEX_RB_NOTRECOV)
				return (ENOTRECOVERABLE);

			/*
			 * Try the uncontested case.  This should be
			 * done in userland.
			 */
			rv = casueword32(&m->m_owner, UMUTEX_UNOWNED,
			    &owner, id);
			/* The address was invalid. */
			if (rv == -1)
				return (EFAULT);

			/* The acquire succeeded. */
			if (rv == 0) {
				MPASS(owner == UMUTEX_UNOWNED);
				return (0);
			}

			/*
			 * If no one owns it but it is contested try
			 * to acquire it.
			 */
			MPASS(rv == 1);
			if (owner == UMUTEX_CONTESTED) {
				rv = casueword32(&m->m_owner,
				    UMUTEX_CONTESTED, &owner,
				    id | UMUTEX_CONTESTED);
				/* The address was invalid. */
				if (rv == -1)
					return (EFAULT);
				if (rv == 0) {
					MPASS(owner == UMUTEX_CONTESTED);
					return (0);
				}
				if (rv == 1) {
					rv = thread_check_susp(td, false);
					if (rv != 0)
						return (rv);
				}

				/*
				 * If this failed the lock has
				 * changed; restart.
				 */
				continue;
			}

			/* rv == 1 but not contested, likely store failure */
			rv = thread_check_susp(td, false);
			if (rv != 0)
				return (rv);
		}

		if (mode == _UMUTEX_TRY)
			return (EBUSY);

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			return (error);

		if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
		    GET_SHARE(flags), &uq->uq_key)) != 0)
			return (error);

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		rv = casueword32(&m->m_owner, owner, &old,
		    owner | UMUTEX_CONTESTED);

		/* The address was invalid or casueword failed to store. */
		if (rv == -1 || rv == 1) {
			umtxq_lock(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			if (rv == -1)
				return (EFAULT);
			if (rv == 1) {
				rv = thread_check_susp(td, false);
				if (rv != 0)
					return (rv);
			}
			continue;
		}

		/*
		 * We set the contested bit, sleep.  Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		MPASS(old == owner);
		error = umtxq_sleep(uq, "umtxn", timeout == NULL ?
		    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);

		if (error == 0)
			error = thread_check_susp(td, false);
	}

	return (0);
}
/*
 * Unlock a PTHREAD_PRIO_NONE protocol POSIX mutex.
 */
static int
do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	uint32_t owner, old, id, newlock;
	int error, count;

	id = td->td_tid;

again:
	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	newlock = umtx_unlock_val(flags, rb);
	if ((owner & UMUTEX_CONTESTED) == 0) {
		error = casueword32(&m->m_owner, owner, &old, newlock);
		if (error == -1)
			return (EFAULT);
		if (error == 1) {
			error = thread_check_susp(td, false);
			if (error != 0)
				return (error);
			goto again;
		}
		MPASS(old == owner);
		return (0);
	}

	/* We should only ever be in here for contested locks. */
	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * zero or one thread is waiting for it; otherwise, it must
	 * be marked as contested.
	 */
	if (count > 1)
		newlock |= UMUTEX_CONTESTED;
	error = casueword32(&m->m_owner, owner, &old, newlock);
	umtxq_lock(&key);
	umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (error == -1)
		return (EFAULT);
	if (error == 1) {
		if (old != owner)
			return (EINVAL);
		error = thread_check_susp(td, false);
		if (error != 0)
			return (error);
		goto again;
	}
	return (0);
}

/*
 * Check if the mutex is available and wake up a waiter;
 * this applies only to a simple mutex.
 */
static int
do_wake_umutex(struct thread *td, struct umutex *m)
{
	struct umtx_key key;
	uint32_t owner;
	uint32_t flags;
	int error;
	int count;

again:
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != 0 && owner != UMUTEX_RB_OWNERDEAD &&
	    owner != UMUTEX_RB_NOTRECOV)
		return (0);

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);

	/* We should only ever be in here for contested locks. */
	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	if (count <= 1 && owner != UMUTEX_RB_OWNERDEAD &&
	    owner != UMUTEX_RB_NOTRECOV) {
		error = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    UMUTEX_UNOWNED);
		if (error == -1) {
			error = EFAULT;
		} else if (error == 1) {
			umtxq_lock(&key);
			umtxq_unbusy(&key);
			umtxq_unlock(&key);
			umtx_key_release(&key);
			error = thread_check_susp(td, false);
			if (error != 0)
				return (error);
			goto again;
		}
	}

	umtxq_lock(&key);
	if (error == 0 && count != 0) {
		MPASS((owner & ~UMUTEX_CONTESTED) == 0 ||
		    owner == UMUTEX_RB_OWNERDEAD ||
		    owner == UMUTEX_RB_NOTRECOV);
		umtxq_signal(&key, 1);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
/*
 * Check if the mutex has waiters and try to fix the contention bit.
 */
static int
do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
{
	struct umtx_key key;
	uint32_t owner, old;
	int type, error, count;

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT |
	    UMUTEX_ROBUST)) {
	case 0:
		type = TYPE_NORMAL_UMUTEX;
		break;
	case UMUTEX_PRIO_INHERIT:
		type = TYPE_PI_UMUTEX;
		break;
	case (UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST):
		type = TYPE_PI_ROBUST_UMUTEX;
		break;
	case UMUTEX_PRIO_PROTECT:
		type = TYPE_PP_UMUTEX;
		break;
	case (UMUTEX_PRIO_PROTECT | UMUTEX_ROBUST):
		type = TYPE_PP_ROBUST_UMUTEX;
		break;
	default:
		return (EINVAL);
	}
	if ((error = umtx_key_get(m, type, GET_SHARE(flags), &key)) != 0)
		return (error);

	owner = 0;
	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		error = EFAULT;

	/*
	 * Only repair the contention bit if there is a waiter; this means
	 * the mutex is still being referenced by userland code.
	 * Otherwise don't update any memory.
	 */
	while (error == 0 && (owner & UMUTEX_CONTESTED) == 0 &&
	    (count > 1 || (count == 1 && (owner & ~UMUTEX_CONTESTED) != 0))) {
		error = casueword32(&m->m_owner, owner, &old,
		    owner | UMUTEX_CONTESTED);
		if (error == -1) {
			error = EFAULT;
			break;
		}
		if (error == 0) {
			MPASS(old == owner);
			break;
		}
		owner = old;
		error = thread_check_susp(td, false);
	}

	umtxq_lock(&key);
	if (error == EFAULT) {
		umtxq_signal(&key, INT_MAX);
	} else if (count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 ||
	    owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV))
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}

static struct umtx_pi *
umtx_pi_alloc(int flags)
{
	struct umtx_pi *pi;

	pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
	TAILQ_INIT(&pi->pi_blocked);
	atomic_add_int(&umtx_pi_allocated, 1);
	return (pi);
}

static void
umtx_pi_free(struct umtx_pi *pi)
{

	uma_zfree(umtx_pi_zone, pi);
	atomic_add_int(&umtx_pi_allocated, -1);
}
/*
 * Adjust the thread's position on a pi_state after its priority has been
 * changed.
 */
static void
umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
{
	struct umtx_q *uq, *uq1, *uq2;
	struct thread *td1;

	mtx_assert(&umtx_lock, MA_OWNED);
	if (pi == NULL)
		return;

	uq = td->td_umtxq;

	/*
	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if either its priority is lower than
	 * the previous thread's or higher than the next thread's.
	 */
	uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
	uq2 = TAILQ_NEXT(uq, uq_lockq);
	if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
	    (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
		/*
		 * Remove the thread from the blocked chain and determine
		 * where it should be moved to.
		 */
		TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
		TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
			td1 = uq1->uq_thread;
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (UPRI(td1) > UPRI(td))
				break;
		}

		if (uq1 == NULL)
			TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
		else
			TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
	}
}

static struct umtx_pi *
umtx_pi_next(struct umtx_pi *pi)
{
	struct umtx_q *uq_owner;

	if (pi->pi_owner == NULL)
		return (NULL);
	uq_owner = pi->pi_owner->td_umtxq;
	if (uq_owner == NULL)
		return (NULL);
	return (uq_owner->uq_pi_blocked);
}

/*
 * Floyd's Cycle-Finding Algorithm.
 */
static bool
umtx_pi_check_loop(struct umtx_pi *pi)
{
	struct umtx_pi *pi1;	/* fast iterator */

	mtx_assert(&umtx_lock, MA_OWNED);
	if (pi == NULL)
		return (false);
	pi1 = pi;		/* slow iterator */
	for (;;) {
		pi = umtx_pi_next(pi);
		if (pi == NULL)
			break;
		pi1 = umtx_pi_next(pi1);
		if (pi1 == NULL)
			break;
		pi1 = umtx_pi_next(pi1);
		if (pi1 == NULL)
			break;
		if (pi == pi1)
			return (true);
	}
	return (false);
}
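/*
 * Illustrative note (editorial): this is the classic tortoise-and-hare
 * scan.  The slow pointer (pi) follows one owner->blocked edge per
 * iteration while the fast pointer (pi1) follows two; if userland has
 * built a cycle of PI mutexes and owners (a deadlock), the two pointers
 * eventually meet and the propagation code bails out instead of
 * looping forever.
 */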
/*
 * Propagate priority when a thread is blocked on a POSIX
 * PI mutex.
 */
static void
umtx_propagate_priority(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;
	int pri;

	mtx_assert(&umtx_lock, MA_OWNED);
	pri = UPRI(td);
	uq = td->td_umtxq;
	pi = uq->uq_pi_blocked;
	if (pi == NULL)
		return;
	if (umtx_pi_check_loop(pi))
		return;

	for (;;) {
		td = pi->pi_owner;
		if (td == NULL || td == curthread)
			return;

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		thread_lock(td);
		if (td->td_lend_user_pri > pri)
			sched_lend_user_prio(td, pri);
		else {
			thread_unlock(td);
			break;
		}
		thread_unlock(td);

		/*
		 * Pick up the lock that td is blocked on.
		 */
		uq = td->td_umtxq;
		pi = uq->uq_pi_blocked;
		if (pi == NULL)
			break;
		/* Resort td on the list if needed. */
		umtx_pi_adjust_thread(pi, td);
	}
}

/*
 * Unpropagate priority for a PI mutex when a thread blocked on
 * it is interrupted by a signal or resumed by others.
 */
static void
umtx_repropagate_priority(struct umtx_pi *pi)
{
	struct umtx_q *uq, *uq_owner;
	struct umtx_pi *pi2;
	int pri;

	mtx_assert(&umtx_lock, MA_OWNED);

	if (umtx_pi_check_loop(pi))
		return;
	while (pi != NULL && pi->pi_owner != NULL) {
		pri = PRI_MAX;
		uq_owner = pi->pi_owner->td_umtxq;

		TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
			uq = TAILQ_FIRST(&pi2->pi_blocked);
			if (uq != NULL) {
				if (pri > UPRI(uq->uq_thread))
					pri = UPRI(uq->uq_thread);
			}
		}

		if (pri > uq_owner->uq_inherited_pri)
			pri = uq_owner->uq_inherited_pri;
		thread_lock(pi->pi_owner);
		sched_lend_user_prio(pi->pi_owner, pri);
		thread_unlock(pi->pi_owner);
		if ((pi = uq_owner->uq_pi_blocked) != NULL)
			umtx_pi_adjust_thread(pi, uq_owner->uq_thread);
	}
}

/*
 * Insert a PI mutex into the owned list.
 */
static void
umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
{
	struct umtx_q *uq_owner;

	uq_owner = owner->td_umtxq;
	mtx_assert(&umtx_lock, MA_OWNED);
	MPASS(pi->pi_owner == NULL);
	pi->pi_owner = owner;
	TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);
}

/*
 * Disown a PI mutex, and remove it from the owned list.
 */
static void
umtx_pi_disown(struct umtx_pi *pi)
{

	mtx_assert(&umtx_lock, MA_OWNED);
	TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested, pi, pi_link);
	pi->pi_owner = NULL;
}

/*
 * Claim ownership of a PI mutex.
 */
static int
umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
{
	struct umtx_q *uq;
	int pri;

	mtx_lock(&umtx_lock);
	if (pi->pi_owner == owner) {
		mtx_unlock(&umtx_lock);
		return (0);
	}

	if (pi->pi_owner != NULL) {
		/*
		 * userland may have already messed the mutex, sigh.
		 */
		mtx_unlock(&umtx_lock);
		return (EPERM);
	}
	umtx_pi_setowner(pi, owner);
	uq = TAILQ_FIRST(&pi->pi_blocked);
	if (uq != NULL) {
		pri = UPRI(uq->uq_thread);
		thread_lock(owner);
		if (pri < UPRI(owner))
			sched_lend_user_prio(owner, pri);
		thread_unlock(owner);
	}
	mtx_unlock(&umtx_lock);
	return (0);
}

/*
 * Adjust a thread's position in the PI mutex it is blocked on;
 * this may start a new round of priority propagation.
 */
void
umtx_pi_adjust(struct thread *td, u_char oldpri)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;

	uq = td->td_umtxq;
	mtx_lock(&umtx_lock);
	/*
	 * Pick up the lock that td is blocked on.
	 */
	pi = uq->uq_pi_blocked;
	if (pi != NULL) {
		umtx_pi_adjust_thread(pi, td);
		umtx_repropagate_priority(pi);
	}
	mtx_unlock(&umtx_lock);
}
/*
 * Sleep on a PI mutex.
 */
static int
umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner,
    const char *wmesg, struct umtx_abs_timeout *timo, bool shared)
{
	struct thread *td, *td1;
	struct umtx_q *uq1;
	int error, pri;
#ifdef INVARIANTS
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
#endif
	error = 0;
	td = uq->uq_thread;
	KASSERT(td == curthread, ("inconsistent uq_thread"));
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));
	KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));
	umtxq_insert(uq);
	mtx_lock(&umtx_lock);
	if (pi->pi_owner == NULL) {
		mtx_unlock(&umtx_lock);
		td1 = tdfind(owner, shared ? -1 : td->td_proc->p_pid);
		mtx_lock(&umtx_lock);
		if (td1 != NULL) {
			if (pi->pi_owner == NULL)
				umtx_pi_setowner(pi, td1);
			PROC_UNLOCK(td1->td_proc);
		}
	}

	TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
		pri = UPRI(uq1->uq_thread);
		if (pri > UPRI(td))
			break;
	}

	if (uq1 != NULL)
		TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
	else
		TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);

	uq->uq_pi_blocked = pi;
	thread_lock(td);
	td->td_flags |= TDF_UPIBLOCKED;
	thread_unlock(td);
	umtx_propagate_priority(td);
	mtx_unlock(&umtx_lock);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, wmesg, timo);
	umtxq_remove(uq);

	mtx_lock(&umtx_lock);
	uq->uq_pi_blocked = NULL;
	thread_lock(td);
	td->td_flags &= ~TDF_UPIBLOCKED;
	thread_unlock(td);
	TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
	umtx_repropagate_priority(pi);
	mtx_unlock(&umtx_lock);
	umtxq_unlock(&uq->uq_key);

	return (error);
}

/*
 * Add a reference to a PI mutex.
 */
static void
umtx_pi_ref(struct umtx_pi *pi)
{

	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&pi->pi_key));
	pi->pi_refcount++;
}

/*
 * Decrease the reference count of a PI mutex; when the count
 * drops to zero, its memory is freed.
 */
static void
umtx_pi_unref(struct umtx_pi *pi)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	UMTXQ_LOCKED_ASSERT(uc);
	KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
	if (--pi->pi_refcount == 0) {
		mtx_lock(&umtx_lock);
		if (pi->pi_owner != NULL)
			umtx_pi_disown(pi);
		KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
		    ("blocked queue not empty"));
		mtx_unlock(&umtx_lock);
		TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
		umtx_pi_free(pi);
	}
}

/*
 * Find a PI mutex in the hash table.
 */
static struct umtx_pi *
umtx_pi_lookup(struct umtx_key *key)
{
	struct umtxq_chain *uc;
	struct umtx_pi *pi;

	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);

	TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
		if (umtx_key_match(&pi->pi_key, key)) {
			return (pi);
		}
	}
	return (NULL);
}

/*
 * Insert a PI mutex into the hash table.
 */
static inline void
umtx_pi_insert(struct umtx_pi *pi)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	UMTXQ_LOCKED_ASSERT(uc);
	TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
}

/*
 * Drop a PI mutex and wake up its top waiter.
 */
static int
umtx_pi_drop(struct thread *td, struct umtx_key *key, bool rb, int *count)
{
	struct umtx_q *uq_first, *uq_first2, *uq_me;
	struct umtx_pi *pi, *pi2;
	int pri;

	UMTXQ_ASSERT_LOCKED_BUSY(key);
	*count = umtxq_count_pi(key, &uq_first);
	if (uq_first != NULL) {
		mtx_lock(&umtx_lock);
		pi = uq_first->uq_pi_blocked;
		KASSERT(pi != NULL, ("pi == NULL?"));
		if (pi->pi_owner != td && !(rb && pi->pi_owner == NULL)) {
			mtx_unlock(&umtx_lock);
			/* userland messed the mutex */
			return (EPERM);
		}
		uq_me = td->td_umtxq;
		if (pi->pi_owner == td)
			umtx_pi_disown(pi);
		/* get highest priority thread which is still sleeping. */
		uq_first = TAILQ_FIRST(&pi->pi_blocked);
		while (uq_first != NULL &&
		    (uq_first->uq_flags & UQF_UMTXQ) == 0) {
			uq_first = TAILQ_NEXT(uq_first, uq_lockq);
		}
		pri = PRI_MAX;
		TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
			uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
			if (uq_first2 != NULL) {
				if (pri > UPRI(uq_first2->uq_thread))
					pri = UPRI(uq_first2->uq_thread);
			}
		}
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
		if (uq_first)
			umtxq_signal_thread(uq_first);
	} else {
		pi = umtx_pi_lookup(key);
		/*
		 * A umtx_pi can exist if a signal or timeout removed the
		 * last waiter from the umtxq, but there is still
		 * a thread in do_lock_pi() holding the umtx_pi.
		 */
		if (pi != NULL) {
			/*
			 * The umtx_pi can be unowned, such as when a thread
			 * has just entered do_lock_pi(), allocated the
			 * umtx_pi, and unlocked the umtxq.
			 * If the current thread owns it, it must disown it.
			 */
			mtx_lock(&umtx_lock);
			if (pi->pi_owner == td)
				umtx_pi_disown(pi);
			mtx_unlock(&umtx_lock);
		}
	}
	return (0);
}
/*
 * Lock a PI mutex.
 */
static int
do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	struct umtx_pi *pi, *new_pi;
	uint32_t id, old_owner, owner, old;
	int error, rv;

	id = td->td_tid;
	uq = td->td_umtxq;

	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		umtx_abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	pi = umtx_pi_lookup(&uq->uq_key);
	if (pi == NULL) {
		new_pi = umtx_pi_alloc(M_NOWAIT);
		if (new_pi == NULL) {
			umtxq_unlock(&uq->uq_key);
			new_pi = umtx_pi_alloc(M_WAITOK);
			umtxq_lock(&uq->uq_key);
			pi = umtx_pi_lookup(&uq->uq_key);
			if (pi != NULL) {
				umtx_pi_free(new_pi);
				new_pi = NULL;
			}
		}
		if (new_pi != NULL) {
			new_pi->pi_key = uq->uq_key;
			umtx_pi_insert(new_pi);
			pi = new_pi;
		}
	}
	umtx_pi_ref(pi);
	umtxq_unlock(&uq->uq_key);

	/*
	 * Care must be exercised when dealing with the umtx structure.  It
	 * can fault on any access.
	 */
	for (;;) {
		/*
		 * Try the uncontested case.  This should be done in userland.
		 */
		rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, &owner, id);
		/* The address was invalid. */
		if (rv == -1) {
			error = EFAULT;
			break;
		}
		/* The acquire succeeded. */
		if (rv == 0) {
			MPASS(owner == UMUTEX_UNOWNED);
			error = 0;
			break;
		}

		if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
			break;
		}

		/*
		 * Avoid overwriting a possible error from sleep due
		 * to the pending signal with suspension check result.
		 */
		if (error == 0) {
			error = thread_check_susp(td, true);
			if (error != 0)
				break;
		}

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMUTEX_CONTESTED || owner == UMUTEX_RB_OWNERDEAD) {
			old_owner = owner;
			rv = casueword32(&m->m_owner, owner, &owner,
			    id | UMUTEX_CONTESTED);
			/* The address was invalid. */
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (rv == 1) {
				if (error == 0) {
					error = thread_check_susp(td, true);
					if (error != 0)
						break;
				}

				/*
				 * If this failed the lock could have
				 * changed; restart.
				 */
				continue;
			}

			MPASS(rv == 0);
			MPASS(owner == old_owner);
			umtxq_lock(&uq->uq_key);
			umtxq_busy(&uq->uq_key);
			error = umtx_pi_claim(pi, td);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			if (error != 0) {
				/*
				 * Since we're going to return an
				 * error, restore the m_owner to its
				 * previous, unowned state to avoid
				 * compounding the problem.
				 */
				(void)casuword32(&m->m_owner,
				    id | UMUTEX_CONTESTED, old_owner);
			}
			if (error == 0 && old_owner == UMUTEX_RB_OWNERDEAD)
				error = EOWNERDEAD;
			break;
		}

		if ((owner & ~UMUTEX_CONTESTED) == id) {
			error = EDEADLK;
			break;
		}

		if (try != 0) {
			error = EBUSY;
			break;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		rv = casueword32(&m->m_owner, owner, &old, owner |
		    UMUTEX_CONTESTED);

		/* The address was invalid. */
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		if (rv == 1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = thread_check_susp(td, true);
			if (error != 0)
				break;

			/*
			 * The lock changed and we need to retry or we
			 * lost a race to the thread unlocking the
			 * umtx.  Note that the UMUTEX_RB_OWNERDEAD
			 * value for owner is impossible there.
			 */
			continue;
		}

		umtxq_lock(&uq->uq_key);

		/* We set the contested bit, sleep. */
		MPASS(old == owner);
		error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED,
		    "umtxpi", timeout == NULL ? NULL : &timo,
		    (flags & USYNC_PROCESS_SHARED) != 0);
		if (error != 0)
			continue;

		error = thread_check_susp(td, false);
		if (error != 0)
			break;
	}

	umtxq_lock(&uq->uq_key);
	umtx_pi_unref(pi);
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Unlock a PI mutex.
 */
static int
do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	uint32_t id, new_owner, old, owner;
	int count, error;

	id = td->td_tid;

usrloop:
	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	new_owner = umtx_unlock_val(flags, rb);

	/* This should be done in userland. */
	if ((owner & UMUTEX_CONTESTED) == 0) {
		error = casueword32(&m->m_owner, owner, &old, new_owner);
		if (error == -1)
			return (EFAULT);
		if (error == 1) {
			error = thread_check_susp(td, true);
			if (error != 0)
				return (error);
			goto usrloop;
		}
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks. */
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	error = umtx_pi_drop(td, &key, rb, &count);
	if (error != 0) {
		umtxq_unbusy(&key);
		umtxq_unlock(&key);
		umtx_key_release(&key);
		/* userland messed the mutex */
		return (error);
	}
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * zero or one thread is waiting for it; otherwise, it must
	 * be marked as contested.
	 */
	if (count > 1)
		new_owner |= UMUTEX_CONTESTED;
again:
	error = casueword32(&m->m_owner, owner, &old, new_owner);
	if (error == 1) {
		error = thread_check_susp(td, false);
		if (error == 0)
			goto again;
	}
	umtxq_unbusy_unlocked(&key);
	umtx_key_release(&key);
	if (error == -1)
		return (EFAULT);
	if (error == 0 && old != owner)
		return (EINVAL);
	return (error);
}
/*
 * Lock a PP mutex.
 */
static int
do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq, *uq2;
	struct umtx_pi *pi;
	uint32_t ceiling;
	uint32_t owner, id;
	int error, pri, old_inherited_pri, su, rv;

	id = td->td_tid;
	uq = td->td_umtxq;
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		umtx_abs_timeout_init2(&timo, timeout);

	su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
	for (;;) {
		old_inherited_pri = uq->uq_inherited_pri;
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		rv = fueword32(&m->m_ceilings[0], &ceiling);
		if (rv == -1) {
			error = EFAULT;
			goto out;
		}
		ceiling = RTP_PRIO_MAX - ceiling;
		if (ceiling > RTP_PRIO_MAX) {
			error = EINVAL;
			goto out;
		}

		mtx_lock(&umtx_lock);
		if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
			mtx_unlock(&umtx_lock);
			error = EINVAL;
			goto out;
		}
		if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
			uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
			thread_lock(td);
			if (uq->uq_inherited_pri < UPRI(td))
				sched_lend_user_prio(td, uq->uq_inherited_pri);
			thread_unlock(td);
		}
		mtx_unlock(&umtx_lock);

		rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    id | UMUTEX_CONTESTED);
		/* The address was invalid. */
		if (rv == -1) {
			error = EFAULT;
			break;
		}
		if (rv == 0) {
			MPASS(owner == UMUTEX_CONTESTED);
			error = 0;
			break;
		}
		/* rv == 1 */
		if (owner == UMUTEX_RB_OWNERDEAD) {
			rv = casueword32(&m->m_owner, UMUTEX_RB_OWNERDEAD,
			    &owner, id | UMUTEX_CONTESTED);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (rv == 0) {
				MPASS(owner == UMUTEX_RB_OWNERDEAD);
				error = EOWNERDEAD; /* success */
				break;
			}

			/*
			 * rv == 1, only check for suspension if we
			 * did not already catch a signal.  If we
			 * get an error from the check, the same
			 * condition is checked by the umtxq_sleep()
			 * call below, so we should obliterate the
			 * error to not skip the last loop iteration.
			 */
			if (error == 0) {
				error = thread_check_susp(td, false);
				if (error == 0) {
					if (try != 0)
						error = EBUSY;
					else
						continue;
				}
				error = 0;
			}
		} else if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
		}

		if (try != 0)
			error = EBUSY;

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		umtxq_lock(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		error = umtxq_sleep(uq, "umtxpp", timeout == NULL ?
		    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);

		mtx_lock(&umtx_lock);
		uq->uq_inherited_pri = old_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}

	if (error != 0 && error != EOWNERDEAD) {
		mtx_lock(&umtx_lock);
		uq->uq_inherited_pri = old_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}

out:
	umtxq_unbusy_unlocked(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Unlock a PP mutex.
 */
static int
do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	struct umtx_q *uq, *uq2;
	struct umtx_pi *pi;
	uint32_t id, owner, rceiling;
	int error, pri, new_inherited_pri, su;

	id = td->td_tid;
	uq = td->td_umtxq;
	su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);

	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
	if (error != 0)
		return (error);

	if (rceiling == -1)
		new_inherited_pri = PRI_MAX;
	else {
		rceiling = RTP_PRIO_MAX - rceiling;
		if (rceiling > RTP_PRIO_MAX)
			return (EINVAL);
		new_inherited_pri = PRI_MIN_REALTIME + rceiling;
	}

	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	umtxq_unlock(&key);
	/*
	 * For a priority-protected mutex, always set the unlocked state
	 * to UMUTEX_CONTESTED, so that userland always enters the kernel
	 * to lock the mutex.  This is necessary because the thread
	 * priority has to be adjusted for such mutexes.
	 */
	error = suword32(&m->m_owner, umtx_unlock_val(flags, rb) |
	    UMUTEX_CONTESTED);

	umtxq_lock(&key);
	if (error == 0)
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);

	if (error == -1)
		error = EFAULT;
	else {
		mtx_lock(&umtx_lock);
		if (su || new_inherited_pri == PRI_MAX)
			uq->uq_inherited_pri = new_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}
	umtx_key_release(&key);
	return (error);
}
static int
do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
    uint32_t *old_ceiling)
{
	struct umtx_q *uq;
	uint32_t flags, id, owner, save_ceiling;
	int error, rv, rv1;

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);
	if (ceiling > RTP_PRIO_MAX)
		return (EINVAL);
	id = td->td_tid;
	uq = td->td_umtxq;
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);
	for (;;) {
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		rv = fueword32(&m->m_ceilings[0], &save_ceiling);
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    id | UMUTEX_CONTESTED);
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		if (rv == 0) {
			MPASS(owner == UMUTEX_CONTESTED);
			rv = suword32(&m->m_ceilings[0], ceiling);
			rv1 = suword32(&m->m_owner, UMUTEX_CONTESTED);
			error = (rv == 0 && rv1 == 0) ? 0 : EFAULT;
			break;
		}

		if ((owner & ~UMUTEX_CONTESTED) == id) {
			rv = suword32(&m->m_ceilings[0], ceiling);
			error = rv == 0 ? 0 : EFAULT;
			break;
		}

		if (owner == UMUTEX_RB_OWNERDEAD) {
			error = EOWNERDEAD;
			break;
		} else if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
			break;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		/*
		 * We set the contested bit, sleep.  Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		error = umtxq_sleep(uq, "umtxpp", NULL);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
	}
	umtxq_lock(&uq->uq_key);
	if (error == 0)
		umtxq_signal(&uq->uq_key, INT_MAX);
	umtxq_unbusy(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	if (error == 0 && old_ceiling != NULL) {
		rv = suword32(old_ceiling, save_ceiling);
		error = rv == 0 ? 0 : EFAULT;
	}
	return (error);
}
/*
 * Lock a userland POSIX mutex.
 */
static int
do_lock_umutex(struct thread *td, struct umutex *m,
    struct _umtx_time *timeout, int mode)
{
	uint32_t flags;
	int error;

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
	case 0:
		error = do_lock_normal(td, m, flags, timeout, mode);
		break;
	case UMUTEX_PRIO_INHERIT:
		error = do_lock_pi(td, m, flags, timeout, mode);
		break;
	case UMUTEX_PRIO_PROTECT:
		error = do_lock_pp(td, m, flags, timeout, mode);
		break;
	default:
		return (EINVAL);
	}
	if (timeout == NULL) {
		if (error == EINTR && mode != _UMUTEX_WAIT)
			error = ERESTART;
	} else {
		/* Timed-locking is not restarted. */
		if (error == ERESTART)
			error = EINTR;
	}
	return (error);
}

/*
 * Unlock a userland POSIX mutex.
 */
static int
do_unlock_umutex(struct thread *td, struct umutex *m, bool rb)
{
	uint32_t flags;
	int error;

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
	case 0:
		return (do_unlock_normal(td, m, flags, rb));
	case UMUTEX_PRIO_INHERIT:
		return (do_unlock_pi(td, m, flags, rb));
	case UMUTEX_PRIO_PROTECT:
		return (do_unlock_pp(td, m, flags, rb));
	}

	return (EINVAL);
}
static int
do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
    struct timespec *timeout, u_long wflags)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, clockid, hasw;
	int error;

	uq = td->td_umtxq;
	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if ((wflags & CVWAIT_CLOCKID) != 0) {
		error = fueword32(&cv->c_clockid, &clockid);
		if (error == -1) {
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}
		if (clockid < CLOCK_REALTIME ||
		    clockid >= CLOCK_THREAD_CPUTIME_ID) {
			/* hmm, only HW clock id will work. */
			umtx_key_release(&uq->uq_key);
			return (EINVAL);
		}
	} else {
		clockid = CLOCK_REALTIME;
	}

	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);

	/*
	 * Set c_has_waiters to 1 before releasing the user mutex; also
	 * don't modify the cache line when unnecessary.
	 */
	error = fueword32(&cv->c_has_waiters, &hasw);
	if (error == 0 && hasw == 0)
		suword32(&cv->c_has_waiters, 1);

	umtxq_unbusy_unlocked(&uq->uq_key);

	error = do_unlock_umutex(td, m, false);

	if (timeout != NULL)
		umtx_abs_timeout_init(&timo, clockid,
		    (wflags & CVWAIT_ABSTIME) != 0, timeout);

	umtxq_lock(&uq->uq_key);
	if (error == 0) {
		error = umtxq_sleep(uq, "ucond", timeout == NULL ?
		    NULL : &timo);
	}

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		/*
		 * This must be a timeout, an interruption by a signal,
		 * or a spurious wakeup; clear the c_has_waiters flag
		 * when necessary.
		 */
		umtxq_busy(&uq->uq_key);
		if ((uq->uq_flags & UQF_UMTXQ) != 0) {
			int oldlen = uq->uq_cur_queue->length;
			umtxq_remove(uq);
			if (oldlen == 1) {
				umtxq_unlock(&uq->uq_key);
				suword32(&cv->c_has_waiters, 0);
				umtxq_lock(&uq->uq_key);
			}
		}
		umtxq_unbusy(&uq->uq_key);
		if (error == ERESTART)
			error = EINTR;
	}

	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}

/*
 * Signal a userland condition variable.
 */
static int
do_cv_signal(struct thread *td, struct ucond *cv)
{
	struct umtx_key key;
	int error, cnt, nwake;
	uint32_t flags;

	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	nwake = umtxq_signal(&key, 1);
	if (cnt <= nwake) {
		umtxq_unlock(&key);
		error = suword32(&cv->c_has_waiters, 0);
		if (error == -1)
			error = EFAULT;
		umtxq_lock(&key);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}

static int
do_cv_broadcast(struct thread *td, struct ucond *cv)
{
	struct umtx_key key;
	int error;
	uint32_t flags;

	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	umtxq_signal(&key, INT_MAX);
	umtxq_unlock(&key);

	error = suword32(&cv->c_has_waiters, 0);
	if (error == -1)
		error = EFAULT;

	umtxq_unbusy_unlocked(&key);

	umtx_key_release(&key);
	return (error);
}
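/*
 * Illustrative note (editorial): c_has_waiters is the userland
 * fast-path hint.  A pthread_cond_signal() implementation can skip the
 * system call entirely when the word is 0; do_cv_wait() therefore sets
 * it to 1 before dropping the mutex, and the last waiter leaving (or a
 * wakeup that empties the queue) clears it again.
 */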
static int
do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag,
    struct _umtx_time *timeout)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, wrflags;
	int32_t state, oldstate;
	int32_t blocked_readers;
	int error, error1, rv;

	uq = td->td_umtxq;
	error = fueword32(&rwlock->rw_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		umtx_abs_timeout_init2(&timo, timeout);

	wrflags = URWLOCK_WRITE_OWNER;
	if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
		wrflags |= URWLOCK_WRITE_WAITERS;

	for (;;) {
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1) {
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/* try to lock it */
		while (!(state & wrflags)) {
			if (__predict_false(URWLOCK_READER_COUNT(state) ==
			    URWLOCK_MAX_READERS)) {
				umtx_key_release(&uq->uq_key);
				return (EAGAIN);
			}
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state + 1);
			if (rv == -1) {
				umtx_key_release(&uq->uq_key);
				return (EFAULT);
			}
			if (rv == 0) {
				MPASS(oldstate == state);
				umtx_key_release(&uq->uq_key);
				return (0);
			}
			state = oldstate;
			error = thread_check_susp(td, true);
			if (error != 0)
				break;
		}

		if (error != 0)
			break;

		/* grab monitor lock */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Re-read the state, in case it changed between the try-lock
		 * above and the check below.
		 */
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1)
			error = EFAULT;

		/* set read contention bit */
		while (error == 0 && (state & wrflags) &&
		    !(state & URWLOCK_READ_WAITERS)) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_READ_WAITERS);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (rv == 0) {
				MPASS(oldstate == state);
				goto sleep;
			}
			state = oldstate;
			error = thread_check_susp(td, false);
			if (error != 0)
				break;
		}
		if (error != 0) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			break;
		}

		/* state is changed while setting flags, restart */
		if (!(state & wrflags)) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = thread_check_susp(td, true);
			if (error != 0)
				break;
			continue;
		}

sleep:
		/*
		 * The contention bit is set; before sleeping, increase
		 * the read waiter count.
		 */
		rv = fueword32(&rwlock->rw_blocked_readers,
		    &blocked_readers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_readers, blocked_readers + 1);

		while (state & wrflags) {
			umtxq_lock(&uq->uq_key);
			umtxq_insert(uq);
			umtxq_unbusy(&uq->uq_key);

			error = umtxq_sleep(uq, "urdlck", timeout == NULL ?
			    NULL : &timo);

			umtxq_busy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			if (error != 0)
				break;
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
		}

		/* decrease read waiter count, and may clear read contention bit */
		rv = fueword32(&rwlock->rw_blocked_readers,
		    &blocked_readers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_readers, blocked_readers - 1);
		if (blocked_readers == 1) {
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				umtxq_unbusy_unlocked(&uq->uq_key);
				error = EFAULT;
				break;
			}
			for (;;) {
				rv = casueword32(&rwlock->rw_state, state,
				    &oldstate, state & ~URWLOCK_READ_WAITERS);
				if (rv == -1) {
					error = EFAULT;
					break;
				}
				if (rv == 0) {
					MPASS(oldstate == state);
					break;
				}
				state = oldstate;
				error1 = thread_check_susp(td, false);
				if (error1 != 0) {
					if (error == 0)
						error = error1;
					break;
				}
			}
		}

		umtxq_unbusy_unlocked(&uq->uq_key);
		if (error != 0)
			break;
	}
	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
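/*
 * Note on the encoding used above (constants from sys/umtx.h): the entire
 * rwlock state lives in the single 32-bit rw_state word that the loops
 * CAS on:
 *
 *	bit 31	URWLOCK_WRITE_OWNER	- a writer owns the lock
 *	bit 30	URWLOCK_WRITE_WAITERS	- blocked writers exist
 *	bit 29	URWLOCK_READ_WAITERS	- blocked readers exist
 *	bits 0-28			- URWLOCK_READER_COUNT(state)
 *
 * A read lock is therefore "CAS state -> state + 1 while no write bit of
 * interest is set", which is exactly the try-lock loop at the top of
 * do_rw_rdlock().
 */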
static int
do_rw_wrlock(struct thread *td, struct urwlock *rwlock, struct _umtx_time *timeout)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags;
	int32_t state, oldstate;
	int32_t blocked_writers;
	int32_t blocked_readers;
	int error, error1, rv;

	uq = td->td_umtxq;
	error = fueword32(&rwlock->rw_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		umtx_abs_timeout_init2(&timo, timeout);

	blocked_readers = 0;
	for (;;) {
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1) {
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}
		while ((state & URWLOCK_WRITE_OWNER) == 0 &&
		    URWLOCK_READER_COUNT(state) == 0) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_WRITE_OWNER);
			if (rv == -1) {
				umtx_key_release(&uq->uq_key);
				return (EFAULT);
			}
			if (rv == 0) {
				MPASS(oldstate == state);
				umtx_key_release(&uq->uq_key);
				return (0);
			}
			state = oldstate;
			error = thread_check_susp(td, true);
			if (error != 0)
				break;
		}

		if (error != 0) {
			if ((state & (URWLOCK_WRITE_OWNER |
			    URWLOCK_WRITE_WAITERS)) == 0 &&
			    blocked_readers != 0) {
				umtxq_lock(&uq->uq_key);
				umtxq_busy(&uq->uq_key);
				umtxq_signal_queue(&uq->uq_key, INT_MAX,
				    UMTX_SHARED_QUEUE);
				umtxq_unbusy(&uq->uq_key);
				umtxq_unlock(&uq->uq_key);
			}
			break;
		}

		/* grab monitor lock */
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Re-read the state, in case it changed between the
		 * try-lock above and the check below.
		 */
		rv = fueword32(&rwlock->rw_state, &state);
		if (rv == -1)
			error = EFAULT;

		while (error == 0 && ((state & URWLOCK_WRITE_OWNER) ||
		    URWLOCK_READER_COUNT(state) != 0) &&
		    (state & URWLOCK_WRITE_WAITERS) == 0) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state | URWLOCK_WRITE_WAITERS);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (rv == 0) {
				MPASS(oldstate == state);
				goto sleep;
			}
			state = oldstate;
			error = thread_check_susp(td, false);
			if (error != 0)
				break;
		}
		if (error != 0) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			break;
		}

		if ((state & URWLOCK_WRITE_OWNER) == 0 &&
		    URWLOCK_READER_COUNT(state) == 0) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = thread_check_susp(td, false);
			if (error != 0)
				break;
			continue;
		}

sleep:
		rv = fueword32(&rwlock->rw_blocked_writers,
		    &blocked_writers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_writers, blocked_writers + 1);

		while ((state & URWLOCK_WRITE_OWNER) ||
		    URWLOCK_READER_COUNT(state) != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
			umtxq_unbusy(&uq->uq_key);

			error = umtxq_sleep(uq, "uwrlck", timeout == NULL ?
			    NULL : &timo);

			umtxq_busy(&uq->uq_key);
			umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
			umtxq_unlock(&uq->uq_key);
			if (error != 0)
				break;
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
		}

		rv = fueword32(&rwlock->rw_blocked_writers,
		    &blocked_writers);
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		suword32(&rwlock->rw_blocked_writers, blocked_writers - 1);
		if (blocked_writers == 1) {
			rv = fueword32(&rwlock->rw_state, &state);
			if (rv == -1) {
				umtxq_unbusy_unlocked(&uq->uq_key);
				error = EFAULT;
				break;
			}
			for (;;) {
				rv = casueword32(&rwlock->rw_state, state,
				    &oldstate, state & ~URWLOCK_WRITE_WAITERS);
				if (rv == -1) {
					error = EFAULT;
					break;
				}
				if (rv == 0) {
					MPASS(oldstate == state);
					break;
				}
				state = oldstate;
				error1 = thread_check_susp(td, false);
				/*
				 * We are leaving the URWLOCK_WRITE_WAITERS
				 * flag behind, but this should not harm
				 * correctness.
				 */
				if (error1 != 0) {
					if (error == 0)
						error = error1;
					break;
				}
			}
			rv = fueword32(&rwlock->rw_blocked_readers,
			    &blocked_readers);
			if (rv == -1) {
				umtxq_unbusy_unlocked(&uq->uq_key);
				error = EFAULT;
				break;
			}
		} else
			blocked_readers = 0;

		umtxq_unbusy_unlocked(&uq->uq_key);
	}

	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
static int
do_rw_unlock(struct thread *td, struct urwlock *rwlock)
{
	struct umtx_q *uq;
	uint32_t flags;
	int32_t state, oldstate;
	int error, rv, q, count;

	uq = td->td_umtxq;
	error = fueword32(&rwlock->rw_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	error = fueword32(&rwlock->rw_state, &state);
	if (error == -1) {
		error = EFAULT;
		goto out;
	}
	if (state & URWLOCK_WRITE_OWNER) {
		for (;;) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state & ~URWLOCK_WRITE_OWNER);
			if (rv == -1) {
				error = EFAULT;
				goto out;
			}
			if (rv == 0)
				break;
			state = oldstate;
			if (!(oldstate & URWLOCK_WRITE_OWNER)) {
				error = EPERM;
				goto out;
			}
			error = thread_check_susp(td, true);
			if (error != 0)
				goto out;
		}
	} else if (URWLOCK_READER_COUNT(state) != 0) {
		for (;;) {
			rv = casueword32(&rwlock->rw_state, state,
			    &oldstate, state - 1);
			if (rv == -1) {
				error = EFAULT;
				goto out;
			}
			if (rv == 0)
				break;
			state = oldstate;
			if (URWLOCK_READER_COUNT(oldstate) == 0) {
				error = EPERM;
				goto out;
			}
			error = thread_check_susp(td, true);
			if (error != 0)
				goto out;
		}
	} else {
		error = EPERM;
		goto out;
	}

	state = oldstate;
	count = 0;

	if (!(flags & URWLOCK_PREFER_READER)) {
		if (state & URWLOCK_WRITE_WAITERS) {
			count = 1;
			q = UMTX_EXCLUSIVE_QUEUE;
		} else if (state & URWLOCK_READ_WAITERS) {
			count = INT_MAX;
			q = UMTX_SHARED_QUEUE;
		}
	} else {
		if (state & URWLOCK_READ_WAITERS) {
			count = INT_MAX;
			q = UMTX_SHARED_QUEUE;
		} else if (state & URWLOCK_WRITE_WAITERS) {
			count = 1;
			q = UMTX_EXCLUSIVE_QUEUE;
		}
	}

	if (count) {
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_signal_queue(&uq->uq_key, count, q);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
	}
out:
	umtx_key_release(&uq->uq_key);
	return (error);
}
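/*
 * Usage sketch (hypothetical, using the public _umtx_op(2) contract): a
 * timed write-lock attempt passes the _umtx_time size through uaddr1 and
 * the structure itself through uaddr2; a NULL uaddr2 waits forever.
 *
 *	struct _umtx_time to = {
 *		._timeout = { .tv_sec = 1, .tv_nsec = 0 },
 *		._flags = 0,			// relative timeout
 *		._clockid = CLOCK_MONOTONIC,
 *	};
 *	error = _umtx_op(rw, UMTX_OP_RW_WRLOCK, 0,
 *	    (void *)sizeof(to), &to);
 */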
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
static int
do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, count, count1;
	int error, rv, rv1;

	uq = td->td_umtxq;
	error = fueword32(&sem->_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if (timeout != NULL)
		umtx_abs_timeout_init2(&timo, timeout);

again:
	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	rv = casueword32(&sem->_has_waiters, 0, &count1, 1);
	if (rv == 0)
		rv1 = fueword32(&sem->_count, &count);
	if (rv == -1 || (rv == 0 && (rv1 == -1 || count != 0)) ||
	    (rv == 1 && count1 == 0)) {
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		if (rv == 1) {
			rv = thread_check_susp(td, true);
			if (rv == 0)
				goto again;
			error = rv;
			goto out;
		}
		if (rv == 0)
			rv = rv1;
		error = rv == -1 ? EFAULT : 0;
		goto out;
	}
	umtxq_lock(&uq->uq_key);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		umtxq_remove(uq);
		/* A relative timeout cannot be restarted. */
		if (error == ERESTART && timeout != NULL &&
		    (timeout->_flags & UMTX_ABSTIME) == 0)
			error = EINTR;
	}
	umtxq_unlock(&uq->uq_key);
out:
	umtx_key_release(&uq->uq_key);
	return (error);
}

/*
 * Signal a userland semaphore.
 */
static int
do_sem_wake(struct thread *td, struct _usem *sem)
{
	struct umtx_key key;
	int error, cnt;
	uint32_t flags;

	error = fueword32(&sem->_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	if (cnt > 0) {
		/*
		 * A count greater than zero means the memory is still
		 * being referenced by user code, so the _has_waiters flag
		 * can be safely updated.
		 */
		if (cnt == 1) {
			umtxq_unlock(&key);
			error = suword32(&sem->_has_waiters, 0);
			umtxq_lock(&key);
			if (error == -1)
				error = EFAULT;
		}
		umtxq_signal(&key, 1);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
#endif /* COMPAT_FREEBSD9 || COMPAT_FREEBSD10 */
static int
do_sem2_wait(struct thread *td, struct _usem2 *sem, struct _umtx_time *timeout)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	uint32_t count, flags;
	int error, rv;

	uq = td->td_umtxq;
	flags = fuword32(&sem->_flags);
	if (timeout != NULL)
		umtx_abs_timeout_init2(&timo, timeout);

again:
	error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);
	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	rv = fueword32(&sem->_count, &count);
	if (rv == -1) {
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);
		return (EFAULT);
	}
	for (;;) {
		if (USEM_COUNT(count) != 0) {
			umtxq_lock(&uq->uq_key);
			umtxq_unbusy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (0);
		}
		if (count == USEM_HAS_WAITERS)
			break;
		rv = casueword32(&sem->_count, 0, &count, USEM_HAS_WAITERS);
		if (rv == 0)
			break;
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);
		if (rv == -1)
			return (EFAULT);
		rv = thread_check_susp(td, true);
		if (rv != 0)
			return (rv);
		goto again;
	}
	umtxq_lock(&uq->uq_key);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		umtxq_remove(uq);
		if (timeout != NULL && (timeout->_flags & UMTX_ABSTIME) == 0) {
			/* A relative timeout cannot be restarted. */
			if (error == ERESTART)
				error = EINTR;
			if (error == EINTR) {
				kern_clock_gettime(curthread, timo.clockid,
				    &timo.cur);
				timespecsub(&timo.end, &timo.cur,
				    &timeout->_timeout);
			}
		}
	}
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Signal a userland semaphore.
 */
static int
do_sem2_wake(struct thread *td, struct _usem2 *sem)
{
	struct umtx_key key;
	int error, cnt, rv;
	uint32_t count, flags;

	rv = fueword32(&sem->_flags, &flags);
	if (rv == -1)
		return (EFAULT);
	if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	if (cnt > 0) {
		/*
		 * If this was the last sleeping thread, clear the waiters
		 * flag in _count.
		 */
		if (cnt == 1) {
			umtxq_unlock(&key);
			rv = fueword32(&sem->_count, &count);
			while (rv != -1 && count & USEM_HAS_WAITERS) {
				rv = casueword32(&sem->_count, count, &count,
				    count & ~USEM_HAS_WAITERS);
				if (rv == 1) {
					rv = thread_check_susp(td, true);
					if (rv != 0)
						break;
				}
			}
			if (rv == -1)
				error = EFAULT;
			else if (rv > 0)
				error = rv;
			umtxq_lock(&key);
		}

		umtxq_signal(&key, 1);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
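/*
 * Sketch of the _usem2 count word manipulated above (constants from
 * sys/umtx.h): bit 31 is USEM_HAS_WAITERS and the low 31 bits are
 * USEM_COUNT().  A hypothetical userland post can then stay out of the
 * kernel unless the wait bit is set:
 *
 *	#include <machine/atomic.h>
 *
 *	static void
 *	sem2_post_example(struct _usem2 *sem)
 *	{
 *		uint32_t c;
 *
 *		c = atomic_fetchadd_32(&sem->_count, 1);
 *		if ((c & USEM_HAS_WAITERS) != 0)
 *			(void)_umtx_op(sem, UMTX_OP_SEM2_WAKE, 0, NULL, NULL);
 *	}
 */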
#ifdef COMPAT_FREEBSD10
int
freebsd10__umtx_lock(struct thread *td, struct freebsd10__umtx_lock_args *uap)
{
	return (do_lock_umtx(td, uap->umtx, td->td_tid, 0));
}

int
freebsd10__umtx_unlock(struct thread *td,
    struct freebsd10__umtx_unlock_args *uap)
{
	return (do_unlock_umtx(td, uap->umtx, td->td_tid));
}
#endif /* COMPAT_FREEBSD10 */

static inline int
umtx_copyin_timeout(const void *uaddr, struct timespec *tsp)
{
	int error;

	error = copyin(uaddr, tsp, sizeof(*tsp));
	if (error == 0) {
		if (tsp->tv_sec < 0 ||
		    tsp->tv_nsec >= 1000000000 ||
		    tsp->tv_nsec < 0)
			error = EINVAL;
	}
	return (error);
}

static inline int
umtx_copyin_umtx_time(const void *uaddr, size_t size, struct _umtx_time *tp)
{
	int error;

	if (size <= sizeof(tp->_timeout)) {
		tp->_clockid = CLOCK_REALTIME;
		tp->_flags = 0;
		error = copyin(uaddr, &tp->_timeout, sizeof(tp->_timeout));
	} else
		error = copyin(uaddr, tp, sizeof(*tp));
	if (error != 0)
		return (error);
	if (tp->_timeout.tv_sec < 0 ||
	    tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0)
		return (EINVAL);
	return (0);
}

static int
umtx_copyin_robust_lists(const void *uaddr, size_t size,
    struct umtx_robust_lists_params *rb)
{

	if (size > sizeof(*rb))
		return (EINVAL);
	return (copyin(uaddr, rb, size));
}

static int
umtx_copyout_timeout(void *uaddr, size_t sz, struct timespec *tsp)
{

	/*
	 * Should be guaranteed by the caller: sz == uaddr1 - sizeof(_umtx_time),
	 * and we're only called if sz >= sizeof(timespec) as supplied in the
	 * copyops.
	 */
	KASSERT(sz >= sizeof(*tsp),
	    ("umtx_copyops specifies incorrect sizes"));

	return (copyout(tsp, uaddr, sizeof(*tsp)));
}
#ifdef COMPAT_FREEBSD10
static int
__umtx_op_lock_umtx(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = ops->copyin_timeout(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
#ifdef COMPAT_FREEBSD32
	if (ops->compat32)
		return (do_lock_umtx32(td, uap->obj, uap->val, ts));
#endif
	return (do_lock_umtx(td, uap->obj, uap->val, ts));
}

static int
__umtx_op_unlock_umtx(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
#ifdef COMPAT_FREEBSD32
	if (ops->compat32)
		return (do_unlock_umtx32(td, uap->obj, uap->val));
#endif
	return (do_unlock_umtx(td, uap->obj, uap->val));
}
#endif /* COMPAT_FREEBSD10 */

#if !defined(COMPAT_FREEBSD10)
static int
__umtx_op_unimpl(struct thread *td __unused, struct _umtx_op_args *uap __unused,
    const struct umtx_copyops *ops __unused)
{
	return (EOPNOTSUPP);
}
#endif /* !COMPAT_FREEBSD10 */

static int
__umtx_op_wait(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time timeout, *tm_p;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = ops->copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, ops->compat32, 0));
}

static int
__umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time timeout, *tm_p;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = ops->copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
}

static int
__umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = ops->copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
}
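/*
 * Hypothetical futex-style usage of the private-wait handler above: uaddr1
 * smuggles the timeout structure size, uaddr2 the structure itself, and a
 * NULL uaddr2 sleeps forever.  The wrapper name is an assumption.
 *
 *	static int
 *	wait_word_example(u_int *addr, u_int expected,
 *	    struct _umtx_time *to, size_t tosz)
 *	{
 *		// Sleeps while *addr == expected.
 *		return (_umtx_op(addr, UMTX_OP_WAIT_UINT_PRIVATE, expected,
 *		    (void *)tosz, to));
 *	}
 */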
static int
__umtx_op_wake(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (kern_umtx_wake(td, uap->obj, uap->val, 0));
}

#define	BATCH_SIZE	128

static int
__umtx_op_nwake_private_native(struct thread *td, struct _umtx_op_args *uap)
{
	char *uaddrs[BATCH_SIZE], **upp;
	int count, error, i, pos, tocopy;

	upp = (char **)uap->obj;
	error = 0;
	for (count = uap->val, pos = 0; count > 0; count -= tocopy,
	    pos += tocopy) {
		tocopy = MIN(count, BATCH_SIZE);
		error = copyin(upp + pos, uaddrs, tocopy * sizeof(char *));
		if (error != 0)
			break;
		for (i = 0; i < tocopy; ++i) {
			kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
		}
		maybe_yield();
	}
	return (error);
}

static int
__umtx_op_nwake_private_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	uint32_t uaddrs[BATCH_SIZE], *upp;
	int count, error, i, pos, tocopy;

	upp = (uint32_t *)uap->obj;
	error = 0;
	for (count = uap->val, pos = 0; count > 0; count -= tocopy,
	    pos += tocopy) {
		tocopy = MIN(count, BATCH_SIZE);
		error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t));
		if (error != 0)
			break;
		for (i = 0; i < tocopy; ++i) {
			kern_umtx_wake(td, (void *)(uintptr_t)uaddrs[i],
			    INT_MAX, 1);
		}
		maybe_yield();
	}
	return (error);
}

static int
__umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{

	if (ops->compat32)
		return (__umtx_op_nwake_private_compat32(td, uap));
	return (__umtx_op_nwake_private_native(td, uap));
}
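/*
 * Hypothetical caller of the batched wake above: obj points at an array of
 * wait-word addresses and val is the element count, so one syscall can
 * replace N separate UMTX_OP_WAKE_PRIVATE calls.
 *
 *	void *addrs[] = { &w0, &w1, &w2 };	// hypothetical wait words
 *	(void)_umtx_op(addrs, UMTX_OP_NWAKE_PRIVATE, nitems(addrs),
 *	    NULL, NULL);
 */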
static int
__umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (kern_umtx_wake(td, uap->obj, uap->val, 1));
}

static int
__umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = ops->copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, 0));
}

static int
__umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY));
}

static int
__umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = ops->copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
}

static int
__umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_wake_umutex(td, uap->obj));
}

static int
__umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_unlock_umutex(td, uap->obj, false));
}

static int
__umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1));
}

static int
__umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = ops->copyin_timeout(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
}

static int
__umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_cv_signal(td, uap->obj));
}

static int
__umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_cv_broadcast(td, uap->obj));
}

static int
__umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
	} else {
		error = ops->copyin_umtx_time(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_wrlock(td, uap->obj, 0);
	} else {
		error = ops->copyin_umtx_time(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_wrlock(td, uap->obj, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_rw_unlock(td, uap->obj));
}

#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
static int
__umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = ops->copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_sem_wait(td, uap->obj, tm_p));
}

static int
__umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_sem_wake(td, uap->obj));
}
#endif /* COMPAT_FREEBSD9 || COMPAT_FREEBSD10 */

static int
__umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_wake2_umutex(td, uap->obj, uap->val));
}

static int
__umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct _umtx_time *tm_p, timeout;
	size_t uasize;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		uasize = 0;
		tm_p = NULL;
	} else {
		uasize = (size_t)uap->uaddr1;
		error = ops->copyin_umtx_time(uap->uaddr2, uasize, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	error = do_sem2_wait(td, uap->obj, tm_p);
	if (error == EINTR && uap->uaddr2 != NULL &&
	    (timeout._flags & UMTX_ABSTIME) == 0 &&
	    uasize >= ops->umtx_time_sz + ops->timespec_sz) {
		error = ops->copyout_timeout(
		    (void *)((uintptr_t)uap->uaddr2 + ops->umtx_time_sz),
		    uasize - ops->umtx_time_sz, &timeout._timeout);
		if (error == 0)
			error = EINTR;
	}

	return (error);
}
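/*
 * Sketch of the userland buffer layout assumed by the copyout above: an
 * optional second timespec trails the _umtx_time, and the kernel rewrites
 * it with the time remaining when a relative wait is interrupted:
 *
 *	struct {
 *		struct _umtx_time req;		// the requested timeout
 *		struct timespec remain;		// updated on EINTR
 *	} to;					// uaddr1 = (void *)sizeof(to)
 */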
static int
__umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (do_sem2_wake(td, uap->obj));
}

#define	USHM_OBJ_UMTX(o)						\
    ((struct umtx_shm_obj_list *)(&(o)->umtx_data))

#define	USHMF_REG_LINKED	0x0001
#define	USHMF_OBJ_LINKED	0x0002
struct umtx_shm_reg {
	TAILQ_ENTRY(umtx_shm_reg) ushm_reg_link;
	LIST_ENTRY(umtx_shm_reg) ushm_obj_link;
	struct umtx_key		ushm_key;
	struct ucred		*ushm_cred;
	struct shmfd		*ushm_obj;
	u_int			ushm_refcnt;
	u_int			ushm_flags;
};

LIST_HEAD(umtx_shm_obj_list, umtx_shm_reg);
TAILQ_HEAD(umtx_shm_reg_head, umtx_shm_reg);

static uma_zone_t umtx_shm_reg_zone;
static struct umtx_shm_reg_head umtx_shm_registry[UMTX_CHAINS];
static struct mtx umtx_shm_lock;
static struct umtx_shm_reg_head umtx_shm_reg_delfree =
    TAILQ_HEAD_INITIALIZER(umtx_shm_reg_delfree);

static void umtx_shm_free_reg(struct umtx_shm_reg *reg);

static void
umtx_shm_reg_delfree_tq(void *context __unused, int pending __unused)
{
	struct umtx_shm_reg_head d;
	struct umtx_shm_reg *reg, *reg1;

	TAILQ_INIT(&d);
	mtx_lock(&umtx_shm_lock);
	TAILQ_CONCAT(&d, &umtx_shm_reg_delfree, ushm_reg_link);
	mtx_unlock(&umtx_shm_lock);
	TAILQ_FOREACH_SAFE(reg, &d, ushm_reg_link, reg1) {
		TAILQ_REMOVE(&d, reg, ushm_reg_link);
		umtx_shm_free_reg(reg);
	}
}

static struct task umtx_shm_reg_delfree_task =
    TASK_INITIALIZER(0, umtx_shm_reg_delfree_tq, NULL);
static struct umtx_shm_reg *
umtx_shm_find_reg_locked(const struct umtx_key *key)
{
	struct umtx_shm_reg *reg;
	struct umtx_shm_reg_head *reg_head;

	KASSERT(key->shared, ("umtx_shm_find_reg_locked: private key"));
	mtx_assert(&umtx_shm_lock, MA_OWNED);
	reg_head = &umtx_shm_registry[key->hash];
	TAILQ_FOREACH(reg, reg_head, ushm_reg_link) {
		KASSERT(reg->ushm_key.shared,
		    ("non-shared key on reg %p %d", reg, reg->ushm_key.shared));
		if (reg->ushm_key.info.shared.object ==
		    key->info.shared.object &&
		    reg->ushm_key.info.shared.offset ==
		    key->info.shared.offset) {
			KASSERT(reg->ushm_key.type == TYPE_SHM, ("TYPE_USHM"));
			KASSERT(reg->ushm_refcnt > 0,
			    ("reg %p refcnt 0 onlist", reg));
			KASSERT((reg->ushm_flags & USHMF_REG_LINKED) != 0,
			    ("reg %p not linked", reg));
			reg->ushm_refcnt++;
			return (reg);
		}
	}
	return (NULL);
}

static struct umtx_shm_reg *
umtx_shm_find_reg(const struct umtx_key *key)
{
	struct umtx_shm_reg *reg;

	mtx_lock(&umtx_shm_lock);
	reg = umtx_shm_find_reg_locked(key);
	mtx_unlock(&umtx_shm_lock);
	return (reg);
}

static void
umtx_shm_free_reg(struct umtx_shm_reg *reg)
{

	chgumtxcnt(reg->ushm_cred->cr_ruidinfo, -1, 0);
	crfree(reg->ushm_cred);
	shm_drop(reg->ushm_obj);
	uma_zfree(umtx_shm_reg_zone, reg);
}

static bool
umtx_shm_unref_reg_locked(struct umtx_shm_reg *reg, bool force)
{
	bool res;

	mtx_assert(&umtx_shm_lock, MA_OWNED);
	KASSERT(reg->ushm_refcnt > 0, ("ushm_reg %p refcnt 0", reg));
	reg->ushm_refcnt--;
	res = reg->ushm_refcnt == 0;
	if (res || force) {
		if ((reg->ushm_flags & USHMF_REG_LINKED) != 0) {
			TAILQ_REMOVE(&umtx_shm_registry[reg->ushm_key.hash],
			    reg, ushm_reg_link);
			reg->ushm_flags &= ~USHMF_REG_LINKED;
		}
		if ((reg->ushm_flags & USHMF_OBJ_LINKED) != 0) {
			LIST_REMOVE(reg, ushm_obj_link);
			reg->ushm_flags &= ~USHMF_OBJ_LINKED;
		}
	}
	return (res);
}

static void
umtx_shm_unref_reg(struct umtx_shm_reg *reg, bool force)
{
	vm_object_t object;
	bool dofree;

	if (force) {
		object = reg->ushm_obj->shm_object;
		VM_OBJECT_WLOCK(object);
		object->flags |= OBJ_UMTXDEAD;
		VM_OBJECT_WUNLOCK(object);
	}
	mtx_lock(&umtx_shm_lock);
	dofree = umtx_shm_unref_reg_locked(reg, force);
	mtx_unlock(&umtx_shm_lock);
	if (dofree)
		umtx_shm_free_reg(reg);
}
static void
umtx_shm_object_init(vm_object_t object)
{

	LIST_INIT(USHM_OBJ_UMTX(object));
}

static void
umtx_shm_object_terminated(vm_object_t object)
{
	struct umtx_shm_reg *reg, *reg1;
	bool dofree;

	if (LIST_EMPTY(USHM_OBJ_UMTX(object)))
		return;

	dofree = false;
	mtx_lock(&umtx_shm_lock);
	LIST_FOREACH_SAFE(reg, USHM_OBJ_UMTX(object), ushm_obj_link, reg1) {
		if (umtx_shm_unref_reg_locked(reg, true)) {
			TAILQ_INSERT_TAIL(&umtx_shm_reg_delfree, reg,
			    ushm_reg_link);
			dofree = true;
		}
	}
	mtx_unlock(&umtx_shm_lock);
	if (dofree)
		taskqueue_enqueue(taskqueue_thread, &umtx_shm_reg_delfree_task);
}

static int
umtx_shm_create_reg(struct thread *td, const struct umtx_key *key,
    struct umtx_shm_reg **res)
{
	struct umtx_shm_reg *reg, *reg1;
	struct ucred *cred;
	int error;

	reg = umtx_shm_find_reg(key);
	if (reg != NULL) {
		*res = reg;
		return (0);
	}
	cred = td->td_ucred;
	if (!chgumtxcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_UMTXP)))
		return (ENOMEM);
	reg = uma_zalloc(umtx_shm_reg_zone, M_WAITOK | M_ZERO);
	reg->ushm_refcnt = 1;
	bcopy(key, &reg->ushm_key, sizeof(*key));
	reg->ushm_obj = shm_alloc(td->td_ucred, O_RDWR, false);
	reg->ushm_cred = crhold(cred);
	error = shm_dotruncate(reg->ushm_obj, PAGE_SIZE);
	if (error != 0) {
		umtx_shm_free_reg(reg);
		return (error);
	}
	mtx_lock(&umtx_shm_lock);
	reg1 = umtx_shm_find_reg_locked(key);
	if (reg1 != NULL) {
		mtx_unlock(&umtx_shm_lock);
		umtx_shm_free_reg(reg);
		*res = reg1;
		return (0);
	}
	reg->ushm_refcnt++;
	TAILQ_INSERT_TAIL(&umtx_shm_registry[key->hash], reg, ushm_reg_link);
	LIST_INSERT_HEAD(USHM_OBJ_UMTX(key->info.shared.object), reg,
	    ushm_obj_link);
	reg->ushm_flags = USHMF_REG_LINKED | USHMF_OBJ_LINKED;
	mtx_unlock(&umtx_shm_lock);
	*res = reg;
	return (0);
}
static int
umtx_shm_alive(struct thread *td, void *addr)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	int res, ret;
	boolean_t wired;

	map = &td->td_proc->p_vmspace->vm_map;
	res = vm_map_lookup(&map, (uintptr_t)addr, VM_PROT_READ, &entry,
	    &object, &pindex, &prot, &wired);
	if (res != KERN_SUCCESS)
		return (EFAULT);
	if (object == NULL)
		ret = EINVAL;
	else
		ret = (object->flags & OBJ_UMTXDEAD) != 0 ? ENOTTY : 0;
	vm_map_lookup_done(map, entry);
	return (ret);
}

static void
umtx_shm_init(void)
{
	int i;

	umtx_shm_reg_zone = uma_zcreate("umtx_shm", sizeof(struct umtx_shm_reg),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&umtx_shm_lock, "umtxshm", NULL, MTX_DEF);
	for (i = 0; i < nitems(umtx_shm_registry); i++)
		TAILQ_INIT(&umtx_shm_registry[i]);
}

static int
umtx_shm(struct thread *td, void *addr, u_int flags)
{
	struct umtx_key key;
	struct umtx_shm_reg *reg;
	struct file *fp;
	int error, fd;

	if (__bitcount(flags & (UMTX_SHM_CREAT | UMTX_SHM_LOOKUP |
	    UMTX_SHM_DESTROY | UMTX_SHM_ALIVE)) != 1)
		return (EINVAL);
	if ((flags & UMTX_SHM_ALIVE) != 0)
		return (umtx_shm_alive(td, addr));
	error = umtx_key_get(addr, TYPE_SHM, PROCESS_SHARE, &key);
	if (error != 0)
		return (error);
	KASSERT(key.shared == 1, ("non-shared key"));
	if ((flags & UMTX_SHM_CREAT) != 0) {
		error = umtx_shm_create_reg(td, &key, &reg);
	} else {
		reg = umtx_shm_find_reg(&key);
		if (reg == NULL)
			error = ESRCH;
	}
	umtx_key_release(&key);
	if (error != 0)
		return (error);
	KASSERT(reg != NULL, ("no reg"));
	if ((flags & UMTX_SHM_DESTROY) != 0) {
		umtx_shm_unref_reg(reg, true);
	} else {
		error = mac_posixshm_check_open(td->td_ucred,
		    reg->ushm_obj, FFLAGS(O_RDWR));
		if (error == 0)
			error = shm_access(reg->ushm_obj, td->td_ucred,
			    FFLAGS(O_RDWR));
		if (error == 0)
			error = falloc_caps(td, &fp, &fd, O_CLOEXEC, NULL);
		if (error == 0) {
			shm_hold(reg->ushm_obj);
			finit(fp, FFLAGS(O_RDWR), DTYPE_SHM, reg->ushm_obj,
			    &shm_ops);
			td->td_retval[0] = fd;
			fdrop(fp, td);
		}
	}
	umtx_shm_unref_reg(reg, false);
	return (error);
}

static int
__umtx_op_shm(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (umtx_shm(td, uap->uaddr1, uap->val));
}
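/*
 * Hypothetical usage of the handler above: UMTX_SHM_CREAT returns, via
 * td_retval (i.e. as the syscall result), a POSIX-shm-backed descriptor
 * for the page-sized object keyed by the shared word at "addr".
 *
 *	int fd;
 *
 *	fd = _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_CREAT, addr, NULL);
 *	if (fd < 0)
 *		err(1, "UMTX_SHM_CREAT");
 */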
static int
__umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct umtx_robust_lists_params rb;
	int error;

	if (ops->compat32) {
		if ((td->td_pflags2 & TDP2_COMPAT32RB) == 0 &&
		    (td->td_rb_list != 0 || td->td_rbp_list != 0 ||
		    td->td_rb_inact != 0))
			return (EBUSY);
	} else if ((td->td_pflags2 & TDP2_COMPAT32RB) != 0) {
		return (EBUSY);
	}

	bzero(&rb, sizeof(rb));
	error = ops->copyin_robust_lists(uap->uaddr1, uap->val, &rb);
	if (error != 0)
		return (error);

	if (ops->compat32)
		td->td_pflags2 |= TDP2_COMPAT32RB;

	td->td_rb_list = rb.robust_list_offset;
	td->td_rbp_list = rb.robust_priv_list_offset;
	td->td_rb_inact = rb.robust_inact_offset;
	return (0);
}
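/*
 * Hypothetical registration call for the handler above; the offsets name
 * the thread's robust-list heads, and val carries the structure size so
 * the ABI can grow.  The variable names are assumptions.
 *
 *	struct umtx_robust_lists_params p = {
 *		.robust_list_offset = (uintptr_t)&list_head,
 *		.robust_priv_list_offset = (uintptr_t)&priv_list_head,
 *		.robust_inact_offset = (uintptr_t)&inact_slot,
 *	};
 *
 *	error = _umtx_op(NULL, UMTX_OP_ROBUST_LISTS, sizeof(p), &p, NULL);
 */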
#if defined(__i386__) || defined(__amd64__)
/*
 * Provide the standard 32-bit definitions for x86, since native/compat32 use a
 * 32-bit time_t there.  Other architectures just need the i386 definitions
 * along with their standard compat32.
 */
struct timespecx32 {
	int64_t			tv_sec;
	int32_t			tv_nsec;
};

struct umtx_timex32 {
	struct	timespecx32	_timeout;
	uint32_t		_flags;
	uint32_t		_clockid;
};

#ifndef __i386__
#define	timespeci386	timespec32
#define	umtx_timei386	umtx_time32
#endif
#else /* !__i386__ && !__amd64__ */
/* 32-bit architectures can emulate i386, so define these almost everywhere. */
struct timespeci386 {
	int32_t			tv_sec;
	int32_t			tv_nsec;
};

struct umtx_timei386 {
	struct	timespeci386	_timeout;
	uint32_t		_flags;
	uint32_t		_clockid;
};

#if defined(__LP64__)
#define	timespecx32	timespec32
#define	umtx_timex32	umtx_time32
#endif
#endif

static int
umtx_copyin_robust_lists32(const void *uaddr, size_t size,
    struct umtx_robust_lists_params *rbp)
{
	struct umtx_robust_lists_params_compat32 rb32;
	int error;

	if (size > sizeof(rb32))
		return (EINVAL);
	bzero(&rb32, sizeof(rb32));
	error = copyin(uaddr, &rb32, size);
	if (error != 0)
		return (error);
	CP(rb32, *rbp, robust_list_offset);
	CP(rb32, *rbp, robust_priv_list_offset);
	CP(rb32, *rbp, robust_inact_offset);
	return (0);
}

#ifndef __i386__
static inline int
umtx_copyin_timeouti386(const void *uaddr, struct timespec *tsp)
{
	struct timespeci386 ts32;
	int error;

	error = copyin(uaddr, &ts32, sizeof(ts32));
	if (error == 0) {
		if (ts32.tv_sec < 0 ||
		    ts32.tv_nsec >= 1000000000 ||
		    ts32.tv_nsec < 0)
			error = EINVAL;
		else {
			CP(ts32, *tsp, tv_sec);
			CP(ts32, *tsp, tv_nsec);
		}
	}
	return (error);
}

static inline int
umtx_copyin_umtx_timei386(const void *uaddr, size_t size, struct _umtx_time *tp)
{
	struct umtx_timei386 t32;
	int error;

	t32._clockid = CLOCK_REALTIME;
	t32._flags = 0;
	if (size <= sizeof(t32._timeout))
		error = copyin(uaddr, &t32._timeout, sizeof(t32._timeout));
	else
		error = copyin(uaddr, &t32, sizeof(t32));
	if (error != 0)
		return (error);
	if (t32._timeout.tv_sec < 0 ||
	    t32._timeout.tv_nsec >= 1000000000 || t32._timeout.tv_nsec < 0)
		return (EINVAL);
	TS_CP(t32, *tp, _timeout);
	CP(t32, *tp, _flags);
	CP(t32, *tp, _clockid);
	return (0);
}

static int
umtx_copyout_timeouti386(void *uaddr, size_t sz, struct timespec *tsp)
{
	struct timespeci386 remain32 = {
		.tv_sec = tsp->tv_sec,
		.tv_nsec = tsp->tv_nsec,
	};

	/*
	 * Should be guaranteed by the caller: sz == uaddr1 - sizeof(_umtx_time),
	 * and we're only called if sz >= sizeof(timespec) as supplied in the
	 * copyops.
	 */
	KASSERT(sz >= sizeof(remain32),
	    ("umtx_copyops specifies incorrect sizes"));

	return (copyout(&remain32, uaddr, sizeof(remain32)));
}
#endif /* !__i386__ */
#if defined(__i386__) || defined(__LP64__)
static inline int
umtx_copyin_timeoutx32(const void *uaddr, struct timespec *tsp)
{
	struct timespecx32 ts32;
	int error;

	error = copyin(uaddr, &ts32, sizeof(ts32));
	if (error == 0) {
		if (ts32.tv_sec < 0 ||
		    ts32.tv_nsec >= 1000000000 ||
		    ts32.tv_nsec < 0)
			error = EINVAL;
		else {
			CP(ts32, *tsp, tv_sec);
			CP(ts32, *tsp, tv_nsec);
		}
	}
	return (error);
}

static inline int
umtx_copyin_umtx_timex32(const void *uaddr, size_t size, struct _umtx_time *tp)
{
	struct umtx_timex32 t32;
	int error;

	t32._clockid = CLOCK_REALTIME;
	t32._flags = 0;
	if (size <= sizeof(t32._timeout))
		error = copyin(uaddr, &t32._timeout, sizeof(t32._timeout));
	else
		error = copyin(uaddr, &t32, sizeof(t32));
	if (error != 0)
		return (error);
	if (t32._timeout.tv_sec < 0 ||
	    t32._timeout.tv_nsec >= 1000000000 || t32._timeout.tv_nsec < 0)
		return (EINVAL);
	TS_CP(t32, *tp, _timeout);
	CP(t32, *tp, _flags);
	CP(t32, *tp, _clockid);
	return (0);
}

static int
umtx_copyout_timeoutx32(void *uaddr, size_t sz, struct timespec *tsp)
{
	struct timespecx32 remain32 = {
		.tv_sec = tsp->tv_sec,
		.tv_nsec = tsp->tv_nsec,
	};

	/*
	 * Should be guaranteed by the caller: sz == uaddr1 - sizeof(_umtx_time),
	 * and we're only called if sz >= sizeof(timespec) as supplied in the
	 * copyops.
	 */
	KASSERT(sz >= sizeof(remain32),
	    ("umtx_copyops specifies incorrect sizes"));

	return (copyout(&remain32, uaddr, sizeof(remain32)));
}
#endif /* __i386__ || __LP64__ */
typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *umtx_ops);

static const _umtx_op_func op_table[] = {
#ifdef COMPAT_FREEBSD10
	[UMTX_OP_LOCK]		= __umtx_op_lock_umtx,
	[UMTX_OP_UNLOCK]	= __umtx_op_unlock_umtx,
#else
	[UMTX_OP_LOCK]		= __umtx_op_unimpl,
	[UMTX_OP_UNLOCK]	= __umtx_op_unimpl,
#endif
	[UMTX_OP_WAIT]		= __umtx_op_wait,
	[UMTX_OP_WAKE]		= __umtx_op_wake,
	[UMTX_OP_MUTEX_TRYLOCK]	= __umtx_op_trylock_umutex,
	[UMTX_OP_MUTEX_LOCK]	= __umtx_op_lock_umutex,
	[UMTX_OP_MUTEX_UNLOCK]	= __umtx_op_unlock_umutex,
	[UMTX_OP_SET_CEILING]	= __umtx_op_set_ceiling,
	[UMTX_OP_CV_WAIT]	= __umtx_op_cv_wait,
	[UMTX_OP_CV_SIGNAL]	= __umtx_op_cv_signal,
	[UMTX_OP_CV_BROADCAST]	= __umtx_op_cv_broadcast,
	[UMTX_OP_WAIT_UINT]	= __umtx_op_wait_uint,
	[UMTX_OP_RW_RDLOCK]	= __umtx_op_rw_rdlock,
	[UMTX_OP_RW_WRLOCK]	= __umtx_op_rw_wrlock,
	[UMTX_OP_RW_UNLOCK]	= __umtx_op_rw_unlock,
	[UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private,
	[UMTX_OP_WAKE_PRIVATE]	= __umtx_op_wake_private,
	[UMTX_OP_MUTEX_WAIT]	= __umtx_op_wait_umutex,
	[UMTX_OP_MUTEX_WAKE]	= __umtx_op_wake_umutex,
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
	[UMTX_OP_SEM_WAIT]	= __umtx_op_sem_wait,
	[UMTX_OP_SEM_WAKE]	= __umtx_op_sem_wake,
#else
	[UMTX_OP_SEM_WAIT]	= __umtx_op_unimpl,
	[UMTX_OP_SEM_WAKE]	= __umtx_op_unimpl,
#endif
	[UMTX_OP_NWAKE_PRIVATE]	= __umtx_op_nwake_private,
	[UMTX_OP_MUTEX_WAKE2]	= __umtx_op_wake2_umutex,
	[UMTX_OP_SEM2_WAIT]	= __umtx_op_sem2_wait,
	[UMTX_OP_SEM2_WAKE]	= __umtx_op_sem2_wake,
	[UMTX_OP_SHM]		= __umtx_op_shm,
	[UMTX_OP_ROBUST_LISTS]	= __umtx_op_robust_lists,
};
static const struct umtx_copyops umtx_native_ops = {
	.copyin_timeout = umtx_copyin_timeout,
	.copyin_umtx_time = umtx_copyin_umtx_time,
	.copyin_robust_lists = umtx_copyin_robust_lists,
	.copyout_timeout = umtx_copyout_timeout,
	.timespec_sz = sizeof(struct timespec),
	.umtx_time_sz = sizeof(struct _umtx_time),
};

#ifndef __i386__
static const struct umtx_copyops umtx_native_opsi386 = {
	.copyin_timeout = umtx_copyin_timeouti386,
	.copyin_umtx_time = umtx_copyin_umtx_timei386,
	.copyin_robust_lists = umtx_copyin_robust_lists32,
	.copyout_timeout = umtx_copyout_timeouti386,
	.timespec_sz = sizeof(struct timespeci386),
	.umtx_time_sz = sizeof(struct umtx_timei386),
	.compat32 = true,
};
#endif

#if defined(__i386__) || defined(__LP64__)
/* i386 can emulate other 32-bit archs, too! */
static const struct umtx_copyops umtx_native_opsx32 = {
	.copyin_timeout = umtx_copyin_timeoutx32,
	.copyin_umtx_time = umtx_copyin_umtx_timex32,
	.copyin_robust_lists = umtx_copyin_robust_lists32,
	.copyout_timeout = umtx_copyout_timeoutx32,
	.timespec_sz = sizeof(struct timespecx32),
	.umtx_time_sz = sizeof(struct umtx_timex32),
	.compat32 = true,
};

#ifdef COMPAT_FREEBSD32
#ifdef __i386__
#define	umtx_native_ops32	umtx_native_opsi386
#else
#define	umtx_native_ops32	umtx_native_opsx32
#endif
#endif /* COMPAT_FREEBSD32 */
#endif /* __i386__ || __LP64__ */
#define	UMTX_OP__FLAGS	(UMTX_OP__32BIT | UMTX_OP__I386)

static int
kern__umtx_op(struct thread *td, void *obj, int op, unsigned long val,
    void *uaddr1, void *uaddr2, const struct umtx_copyops *ops)
{
	struct _umtx_op_args uap = {
		.obj = obj,
		.op = op & ~UMTX_OP__FLAGS,
		.val = val,
		.uaddr1 = uaddr1,
		.uaddr2 = uaddr2
	};

	if (uap.op >= nitems(op_table))
		return (EINVAL);
	return ((*op_table[uap.op])(td, &uap, ops));
}

int
sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
{
	const struct umtx_copyops *umtx_ops;

	umtx_ops = &umtx_native_ops;
#ifdef __LP64__
	if ((uap->op & (UMTX_OP__32BIT | UMTX_OP__I386)) != 0) {
		if ((uap->op & UMTX_OP__I386) != 0)
			umtx_ops = &umtx_native_opsi386;
		else
			umtx_ops = &umtx_native_opsx32;
	}
#elif !defined(__i386__)
	/* We consider UMTX_OP__32BIT a nop on !i386 ILP32. */
	if ((uap->op & UMTX_OP__I386) != 0)
		umtx_ops = &umtx_native_opsi386;
#else
	/* Likewise, UMTX_OP__I386 is a nop on i386. */
	if ((uap->op & UMTX_OP__32BIT) != 0)
		umtx_ops = &umtx_native_opsx32;
#endif
	return (kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr1,
	    uap->uaddr2, umtx_ops));
}

#ifdef COMPAT_FREEBSD32
#ifdef COMPAT_FREEBSD10
int
freebsd10_freebsd32__umtx_lock(struct thread *td,
    struct freebsd10_freebsd32__umtx_lock_args *uap)
{
	return (do_lock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid, NULL));
}

int
freebsd10_freebsd32__umtx_unlock(struct thread *td,
    struct freebsd10_freebsd32__umtx_unlock_args *uap)
{
	return (do_unlock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid));
}
#endif /* COMPAT_FREEBSD10 */

int
freebsd32__umtx_op(struct thread *td, struct freebsd32__umtx_op_args *uap)
{

	return (kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr1,
	    uap->uaddr2, &umtx_native_ops32));
}
#endif /* COMPAT_FREEBSD32 */
void
umtx_thread_init(struct thread *td)
{

	td->td_umtxq = umtxq_alloc();
	td->td_umtxq->uq_thread = td;
}

void
umtx_thread_fini(struct thread *td)
{

	umtxq_free(td->td_umtxq);
}

/*
 * This is called when a new thread is created, e.g. by fork().
 */
void
umtx_thread_alloc(struct thread *td)
{
	struct umtx_q *uq;

	uq = td->td_umtxq;
	uq->uq_inherited_pri = PRI_MAX;

	KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
	KASSERT(uq->uq_thread == td, ("uq_thread != td"));
	KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
	KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
}

/*
 * Clear the robust lists for all of the process's threads, not delaying
 * the cleanup to thread exit, since the relevant address space is
 * destroyed right now.
 */
void
umtx_exec(struct proc *p)
{
	struct thread *td;

	KASSERT(p == curproc, ("need curproc"));
	KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
	    (p->p_flag & P_STOPPED_SINGLE) != 0,
	    ("curproc must be single-threaded"));
	/*
	 * There is no need to lock the list as only this thread can be
	 * running.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(td == curthread ||
		    ((td->td_flags & TDF_BOUNDARY) != 0 && TD_IS_SUSPENDED(td)),
		    ("running thread %p %p", p, td));
		umtx_thread_cleanup(td);
		td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
	}
}

void
umtx_thread_exit(struct thread *td)
{

	umtx_thread_cleanup(td);
}

static int
umtx_read_uptr(struct thread *td, uintptr_t ptr, uintptr_t *res, bool compat32)
{
	u_long res1;
	uint32_t res32;
	int error;

	if (compat32) {
		error = fueword32((void *)ptr, &res32);
		if (error == 0)
			res1 = res32;
	} else {
		error = fueword((void *)ptr, &res1);
	}
	if (error == 0)
		*res = res1;
	else
		error = EFAULT;
	return (error);
}

static void
umtx_read_rb_list(struct thread *td, struct umutex *m, uintptr_t *rb_list,
    bool compat32)
{
	struct umutex32 m32;

	if (compat32) {
		memcpy(&m32, m, sizeof(m32));
		*rb_list = m32.m_rb_lnk;
	} else {
		*rb_list = m->m_rb_lnk;
	}
}

static int
umtx_handle_rb(struct thread *td, uintptr_t rbp, uintptr_t *rb_list, bool inact,
    bool compat32)
{
	struct umutex m;
	int error;

	KASSERT(td->td_proc == curproc, ("need current vmspace"));
	error = copyin((void *)rbp, &m, sizeof(m));
	if (error != 0)
		return (error);
	if (rb_list != NULL)
		umtx_read_rb_list(td, &m, rb_list, compat32);
	if ((m.m_flags & UMUTEX_ROBUST) == 0)
		return (EINVAL);
	if ((m.m_owner & ~UMUTEX_CONTESTED) != td->td_tid)
		/* inact is cleared after unlock, allow the inconsistency */
		return (inact ? 0 : EINVAL);
	return (do_unlock_umutex(td, (struct umutex *)rbp, true));
}
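/*
 * Sketch of the userland structure walked here: each thread publishes two
 * singly linked lists of owned robust mutexes, chained through m_rb_lnk,
 * plus one "inactive" slot naming the mutex currently being locked or
 * unlocked:
 *
 *	td_rb_list  -> m1.m_rb_lnk -> m2.m_rb_lnk -> ... -> 0
 *	td_rbp_list -> (same, for process-private mutexes)
 *	td_rb_inact -> mutex in transition, possibly 0
 */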
static void
umtx_cleanup_rb_list(struct thread *td, uintptr_t rb_list, uintptr_t *rb_inact,
    const char *name, bool compat32)
{
	int error, i;
	uintptr_t rbp;
	bool inact;

	if (rb_list == 0)
		return;
	error = umtx_read_uptr(td, rb_list, &rbp, compat32);
	for (i = 0; error == 0 && rbp != 0 && i < umtx_max_rb; i++) {
		if (rbp == *rb_inact) {
			inact = true;
			*rb_inact = 0;
		} else
			inact = false;
		error = umtx_handle_rb(td, rbp, &rbp, inact, compat32);
	}
	if (i == umtx_max_rb && umtx_verbose_rb) {
		uprintf("comm %s pid %d: reached umtx %smax rb %d\n",
		    td->td_proc->p_comm, td->td_proc->p_pid, name, umtx_max_rb);
	}
	if (error != 0 && umtx_verbose_rb) {
		uprintf("comm %s pid %d: handling %srb error %d\n",
		    td->td_proc->p_comm, td->td_proc->p_pid, name, error);
	}
}

/*
 * Clean up umtx data.
 */
static void
umtx_thread_cleanup(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;
	uintptr_t rb_inact;
	bool compat32;

	/*
	 * Disown pi mutexes.
	 */
	uq = td->td_umtxq;
	if (uq != NULL) {
		if (uq->uq_inherited_pri != PRI_MAX ||
		    !TAILQ_EMPTY(&uq->uq_pi_contested)) {
			mtx_lock(&umtx_lock);
			uq->uq_inherited_pri = PRI_MAX;
			while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
				pi->pi_owner = NULL;
				TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
			}
			mtx_unlock(&umtx_lock);
		}
		sched_lend_user_prio_cond(td, PRI_MAX);
	}

	compat32 = (td->td_pflags2 & TDP2_COMPAT32RB) != 0;
	td->td_pflags2 &= ~TDP2_COMPAT32RB;

	if (td->td_rb_inact == 0 && td->td_rb_list == 0 && td->td_rbp_list == 0)
		return;

	/*
	 * Handle terminated robust mutexes.  Must be done after
	 * robust pi disown, otherwise unlock could see unowned
	 * pi mutexes.
	 */
	rb_inact = td->td_rb_inact;
	if (rb_inact != 0)
		(void)umtx_read_uptr(td, rb_inact, &rb_inact, compat32);
	umtx_cleanup_rb_list(td, td->td_rb_list, &rb_inact, "", compat32);
	umtx_cleanup_rb_list(td, td->td_rbp_list, &rb_inact, "priv ", compat32);
	if (rb_inact != 0)
		(void)umtx_handle_rb(td, rb_inact, NULL, true, compat32);
}