 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015, 2016 The FreeBSD Foundation
 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_umtx_profiling.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>
#include <sys/umtxvar.h>

#include <security/mac/mac_framework.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <machine/atomic.h>
#include <machine/cpu.h>

#include <compat/freebsd32/freebsd32.h>
#ifdef COMPAT_FREEBSD32
#include <compat/freebsd32/freebsd32_proto.h>
#endif
#define	_UMUTEX_TRY		1
#define	_UMUTEX_WAIT		2
#define	UPROF_PERC_BIGGER(w, f, sw, sf)					\
	(((w) > (sw)) || ((w) == (sw) && (f) > (sf)))
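/*
 * Illustrative sketch (not part of the original file): UPROF_PERC_BIGGER
 * orders two percentages that are carried as an integer part (w/sw) and a
 * fractional part (f/sf), exactly the representation built by the peaks
 * sysctl handler below.  Compiles stand-alone.
 */
#if 0	/* example only */
#include <stdio.h>

#define	EX_PERC_BIGGER(w, f, sw, sf)					\
	(((w) > (sw)) || ((w) == (sw) && (f) > (sf)))

int
main(void)
{
	/* 12.75% vs 12.50%: whole parts tie, fractions decide -> 1. */
	printf("%d\n", EX_PERC_BIGGER(12, 75, 12, 50));
	/* 11.99% vs 12.01%: whole parts decide -> 0. */
	printf("%d\n", EX_PERC_BIGGER(11, 99, 12, 1));
	return (0);
}
#endif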
#define	UMTXQ_LOCKED_ASSERT(uc)		mtx_assert(&(uc)->uc_lock, MA_OWNED)

#ifdef INVARIANTS
#define	UMTXQ_ASSERT_LOCKED_BUSY(key) do {				\
	struct umtxq_chain *uc;						\
									\
	uc = umtxq_getchain(key);					\
	mtx_assert(&uc->uc_lock, MA_OWNED);				\
	KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));		\
} while (0)
#else
#define	UMTXQ_ASSERT_LOCKED_BUSY(key) do {} while (0)
#endif
/*
 * Don't propagate time-sharing priority; there is a security reason:
 * a user can simply introduce a PI-mutex, let thread A lock the mutex,
 * and let another thread B block on the mutex.  Because B is
 * sleeping, its priority will be boosted; this causes A's priority to
 * be boosted via priority propagation too, and it will never be lowered
 * even if A is using 100% CPU, which is unfair to other processes.
 */

#define	UPRI(td)	(((td)->td_user_pri >= PRI_MIN_TIMESHARE &&	\
			  (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?	\
			 PRI_MAX_TIMESHARE : (td)->td_user_pri)
#define	GOLDEN_RATIO_PRIME	2654404609U
#define	UMTX_CHAINS		512
#define	UMTX_SHIFTS		(__WORD_BIT - 9)

#define	GET_SHARE(flags)	\
    (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)

#define	BUSY_SPINS		200
struct umtx_copyops {
	int	(*copyin_timeout)(const void *uaddr, struct timespec *tsp);
	int	(*copyin_umtx_time)(const void *uaddr, size_t size,
	    struct _umtx_time *tp);
	int	(*copyin_robust_lists)(const void *uaddr, size_t size,
	    struct umtx_robust_lists_params *rbp);
	int	(*copyout_timeout)(void *uaddr, size_t size,
	    struct timespec *tsp);
	const size_t	timespec_sz;
	const size_t	umtx_time_sz;
};

_Static_assert(sizeof(struct umutex) == sizeof(struct umutex32), "umutex32");
_Static_assert(__offsetof(struct umutex, m_spare[0]) ==
    __offsetof(struct umutex32, m_spare[0]), "m_spare32");
int umtx_shm_vnobj_persistent = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_vnode_persistent, CTLFLAG_RWTUN,
    &umtx_shm_vnobj_persistent, 0,
    "False forces destruction of umtx attached to file, on last close");
static int umtx_max_rb = 1000;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_max_robust, CTLFLAG_RWTUN,
    &umtx_max_rb, 0,
    "Maximum number of robust mutexes allowed for each thread");
static uma_zone_t	umtx_pi_zone;
static struct umtxq_chain umtxq_chains[2][UMTX_CHAINS];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
static int		umtx_pi_allocated;

static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "umtx debug");
SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
    &umtx_pi_allocated, 0, "Allocated umtx_pi");
static int umtx_verbose_rb = 1;
SYSCTL_INT(_debug_umtx, OID_AUTO, robust_faults_verbose, CTLFLAG_RWTUN,
    &umtx_verbose_rb, 0, "");
#ifdef UMTX_PROFILING
static long max_length;
SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length");
static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "umtx chain stats");
#endif
static inline void umtx_abs_timeout_init2(struct umtx_abs_timeout *timo,
    const struct _umtx_time *umtxtime);

static void umtx_shm_init(void);
static void umtxq_sysinit(void *);
static void umtxq_hash(struct umtx_key *key);
static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    bool rb);
static void umtx_thread_cleanup(struct thread *td);
SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);

#define	umtxq_signal(key, nwake)	umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)

static struct mtx umtx_lock;
#ifdef UMTX_PROFILING
static void
umtx_init_profiling(void)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < UMTX_CHAINS; ++i) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "umtx hash stats");
		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL);
		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL);
	}
}
static int
sysctl_debug_umtx_chains_peaks(SYSCTL_HANDLER_ARGS)
{
	char buf[512];
	struct sbuf sb;
	struct umtxq_chain *uc;
	u_int fract, i, j, tot, whole;
	u_int sf0, sf1, sf2, sf3, sf4;
	u_int si0, si1, si2, si3, si4;
	u_int sw0, sw1, sw2, sw3, sw4;

	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
	for (i = 0; i < 2; i++) {
		tot = 0;
		for (j = 0; j < UMTX_CHAINS; ++j) {
			uc = &umtxq_chains[i][j];
			mtx_lock(&uc->uc_lock);
			tot += uc->max_length;
			mtx_unlock(&uc->uc_lock);
		}
		if (tot == 0)
			sbuf_printf(&sb, "%u) Empty ", i);
		else {
			sf0 = sf1 = sf2 = sf3 = sf4 = 0;
			si0 = si1 = si2 = si3 = si4 = 0;
			sw0 = sw1 = sw2 = sw3 = sw4 = 0;
			for (j = 0; j < UMTX_CHAINS; j++) {
				uc = &umtxq_chains[i][j];
				mtx_lock(&uc->uc_lock);
				whole = uc->max_length * 100;
				mtx_unlock(&uc->uc_lock);
				fract = (whole % tot) * 100;
				if (UPROF_PERC_BIGGER(whole, fract, sw0, sf0)) {
					sf0 = fract;
					si0 = j;
					sw0 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw1,
				    sf1)) {
					sf1 = fract;
					si1 = j;
					sw1 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw2,
				    sf2)) {
					sf2 = fract;
					si2 = j;
					sw2 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw3,
				    sf3)) {
					sf3 = fract;
					si3 = j;
					sw3 = whole;
				} else if (UPROF_PERC_BIGGER(whole, fract, sw4,
				    sf4)) {
					sf4 = fract;
					si4 = j;
					sw4 = whole;
				}
			}
			sbuf_printf(&sb, "queue %u:\n", i);
			sbuf_printf(&sb, "1st: %u.%u%% idx: %u\n", sw0 / tot,
			    sf0 / tot, si0);
			sbuf_printf(&sb, "2nd: %u.%u%% idx: %u\n", sw1 / tot,
			    sf1 / tot, si1);
			sbuf_printf(&sb, "3rd: %u.%u%% idx: %u\n", sw2 / tot,
			    sf2 / tot, si2);
			sbuf_printf(&sb, "4th: %u.%u%% idx: %u\n", sw3 / tot,
			    sf3 / tot, si3);
			sbuf_printf(&sb, "5th: %u.%u%% idx: %u\n", sw4 / tot,
			    sf4 / tot, si4);
		}
	}
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);
	return (0);
}
static int
sysctl_debug_umtx_chains_clear(SYSCTL_HANDLER_ARGS)
{
	struct umtxq_chain *uc;
	u_int i, j;
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (clear != 0) {
		for (i = 0; i < 2; ++i) {
			for (j = 0; j < UMTX_CHAINS; ++j) {
				uc = &umtxq_chains[i][j];
				mtx_lock(&uc->uc_lock);
				uc->length = 0;
				uc->max_length = 0;
				mtx_unlock(&uc->uc_lock);
			}
		}
	}
	return (0);
}

SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, clear,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_clear, "I",
    "Clear umtx chains statistics");
SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, peaks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
    sysctl_debug_umtx_chains_peaks, "A",
    "Highest peaks in chains max length");
#endif
static void
umtxq_sysinit(void *arg __unused)
{
	int i, j;

	umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	for (i = 0; i < 2; ++i) {
		for (j = 0; j < UMTX_CHAINS; ++j) {
			mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
			    MTX_DEF | MTX_DUPOK);
			LIST_INIT(&umtxq_chains[i][j].uc_queue[0]);
			LIST_INIT(&umtxq_chains[i][j].uc_queue[1]);
			LIST_INIT(&umtxq_chains[i][j].uc_spare_queue);
			TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
			umtxq_chains[i][j].uc_busy = 0;
			umtxq_chains[i][j].uc_waiters = 0;
#ifdef UMTX_PROFILING
			umtxq_chains[i][j].length = 0;
			umtxq_chains[i][j].max_length = 0;
#endif
		}
	}
#ifdef UMTX_PROFILING
	umtx_init_profiling();
#endif
	mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);
	umtx_shm_init();
}
struct umtx_q *
umtxq_alloc(void)
{
	struct umtx_q *uq;

	uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
	uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX,
	    M_WAITOK | M_ZERO);
	TAILQ_INIT(&uq->uq_spare_queue->head);
	TAILQ_INIT(&uq->uq_pi_contested);
	uq->uq_inherited_pri = PRI_MAX;
	return (uq);
}

void
umtxq_free(struct umtx_q *uq)
{

	MPASS(uq->uq_spare_queue != NULL);
	free(uq->uq_spare_queue, M_UMTX);
	free(uq, M_UMTX);
}
static void
umtxq_hash(struct umtx_key *key)
{
	unsigned n;

	n = (uintptr_t)key->info.both.a + key->info.both.b;
	key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
}
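/*
 * Illustrative sketch (not part of the original file): the hash above is a
 * Fibonacci-style multiplicative hash.  Multiplying by a prime close to
 * 2^32/phi scatters nearby addresses, and the shift keeps the high-order,
 * well-mixed bits.  The EX_* names are stand-ins for the kernel constants.
 */
#if 0	/* example only */
#include <stdint.h>
#include <stdio.h>

#define	EX_GOLDEN_RATIO_PRIME	2654404609U
#define	EX_CHAINS		512
#define	EX_SHIFTS		(32 - 9)

static unsigned
example_hash(uint32_t n)
{
	return (((n * EX_GOLDEN_RATIO_PRIME) >> EX_SHIFTS) % EX_CHAINS);
}

int
main(void)
{
	/* Two adjacent lock addresses land on unrelated chains. */
	printf("%u %u\n", example_hash(0x1000), example_hash(0x1008));
	return (0);
}
#endif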
struct umtxq_chain *
umtxq_getchain(struct umtx_key *key)
{

	if (key->type <= TYPE_SEM)
		return (&umtxq_chains[1][key->hash]);
	return (&umtxq_chains[0][key->hash]);
}
/*
 * Set the chain to the busy state when the following operation
 * may block (a kernel mutex cannot be used).
 */
void
umtxq_busy(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_assert(&uc->uc_lock, MA_OWNED);
	if (uc->uc_busy) {
#ifdef SMP
		if (smp_cpus > 1) {
			int count = BUSY_SPINS;

			if (count > 0) {
				umtxq_unlock(key);
				while (uc->uc_busy && --count > 0)
					cpu_spinwait();
				umtxq_lock(key);
			}
		}
#endif
		while (uc->uc_busy) {
			uc->uc_waiters++;
			msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
			uc->uc_waiters--;
		}
	}
	uc->uc_busy = 1;
}
void
umtxq_unbusy(struct umtx_key *key)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	mtx_assert(&uc->uc_lock, MA_OWNED);
	KASSERT(uc->uc_busy != 0, ("not busy"));
	uc->uc_busy = 0;
	if (uc->uc_waiters)
		wakeup_one(uc);
}

void
umtxq_unbusy_unlocked(struct umtx_key *key)
{

	umtxq_lock(key);
	umtxq_unbusy(key);
	umtxq_unlock(key);
}
static struct umtxq_queue *
umtxq_queue_lookup(struct umtx_key *key, int q)
{
	struct umtxq_queue *uh;
	struct umtxq_chain *uc;

	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);
	LIST_FOREACH(uh, &uc->uc_queue[q], link) {
		if (umtx_key_match(&uh->key, key))
			return (uh);
	}
	return (NULL);
}
void
umtxq_insert_queue(struct umtx_q *uq, int q)
{
	struct umtxq_queue *uh;
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue"));
	uh = umtxq_queue_lookup(&uq->uq_key, q);
	if (uh != NULL) {
		LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
	} else {
		uh = uq->uq_spare_queue;
		uh->key = uq->uq_key;
		LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
#ifdef UMTX_PROFILING
		uc->length++;
		if (uc->length > uc->max_length) {
			uc->max_length = uc->length;
			if (uc->max_length > max_length)
				max_length = uc->max_length;
		}
#endif
	}
	uq->uq_spare_queue = NULL;

	TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
	uh->length++;
	uq->uq_flags |= UQF_UMTXQ;
	uq->uq_cur_queue = uh;
}
void
umtxq_remove_queue(struct umtx_q *uq, int q)
{
	struct umtxq_chain *uc;
	struct umtxq_queue *uh;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	if (uq->uq_flags & UQF_UMTXQ) {
		uh = uq->uq_cur_queue;
		TAILQ_REMOVE(&uh->head, uq, uq_link);
		uh->length--;
		uq->uq_flags &= ~UQF_UMTXQ;
		if (TAILQ_EMPTY(&uh->head)) {
			KASSERT(uh->length == 0,
			    ("inconsistent umtxq_queue length"));
#ifdef UMTX_PROFILING
			uc->length--;
#endif
			LIST_REMOVE(uh, link);
		} else {
			uh = LIST_FIRST(&uc->uc_spare_queue);
			KASSERT(uh != NULL, ("uc_spare_queue is empty"));
			LIST_REMOVE(uh, link);
		}
		uq->uq_spare_queue = uh;
		uq->uq_cur_queue = NULL;
	}
}
/*
 * Check if there are multiple waiters.
 */
int
umtxq_count(struct umtx_key *key)
{
	struct umtxq_queue *uh;

	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
	if (uh != NULL)
		return (uh->length);
	return (0);
}
/*
 * Check if there are multiple PI waiters and return the first one.
 */
static int
umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
{
	struct umtxq_queue *uh;

	*first = NULL;
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
	if (uh != NULL) {
		*first = TAILQ_FIRST(&uh->head);
		return (uh->length);
	}
	return (0);
}
/*
 * Wake up threads waiting on a userland object, filtered by a bit mask.
 */
int
umtxq_signal_mask(struct umtx_key *key, int n_wake, u_int bitset)
{
	struct umtxq_queue *uh;
	struct umtx_q *uq, *uq_temp;
	int ret;

	ret = 0;
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
	if (uh == NULL)
		return (0);
	TAILQ_FOREACH_SAFE(uq, &uh->head, uq_link, uq_temp) {
		if ((uq->uq_bitset & bitset) == 0)
			continue;
		umtxq_remove_queue(uq, UMTX_SHARED_QUEUE);
		wakeup_one(uq);
		if (++ret >= n_wake)
			break;
	}
	return (ret);
}
/*
 * Wake up threads waiting on a userland object.
 */
int
umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
{
	struct umtxq_queue *uh;
	struct umtx_q *uq;
	int ret;

	ret = 0;
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	uh = umtxq_queue_lookup(key, q);
	if (uh != NULL) {
		while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
			umtxq_remove_queue(uq, q);
			wakeup(uq);
			if (++ret >= n_wake)
				return (ret);
		}
	}
	return (ret);
}
/*
 * Wake up the specified thread.
 */
static inline void
umtxq_signal_thread(struct umtx_q *uq)
{

	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));
	umtxq_remove(uq);
	wakeup(uq);
}
/*
 * Wake up a maximum of n_wake threads that are waiting on a userland
 * object identified by key. The remaining threads are removed from the
 * queue identified by key and added to the queue identified by key2
 * (requeued). The n_requeue specifies an upper limit on the number of
 * threads that are requeued to the second queue.
 */
int
umtxq_requeue(struct umtx_key *key, int n_wake, struct umtx_key *key2,
    int n_requeue)
{
	struct umtxq_queue *uh;
	struct umtx_q *uq, *uq_temp;
	int ret;

	ret = 0;
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key2));
	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
	if (uh == NULL)
		return (0);
	TAILQ_FOREACH_SAFE(uq, &uh->head, uq_link, uq_temp) {
		if (++ret <= n_wake) {
			umtxq_remove(uq);
			wakeup_one(uq);
		} else {
			umtxq_remove(uq);
			uq->uq_key = *key2;
			umtxq_insert(uq);
			if (ret - n_wake == n_requeue)
				break;
		}
	}
	return (ret);
}
static int
tstohz(const struct timespec *tsp)
{
	struct timeval tv;

	TIMESPEC_TO_TIMEVAL(&tv, tsp);
	return (tvtohz(&tv));
}
void
umtx_abs_timeout_init(struct umtx_abs_timeout *timo, int clockid,
    int absolute, const struct timespec *timeout)
{

	timo->clockid = clockid;
	if (!absolute) {
		timo->is_abs_real = false;
		kern_clock_gettime(curthread, timo->clockid, &timo->cur);
		timespecadd(&timo->cur, timeout, &timo->end);
	} else {
		timo->end = *timeout;
		timo->is_abs_real = clockid == CLOCK_REALTIME ||
		    clockid == CLOCK_REALTIME_FAST ||
		    clockid == CLOCK_REALTIME_PRECISE ||
		    clockid == CLOCK_SECOND;
	}
}
static inline void
umtx_abs_timeout_init2(struct umtx_abs_timeout *timo,
    const struct _umtx_time *umtxtime)
{

	umtx_abs_timeout_init(timo, umtxtime->_clockid,
	    (umtxtime->_flags & UMTX_ABSTIME) != 0, &umtxtime->_timeout);
}
static int
umtx_abs_timeout_getsbt(struct umtx_abs_timeout *timo, sbintime_t *sbt,
    int *flags)
{
	struct bintime bt, bbt;
	struct timespec tts;

	switch (timo->clockid) {

	/* Clocks that can be converted into absolute time. */
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_PRECISE:
	case CLOCK_REALTIME_FAST:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_PRECISE:
	case CLOCK_UPTIME_FAST:
	case CLOCK_SECOND:
		timespec2bintime(&timo->end, &bt);
		switch (timo->clockid) {
		case CLOCK_REALTIME:
		case CLOCK_REALTIME_PRECISE:
		case CLOCK_REALTIME_FAST:
		case CLOCK_SECOND:
			getboottimebin(&bbt);
			bintime_sub(&bt, &bbt);
			break;
		}
		if (bt.sec < 0)
			return (ETIMEDOUT);
		if (bt.sec >= (SBT_MAX >> 32)) {
			*sbt = 0;
			*flags = 0;
			return (0);
		}
		*sbt = bttosbt(bt);
		switch (timo->clockid) {
		case CLOCK_REALTIME_FAST:
		case CLOCK_MONOTONIC_FAST:
		case CLOCK_UPTIME_FAST:
			*sbt += tc_tick_sbt;
			break;
		}
		*flags = C_ABSOLUTE;
		break;

	/* Clocks that have to be periodically polled. */
	case CLOCK_VIRTUAL:
	case CLOCK_PROF:
	case CLOCK_THREAD_CPUTIME_ID:
	case CLOCK_PROCESS_CPUTIME_ID:
	default:
		kern_clock_gettime(curthread, timo->clockid, &timo->cur);
		if (timespeccmp(&timo->end, &timo->cur, <=))
			return (ETIMEDOUT);
		timespecsub(&timo->end, &timo->cur, &tts);
		*sbt = tick_sbt * tstohz(&tts);
		*flags = C_HARDCLOCK;
		break;
	}
	return (0);
}
static uint32_t
umtx_unlock_val(uint32_t flags, bool rb)
{

	if (rb)
		return (UMUTEX_RB_OWNERDEAD);
	else if ((flags & UMUTEX_NONCONSISTENT) != 0)
		return (UMUTEX_RB_NOTRECOV);
	else
		return (UMUTEX_UNOWNED);
}
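/*
 * Illustrative sketch (not part of the original file): the three values a
 * robust-aware unlock can store, using the real constants from
 * <sys/umtx.h>.  A dying robust owner (rb) leaves UMUTEX_RB_OWNERDEAD
 * behind, a mutex already flagged UMUTEX_NONCONSISTENT is parked at
 * UMUTEX_RB_NOTRECOV, and a plain unlock stores UMUTEX_UNOWNED.
 */
#if 0	/* example only */
#include <stdio.h>
#include <sys/types.h>
#include <sys/umtx.h>

int
main(void)
{
	printf("owner-dead=%#x not-recoverable=%#x unowned=%#x\n",
	    UMUTEX_RB_OWNERDEAD, UMUTEX_RB_NOTRECOV, UMUTEX_UNOWNED);
	return (0);
}
#endif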
/*
 * Put the thread into a sleep state; before sleeping, check whether the
 * thread was removed from the umtx queue.
 */
int
umtxq_sleep(struct umtx_q *uq, const char *wmesg,
    struct umtx_abs_timeout *timo)
{
	struct umtxq_chain *uc;
	sbintime_t sbt = 0;
	int error, flags = 0;

	uc = umtxq_getchain(&uq->uq_key);
	UMTXQ_LOCKED_ASSERT(uc);
	for (;;) {
		if (!(uq->uq_flags & UQF_UMTXQ)) {
			error = 0;
			break;
		}
		if (timo != NULL) {
			if (timo->is_abs_real)
				curthread->td_rtcgen =
				    atomic_load_acq_int(&rtc_generation);
			error = umtx_abs_timeout_getsbt(timo, &sbt, &flags);
			if (error != 0)
				break;
		}
		error = msleep_sbt(uq, &uc->uc_lock, PCATCH | PDROP, wmesg,
		    sbt, 0, flags);
		uc = umtxq_getchain(&uq->uq_key);
		mtx_lock(&uc->uc_lock);
		if (error == EINTR || error == ERESTART)
			break;
		if (error == EWOULDBLOCK && (flags & C_ABSOLUTE) != 0) {
			error = ETIMEDOUT;
			break;
		}
	}
	curthread->td_rtcgen = 0;
	return (error);
}
/*
 * Convert a userspace address into a unique logical address.
 */
int
umtx_key_get(const void *addr, int type, int share, struct umtx_key *key)
{
	struct thread *td = curthread;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;

	key->type = type;
	if (share == THREAD_SHARE) {
		key->shared = 0;
		key->info.private.vs = td->td_proc->p_vmspace;
		key->info.private.addr = (uintptr_t)addr;
	} else {
		MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
		map = &td->td_proc->p_vmspace->vm_map;
		if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
		    &entry, &key->info.shared.object, &pindex, &prot,
		    &wired) != KERN_SUCCESS) {
			return (EFAULT);
		}

		if ((share == PROCESS_SHARE) ||
		    (share == AUTO_SHARE &&
		     VM_INHERIT_SHARE == entry->inheritance)) {
			key->shared = 1;
			key->info.shared.offset = (vm_offset_t)addr -
			    entry->start + entry->offset;
			vm_object_reference(key->info.shared.object);
		} else {
			key->shared = 0;
			key->info.private.vs = td->td_proc->p_vmspace;
			key->info.private.addr = (uintptr_t)addr;
		}
		vm_map_lookup_done(map, entry);
	}

	umtxq_hash(key);
	return (0);
}

/*
 * Release key.
 */
void
umtx_key_release(struct umtx_key *key)
{

	if (key->shared)
		vm_object_deallocate(key->info.shared.object);
}
#ifdef COMPAT_FREEBSD10
/*
 * Lock a umtx object.
 */
static int
do_lock_umtx(struct thread *td, struct umtx *umtx, u_long id,
    const struct timespec *timeout)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	u_long owner;
	u_long old;
	int error = 0;

	uq = td->td_umtxq;
	if (timeout != NULL)
		umtx_abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout);

	/*
	 * Care must be exercised when dealing with the umtx structure. It
	 * can fault on any access.
	 */
	for (;;) {
		/*
		 * Try the uncontested case. This should be done in userland.
		 */
		owner = casuword(&umtx->u_owner, UMTX_UNOWNED, id);

		/* The acquire succeeded. */
		if (owner == UMTX_UNOWNED)
			return (0);

		/* The address was invalid. */
		if (owner == -1)
			return (EFAULT);

		/* If no one owns it but it is contested, try to acquire it. */
		if (owner == UMTX_CONTESTED) {
			owner = casuword(&umtx->u_owner,
			    UMTX_CONTESTED, id | UMTX_CONTESTED);

			if (owner == UMTX_CONTESTED)
				return (0);

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			error = thread_check_susp(td, false);
			if (error != 0)
				break;

			/* If this failed the lock has changed, restart. */
			continue;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK,
		    AUTO_SHARE, &uq->uq_key)) != 0)
			return (error);

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock. If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		old = casuword(&umtx->u_owner, owner, owner | UMTX_CONTESTED);

		/* The address was invalid. */
		if (old == -1) {
			umtxq_lock(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/*
		 * We set the contested bit, sleep. Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		if (old == owner)
			error = umtxq_sleep(uq, "umtx", timeout == NULL ?
			    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);

		if (error == 0)
			error = thread_check_susp(td, false);
	}

	if (timeout == NULL) {
		/* Mutex locking is restarted if it is interrupted. */
		if (error == EINTR)
			error = ERESTART;
	} else {
		/* Timed-locking is not restarted. */
		if (error == ERESTART)
			error = EINTR;
	}
	return (error);
}
/*
 * Unlock a umtx object.
 */
static int
do_unlock_umtx(struct thread *td, struct umtx *umtx, u_long id)
{
	struct umtx_key key;
	u_long owner;
	u_long old;
	int error;
	int count;

	/*
	 * Make sure we own this mtx.
	 */
	owner = fuword(__DEVOLATILE(u_long *, &umtx->u_owner));
	if (owner == -1)
		return (EFAULT);

	if ((owner & ~UMTX_CONTESTED) != id)
		return (EPERM);

	/* This should be done in userland */
	if ((owner & UMTX_CONTESTED) == 0) {
		old = casuword(&umtx->u_owner, owner, UMTX_UNOWNED);
		if (old == -1)
			return (EFAULT);
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK, AUTO_SHARE,
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is at most one thread waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	old = casuword(&umtx->u_owner, owner,
	    count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
	umtxq_lock(&key);
	umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (old == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}
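/*
 * Illustrative sketch (not part of the original file): a simplified
 * userland view of the contested-bit protocol implemented above, on a
 * plain atomic word rather than the real struct umtx.  The ex_* names are
 * hypothetical and the kernel sleep/wake steps are elided, so this
 * version spins where the real code sleeps.
 */
#if 0	/* example only */
#include <stdatomic.h>

#define	EX_UNOWNED	0UL
#define	EX_CONTESTED	(1UL << 31)

static void
ex_lock(_Atomic unsigned long *lk, unsigned long id)
{
	unsigned long owner;

	for (;;) {
		owner = EX_UNOWNED;
		/* Uncontested fast path: no system call at all. */
		if (atomic_compare_exchange_strong(lk, &owner, id))
			return;
		/*
		 * Set the contested bit so the unlocker knows it must
		 * enter the kernel, then sleep until woken (elided).
		 */
		atomic_fetch_or(lk, EX_CONTESTED);
	}
}

static void
ex_unlock(_Atomic unsigned long *lk, unsigned long id)
{
	unsigned long owner;

	owner = id;		/* expect an uncontested word */
	if (atomic_compare_exchange_strong(lk, &owner, EX_UNOWNED))
		return;		/* nobody waited; again no system call */
	/* Contested: hand off in the kernel and wake a waiter (elided). */
}
#endif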
#ifdef COMPAT_FREEBSD32
/*
 * Lock a umtx object.
 */
static int
do_lock_umtx32(struct thread *td, uint32_t *m, uint32_t id,
    const struct timespec *timeout)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	uint32_t owner;
	uint32_t old;
	int error = 0;

	uq = td->td_umtxq;

	if (timeout != NULL)
		umtx_abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout);

	/*
	 * Care must be exercised when dealing with the umtx structure. It
	 * can fault on any access.
	 */
	for (;;) {
		/*
		 * Try the uncontested case. This should be done in userland.
		 */
		owner = casuword32(m, UMUTEX_UNOWNED, id);

		/* The acquire succeeded. */
		if (owner == UMUTEX_UNOWNED)
			return (0);

		/* The address was invalid. */
		if (owner == -1)
			return (EFAULT);

		/* If no one owns it but it is contested, try to acquire it. */
		if (owner == UMUTEX_CONTESTED) {
			owner = casuword32(m,
			    UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
			if (owner == UMUTEX_CONTESTED)
				return (0);

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			error = thread_check_susp(td, false);
			if (error != 0)
				break;

			/* If this failed the lock has changed, restart. */
			continue;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK,
		    AUTO_SHARE, &uq->uq_key)) != 0)
			return (error);

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock. If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		old = casuword32(m, owner, owner | UMUTEX_CONTESTED);

		/* The address was invalid. */
		if (old == -1) {
			umtxq_lock(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/*
		 * We set the contested bit, sleep. Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		if (old == owner)
			error = umtxq_sleep(uq, "umtx", timeout == NULL ?
			    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);

		if (error == 0)
			error = thread_check_susp(td, false);
	}

	if (timeout == NULL) {
		/* Mutex locking is restarted if it is interrupted. */
		if (error == EINTR)
			error = ERESTART;
	} else {
		/* Timed-locking is not restarted. */
		if (error == ERESTART)
			error = EINTR;
	}
	return (error);
}
/*
 * Unlock a umtx object.
 */
static int
do_unlock_umtx32(struct thread *td, uint32_t *m, uint32_t id)
{
	struct umtx_key key;
	uint32_t owner;
	uint32_t old;
	int error;
	int count;

	/*
	 * Make sure we own this mtx.
	 */
	owner = fuword32(m);
	if (owner == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	/* This should be done in userland */
	if ((owner & UMUTEX_CONTESTED) == 0) {
		old = casuword32(m, owner, UMUTEX_UNOWNED);
		if (old == -1)
			return (EFAULT);
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK, AUTO_SHARE,
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is at most one thread waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	old = casuword32(m, owner,
	    count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
	umtxq_lock(&key);
	umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (old == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}
#endif	/* COMPAT_FREEBSD32 */
#endif	/* COMPAT_FREEBSD10 */
/*
 * Fetch and compare value; sleep on the address if the value is unchanged.
 */
static int
do_wait(struct thread *td, void *addr, u_long id,
    struct _umtx_time *timeout, int compat32, int is_private)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	u_long tmp;
	uint32_t tmp32;
	int error = 0;

	uq = td->td_umtxq;
	if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
	    is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		umtx_abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);
	if (compat32 == 0) {
		error = fueword(addr, &tmp);
		if (error != 0)
			error = EFAULT;
	} else {
		error = fueword32(addr, &tmp32);
		if (error == 0)
			tmp = tmp32;
		else
			error = EFAULT;
	}
	umtxq_lock(&uq->uq_key);
	if (error == 0) {
		if (tmp == id)
			error = umtxq_sleep(uq, "uwait", timeout == NULL ?
			    NULL : &timo);
		if ((uq->uq_flags & UQF_UMTXQ) == 0)
			error = 0;
		else
			umtxq_remove(uq);
	} else if ((uq->uq_flags & UQF_UMTXQ) != 0) {
		umtxq_remove(uq);
	}
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
/*
 * Wake up threads sleeping on the specified address.
 */
int
kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
{
	struct umtx_key key;
	int ret;

	if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
	    is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
		return (ret);
	umtxq_lock(&key);
	umtxq_signal(&key, n_wake);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (0);
}
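/*
 * Illustrative sketch (not part of the original file): how userland
 * reaches do_wait()/kern_umtx_wake(), via the _umtx_op(2) system call
 * with the private WAIT/WAKE operations from <sys/umtx.h>.  The kernel
 * re-reads the word and only sleeps while it still holds the expected
 * value, which closes the lost-wakeup race.
 */
#if 0	/* example only */
#include <sys/types.h>
#include <sys/umtx.h>

static unsigned int futex_word;

static void
wait_while_equal(unsigned int expected)
{
	/* Last two NULLs: no timeout argument. */
	_umtx_op(&futex_word, UMTX_OP_WAIT_UINT_PRIVATE, expected,
	    NULL, NULL);
}

static void
wake_one(void)
{
	_umtx_op(&futex_word, UMTX_OP_WAKE_PRIVATE, 1, NULL, NULL);
}
#endif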
/*
 * Lock a PTHREAD_PRIO_NONE protocol POSIX mutex.
 */
static int
do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int mode)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	uint32_t owner, old, id;
	int error, rv;

	id = td->td_tid;
	uq = td->td_umtxq;
	error = 0;
	if (timeout != NULL)
		umtx_abs_timeout_init2(&timo, timeout);

	/*
	 * Care must be exercised when dealing with the umtx structure. It
	 * can fault on any access.
	 */
	for (;;) {
		rv = fueword32(&m->m_owner, &owner);
		if (rv == -1)
			return (EFAULT);
		if (mode == _UMUTEX_WAIT) {
			if (owner == UMUTEX_UNOWNED ||
			    owner == UMUTEX_CONTESTED ||
			    owner == UMUTEX_RB_OWNERDEAD ||
			    owner == UMUTEX_RB_NOTRECOV)
				return (0);
		} else {
			/*
			 * Robust mutex terminated. Kernel duty is to
			 * return EOWNERDEAD to the userspace. The
			 * umutex.m_flags UMUTEX_NONCONSISTENT is set
			 * by the common userspace code.
			 */
			if (owner == UMUTEX_RB_OWNERDEAD) {
				rv = casueword32(&m->m_owner,
				    UMUTEX_RB_OWNERDEAD, &owner,
				    id | UMUTEX_CONTESTED);
				if (rv == -1)
					return (EFAULT);
				if (rv == 0) {
					MPASS(owner == UMUTEX_RB_OWNERDEAD);
					return (EOWNERDEAD); /* success */
				}
				MPASS(rv == 1);
				rv = thread_check_susp(td, false);
				if (rv != 0)
					return (rv);
				continue;
			}
			if (owner == UMUTEX_RB_NOTRECOV)
				return (ENOTRECOVERABLE);

			/*
			 * Try the uncontested case. This should be
			 * done in userland.
			 */
			rv = casueword32(&m->m_owner, UMUTEX_UNOWNED,
			    &owner, id);
			/* The address was invalid. */
			if (rv == -1)
				return (EFAULT);

			/* The acquire succeeded. */
			if (rv == 0) {
				MPASS(owner == UMUTEX_UNOWNED);
				return (0);
			}

			/*
			 * If no one owns it but it is contested, try
			 * to acquire it.
			 */
			MPASS(rv == 1);
			if (owner == UMUTEX_CONTESTED) {
				rv = casueword32(&m->m_owner,
				    UMUTEX_CONTESTED, &owner,
				    id | UMUTEX_CONTESTED);
				/* The address was invalid. */
				if (rv == -1)
					return (EFAULT);
				if (rv == 0) {
					MPASS(owner == UMUTEX_CONTESTED);
					return (0);
				}
				if (rv == 1) {
					rv = thread_check_susp(td, false);
					if (rv != 0)
						return (rv);
				}

				/*
				 * If this failed the lock has
				 * changed, restart.
				 */
				continue;
			}

			/* rv == 1 but not contested, likely store failure */
			rv = thread_check_susp(td, false);
			if (rv != 0)
				return (rv);
		}

		if (mode == _UMUTEX_TRY)
			return (EBUSY);

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			return (error);

		if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
		    GET_SHARE(flags), &uq->uq_key)) != 0)
			return (error);

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock. If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		rv = casueword32(&m->m_owner, owner, &old,
		    owner | UMUTEX_CONTESTED);

		/* The address was invalid or casueword failed to store. */
		if (rv == -1 || rv == 1) {
			umtxq_lock(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			if (rv == -1)
				return (EFAULT);
			error = thread_check_susp(td, false);
			if (error != 0)
				return (error);
			continue;
		}

		/*
		 * We set the contested bit, sleep. Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		umtxq_unbusy(&uq->uq_key);
		MPASS(old == owner);
		error = umtxq_sleep(uq, "umtxn", timeout == NULL ?
		    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);

		if (error == 0)
			error = thread_check_susp(td, false);
	}

	return (0);
}
/*
 * Unlock a PTHREAD_PRIO_NONE protocol POSIX mutex.
 */
static int
do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	uint32_t owner, old, id, newlock;
	int error, count;

	id = td->td_tid;

again:
	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	newlock = umtx_unlock_val(flags, rb);
	if ((owner & UMUTEX_CONTESTED) == 0) {
		error = casueword32(&m->m_owner, owner, &old, newlock);
		if (error == -1)
			return (EFAULT);
		if (error == 1) {
			error = thread_check_susp(td, false);
			if (error != 0)
				return (error);
			goto again;
		}
		MPASS(old == owner);
		return (0);
	}

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is at most one thread waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	if (count > 1)
		newlock |= UMUTEX_CONTESTED;
	error = casueword32(&m->m_owner, owner, &old, newlock);
	umtxq_lock(&key);
	umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (error == -1)
		return (EFAULT);
	if (error == 1) {
		if (old != owner)
			return (EINVAL);
		error = thread_check_susp(td, false);
		if (error != 0)
			return (error);
		goto again;
	}
	MPASS(old == owner);
	return (0);
}
/*
 * Check if the mutex is available and wake up a waiter,
 * only for a simple mutex.
 */
static int
do_wake_umutex(struct thread *td, struct umutex *m)
{
	struct umtx_key key;
	uint32_t owner;
	uint32_t flags;
	int error;
	int count;

again:
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != 0 && owner != UMUTEX_RB_OWNERDEAD &&
	    owner != UMUTEX_RB_NOTRECOV)
		return (0);

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	if (count <= 1 && owner != UMUTEX_RB_OWNERDEAD &&
	    owner != UMUTEX_RB_NOTRECOV) {
		error = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    UMUTEX_UNOWNED);
		if (error == -1) {
			error = EFAULT;
		} else if (error == 1) {
			umtxq_lock(&key);
			umtxq_unbusy(&key);
			umtxq_unlock(&key);
			umtx_key_release(&key);
			error = thread_check_susp(td, false);
			if (error != 0)
				return (error);
			goto again;
		}
	}

	umtxq_lock(&key);
	if (error == 0 && count != 0) {
		MPASS((owner & ~UMUTEX_CONTESTED) == 0 ||
		    owner == UMUTEX_RB_OWNERDEAD ||
		    owner == UMUTEX_RB_NOTRECOV);
		umtxq_signal(&key, 1);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
/*
 * Check if the mutex has waiters and try to fix the contention bit.
 */
static int
do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
{
	struct umtx_key key;
	uint32_t owner, old;
	int type;
	int error;
	int count;

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT |
	    UMUTEX_ROBUST)) {
	case 0:
	case UMUTEX_ROBUST:
		type = TYPE_NORMAL_UMUTEX;
		break;
	case UMUTEX_PRIO_INHERIT:
		type = TYPE_PI_UMUTEX;
		break;
	case (UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST):
		type = TYPE_PI_ROBUST_UMUTEX;
		break;
	case UMUTEX_PRIO_PROTECT:
		type = TYPE_PP_UMUTEX;
		break;
	case (UMUTEX_PRIO_PROTECT | UMUTEX_ROBUST):
		type = TYPE_PP_ROBUST_UMUTEX;
		break;
	default:
		return (EINVAL);
	}
	if ((error = umtx_key_get(m, type, GET_SHARE(flags), &key)) != 0)
		return (error);

	owner = 0;
	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		error = EFAULT;

	/*
	 * Only repair the contention bit if there is a waiter; this means
	 * the mutex is still being referenced by userland code.
	 * Otherwise, don't update any memory.
	 */
	while (error == 0 && (owner & UMUTEX_CONTESTED) == 0 &&
	    (count > 1 || (count == 1 && (owner & ~UMUTEX_CONTESTED) != 0))) {
		error = casueword32(&m->m_owner, owner, &old,
		    owner | UMUTEX_CONTESTED);
		if (error == -1) {
			error = EFAULT;
			break;
		}
		if (error == 0) {
			MPASS(old == owner);
			break;
		}
		owner = old;
		error = thread_check_susp(td, false);
	}

	umtxq_lock(&key);
	if (error == EFAULT) {
		umtxq_signal(&key, INT_MAX);
	} else if (count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 ||
	    owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV))
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
static struct umtx_pi *
umtx_pi_alloc(int flags)
{
	struct umtx_pi *pi;

	pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
	TAILQ_INIT(&pi->pi_blocked);
	atomic_add_int(&umtx_pi_allocated, 1);
	return (pi);
}

static void
umtx_pi_free(struct umtx_pi *pi)
{

	uma_zfree(umtx_pi_zone, pi);
	atomic_add_int(&umtx_pi_allocated, -1);
}
/*
 * Adjust the thread's position on a pi_state after its priority has been
 * changed.
 */
static void
umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
{
	struct umtx_q *uq, *uq1, *uq2;
	struct thread *td1;

	mtx_assert(&umtx_lock, MA_OWNED);
	if (pi == NULL)
		return;

	uq = td->td_umtxq;

	/*
	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if either its priority is lower than
	 * the previous thread or higher than the next thread.
	 */
	uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
	uq2 = TAILQ_NEXT(uq, uq_lockq);
	if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
	    (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved to.
		 */
		TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
		TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
			td1 = uq1->uq_thread;
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (UPRI(td1) > UPRI(td))
				break;
		}

		if (uq1 == NULL)
			TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
		else
			TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
	}
}
static struct umtx_pi *
umtx_pi_next(struct umtx_pi *pi)
{
	struct umtx_q *uq_owner;

	if (pi->pi_owner == NULL)
		return (NULL);
	uq_owner = pi->pi_owner->td_umtxq;
	if (uq_owner == NULL)
		return (NULL);
	return (uq_owner->uq_pi_blocked);
}
/*
 * Floyd's Cycle-Finding Algorithm.
 */
static bool
umtx_pi_check_loop(struct umtx_pi *pi)
{
	struct umtx_pi *pi1;	/* fast iterator */

	mtx_assert(&umtx_lock, MA_OWNED);
	if (pi == NULL)
		return (false);
	pi1 = pi;
	for (;;) {
		pi = umtx_pi_next(pi);
		if (pi == NULL)
			break;
		pi1 = umtx_pi_next(pi1);
		if (pi1 == NULL)
			break;
		pi1 = umtx_pi_next(pi1);
		if (pi1 == NULL)
			break;
		if (pi == pi1)
			return (true);
	}
	return (false);
}
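/*
 * Illustrative sketch (not part of the original file): the same
 * tortoise-and-hare walk on an ordinary singly linked list.  The fast
 * iterator advances two links per step and the slow one a single link;
 * they can meet again only if the chain of owners is circular, which is
 * how a deadlocked PI ownership cycle is detected without allocating.
 */
#if 0	/* example only */
#include <stdbool.h>
#include <stddef.h>

struct node { struct node *next; };

static bool
has_cycle(struct node *slow)
{
	struct node *fast = slow;

	while (slow != NULL) {
		slow = slow->next;
		fast = fast->next;
		if (fast == NULL)
			break;
		fast = fast->next;
		if (fast == NULL)
			break;
		if (slow == fast)
			return (true);
	}
	return (false);
}
#endif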
/*
 * Propagate priority when a thread is blocked on a POSIX
 * PI mutex.
 */
static void
umtx_propagate_priority(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;
	int pri;

	mtx_assert(&umtx_lock, MA_OWNED);
	pri = UPRI(td);
	uq = td->td_umtxq;
	pi = uq->uq_pi_blocked;
	if (pi == NULL)
		return;
	if (umtx_pi_check_loop(pi))
		return;

	for (;;) {
		td = pi->pi_owner;
		if (td == NULL || td == curthread)
			return;

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		thread_lock(td);
		if (td->td_lend_user_pri > pri)
			sched_lend_user_prio(td, pri);
		else {
			thread_unlock(td);
			break;
		}
		thread_unlock(td);

		/*
		 * Pick up the lock that td is blocked on.
		 */
		uq = td->td_umtxq;
		pi = uq->uq_pi_blocked;
		if (pi == NULL)
			break;
		/* Resort td on the list if needed. */
		umtx_pi_adjust_thread(pi, td);
	}
}
/*
 * Unpropagate priority for a PI mutex when a thread blocked on
 * it is interrupted by a signal or resumed by others.
 */
static void
umtx_repropagate_priority(struct umtx_pi *pi)
{
	struct umtx_q *uq, *uq_owner;
	struct umtx_pi *pi2;
	int pri;

	mtx_assert(&umtx_lock, MA_OWNED);

	if (umtx_pi_check_loop(pi))
		return;
	while (pi != NULL && pi->pi_owner != NULL) {
		pri = PRI_MAX;
		uq_owner = pi->pi_owner->td_umtxq;

		TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
			uq = TAILQ_FIRST(&pi2->pi_blocked);
			if (uq != NULL) {
				if (pri > UPRI(uq->uq_thread))
					pri = UPRI(uq->uq_thread);
			}
		}

		if (pri > uq_owner->uq_inherited_pri)
			pri = uq_owner->uq_inherited_pri;
		thread_lock(pi->pi_owner);
		sched_lend_user_prio(pi->pi_owner, pri);
		thread_unlock(pi->pi_owner);
		if ((pi = uq_owner->uq_pi_blocked) != NULL)
			umtx_pi_adjust_thread(pi, uq_owner->uq_thread);
	}
}
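/*
 * Illustrative sketch (not part of the original file): the recomputation
 * performed by umtx_repropagate_priority() in miniature.  An owner's lent
 * priority is the numerically smallest (i.e. most important) of its
 * inherited priority and the top waiter of every PI mutex it still holds.
 * The priority values here are arbitrary.
 */
#if 0	/* example only */
#include <stdio.h>

int
main(void)
{
	int inherited_pri = 120;
	int top_waiter_pri[] = { 140, 96, 130 };	/* one per held mutex */
	int i, pri;

	pri = inherited_pri;
	for (i = 0; i < 3; i++)
		if (top_waiter_pri[i] < pri)
			pri = top_waiter_pri[i];
	printf("lend priority %d\n", pri);	/* prints 96 */
	return (0);
}
#endif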
/*
 * Insert a PI mutex into the owned list.
 */
static void
umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
{
	struct umtx_q *uq_owner;

	uq_owner = owner->td_umtxq;
	mtx_assert(&umtx_lock, MA_OWNED);
	MPASS(pi->pi_owner == NULL);
	pi->pi_owner = owner;
	TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);
}
/*
 * Disown a PI mutex, and remove it from the owned list.
 */
static void
umtx_pi_disown(struct umtx_pi *pi)
{

	mtx_assert(&umtx_lock, MA_OWNED);
	TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested, pi, pi_link);
	pi->pi_owner = NULL;
}
/*
 * Claim ownership of a PI mutex.
 */
static int
umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
{
	struct umtx_q *uq;
	int pri;

	mtx_lock(&umtx_lock);
	if (pi->pi_owner == owner) {
		mtx_unlock(&umtx_lock);
		return (0);
	}

	if (pi->pi_owner != NULL) {
		/*
		 * userland may have already messed the mutex, sigh.
		 */
		mtx_unlock(&umtx_lock);
		return (EPERM);
	}
	umtx_pi_setowner(pi, owner);
	uq = TAILQ_FIRST(&pi->pi_blocked);
	if (uq != NULL) {
		pri = UPRI(uq->uq_thread);
		thread_lock(owner);
		if (pri < UPRI(owner))
			sched_lend_user_prio(owner, pri);
		thread_unlock(owner);
	}
	mtx_unlock(&umtx_lock);
	return (0);
}
/*
 * Adjust a thread's order position in its blocked PI mutex;
 * this may result in a new priority-propagation pass.
 */
void
umtx_pi_adjust(struct thread *td, u_char oldpri)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;

	uq = td->td_umtxq;
	mtx_lock(&umtx_lock);
	/*
	 * Pick up the lock that td is blocked on.
	 */
	pi = uq->uq_pi_blocked;
	if (pi != NULL) {
		umtx_pi_adjust_thread(pi, td);
		umtx_repropagate_priority(pi);
	}
	mtx_unlock(&umtx_lock);
}
/*
 * Sleep on a PI mutex.
 */
int
umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner,
    const char *wmesg, struct umtx_abs_timeout *timo, bool shared)
{
	struct thread *td, *td1;
	struct umtx_q *uq1;
	int error, pri;
#ifdef INVARIANTS
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
#endif
	error = 0;
	td = uq->uq_thread;
	KASSERT(td == curthread, ("inconsistent uq_thread"));
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));
	KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));
	umtxq_insert(uq);
	mtx_lock(&umtx_lock);
	if (pi->pi_owner == NULL) {
		mtx_unlock(&umtx_lock);
		td1 = tdfind(owner, shared ? -1 : td->td_proc->p_pid);
		mtx_lock(&umtx_lock);
		if (td1 != NULL) {
			if (pi->pi_owner == NULL)
				umtx_pi_setowner(pi, td1);
			PROC_UNLOCK(td1->td_proc);
		}
	}

	TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
		pri = UPRI(uq1->uq_thread);
		if (pri > UPRI(td))
			break;
	}

	if (uq1 != NULL)
		TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
	else
		TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);

	uq->uq_pi_blocked = pi;
	thread_lock(td);
	td->td_flags |= TDF_UPIBLOCKED;
	thread_unlock(td);
	umtx_propagate_priority(td);
	mtx_unlock(&umtx_lock);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, wmesg, timo);
	umtxq_remove(uq);

	mtx_lock(&umtx_lock);
	uq->uq_pi_blocked = NULL;
	thread_lock(td);
	td->td_flags &= ~TDF_UPIBLOCKED;
	thread_unlock(td);
	TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
	umtx_repropagate_priority(pi);
	mtx_unlock(&umtx_lock);
	umtxq_unlock(&uq->uq_key);

	return (error);
}
/*
 * Add a reference to a PI mutex.
 */
static void
umtx_pi_ref(struct umtx_pi *pi)
{

	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&pi->pi_key));
	pi->pi_refcount++;
}
/*
 * Decrease the reference count of a PI mutex; when the counter
 * drops to zero, its memory is freed.
 */
static void
umtx_pi_unref(struct umtx_pi *pi)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	UMTXQ_LOCKED_ASSERT(uc);
	KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
	if (--pi->pi_refcount == 0) {
		mtx_lock(&umtx_lock);
		if (pi->pi_owner != NULL)
			umtx_pi_disown(pi);
		KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
		    ("blocked queue not empty"));
		mtx_unlock(&umtx_lock);
		TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
		umtx_pi_free(pi);
	}
}
/*
 * Find a PI mutex in the hash table.
 */
static struct umtx_pi *
umtx_pi_lookup(struct umtx_key *key)
{
	struct umtxq_chain *uc;
	struct umtx_pi *pi;

	uc = umtxq_getchain(key);
	UMTXQ_LOCKED_ASSERT(uc);

	TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
		if (umtx_key_match(&pi->pi_key, key))
			return (pi);
	}
	return (NULL);
}
/*
 * Insert a PI mutex into the hash table.
 */
static inline void
umtx_pi_insert(struct umtx_pi *pi)
{
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
	UMTXQ_LOCKED_ASSERT(uc);
	TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
}
/*
 * Drop a PI mutex and wake up the top waiter.
 */
static int
umtx_pi_drop(struct thread *td, struct umtx_key *key, bool rb, int *count)
{
	struct umtx_q *uq_first, *uq_first2, *uq_me;
	struct umtx_pi *pi, *pi2;
	int pri;

	UMTXQ_ASSERT_LOCKED_BUSY(key);
	*count = umtxq_count_pi(key, &uq_first);
	if (uq_first != NULL) {
		mtx_lock(&umtx_lock);
		pi = uq_first->uq_pi_blocked;
		KASSERT(pi != NULL, ("pi == NULL?"));
		if (pi->pi_owner != td && !(rb && pi->pi_owner == NULL)) {
			mtx_unlock(&umtx_lock);
			/* userland messed the mutex */
			return (EPERM);
		}
		uq_me = td->td_umtxq;
		if (pi->pi_owner == td)
			umtx_pi_disown(pi);
		/* get highest priority thread which is still sleeping. */
		uq_first = TAILQ_FIRST(&pi->pi_blocked);
		while (uq_first != NULL &&
		    (uq_first->uq_flags & UQF_UMTXQ) == 0) {
			uq_first = TAILQ_NEXT(uq_first, uq_lockq);
		}
		pri = PRI_MAX;
		TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
			uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
			if (uq_first2 != NULL) {
				if (pri > UPRI(uq_first2->uq_thread))
					pri = UPRI(uq_first2->uq_thread);
			}
		}
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
		if (uq_first)
			umtxq_signal_thread(uq_first);
	} else {
		pi = umtx_pi_lookup(key);
		/*
		 * A umtx_pi can exist if a signal or timeout removed the
		 * last waiter from the umtxq, but there is still
		 * a thread in do_lock_pi() holding the umtx_pi.
		 */
		if (pi != NULL) {
			/*
			 * The umtx_pi can be unowned, such as when a thread
			 * has just entered do_lock_pi(), allocated the
			 * umtx_pi, and unlocked the umtxq.
			 * If the current thread owns it, it must disown it.
			 */
			mtx_lock(&umtx_lock);
			if (pi->pi_owner == td)
				umtx_pi_disown(pi);
			mtx_unlock(&umtx_lock);
		}
	}
	return (0);
}
/*
 * Lock a PI mutex.
 */
static int
do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	struct umtx_pi *pi, *new_pi;
	uint32_t id, old_owner, owner, old;
	int error, rv;

	id = td->td_tid;
	uq = td->td_umtxq;

	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		umtx_abs_timeout_init2(&timo, timeout);

	umtxq_lock(&uq->uq_key);
	pi = umtx_pi_lookup(&uq->uq_key);
	if (pi == NULL) {
		new_pi = umtx_pi_alloc(M_NOWAIT);
		if (new_pi == NULL) {
			umtxq_unlock(&uq->uq_key);
			new_pi = umtx_pi_alloc(M_WAITOK);
			umtxq_lock(&uq->uq_key);
			pi = umtx_pi_lookup(&uq->uq_key);
			if (pi != NULL) {
				umtx_pi_free(new_pi);
				new_pi = NULL;
			}
		}
		if (new_pi != NULL) {
			new_pi->pi_key = uq->uq_key;
			umtx_pi_insert(new_pi);
			pi = new_pi;
		}
	}
	umtx_pi_ref(pi);
	umtxq_unlock(&uq->uq_key);

	/*
	 * Care must be exercised when dealing with the umtx structure. It
	 * can fault on any access.
	 */
	for (;;) {
		/*
		 * Try the uncontested case. This should be done in userland.
		 */
		rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, &owner, id);
		/* The address was invalid. */
		if (rv == -1) {
			error = EFAULT;
			break;
		}
		/* The acquire succeeded. */
		if (rv == 0) {
			MPASS(owner == UMUTEX_UNOWNED);
			error = 0;
			break;
		}

		if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
			break;
		}

		/*
		 * Avoid overwriting a possible error from sleep due
		 * to the pending signal with the suspension check result.
		 */
		if (error == 0) {
			error = thread_check_susp(td, true);
			if (error != 0)
				break;
		}

		/* If no one owns it but it is contested, try to acquire it. */
		if (owner == UMUTEX_CONTESTED || owner == UMUTEX_RB_OWNERDEAD) {
			old_owner = owner;
			rv = casueword32(&m->m_owner, owner, &owner,
			    id | UMUTEX_CONTESTED);
			/* The address was invalid. */
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (rv == 1) {
				if (error == 0) {
					error = thread_check_susp(td, true);
					if (error != 0)
						break;
				}

				/*
				 * If this failed the lock could have
				 * changed, restart.
				 */
				continue;
			}

			MPASS(rv == 0);
			MPASS(owner == old_owner);
			umtxq_lock(&uq->uq_key);
			umtxq_busy(&uq->uq_key);
			error = umtx_pi_claim(pi, td);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			if (error != 0) {
				/*
				 * Since we're going to return an
				 * error, restore the m_owner to its
				 * previous, unowned state to avoid
				 * compounding the problem.
				 */
				(void)casuword32(&m->m_owner,
				    id | UMUTEX_CONTESTED, old_owner);
			}
			if (error == 0 && old_owner == UMUTEX_RB_OWNERDEAD)
				error = EOWNERDEAD;
			break;
		}

		if ((owner & ~UMUTEX_CONTESTED) == id) {
			error = EDEADLK;
			break;
		}

		if (try != 0) {
			error = EBUSY;
			break;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock. If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		rv = casueword32(&m->m_owner, owner, &old, owner |
		    UMUTEX_CONTESTED);

		/* The address was invalid. */
		if (rv == -1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = EFAULT;
			break;
		}
		if (rv == 1) {
			umtxq_unbusy_unlocked(&uq->uq_key);
			error = thread_check_susp(td, true);
			if (error != 0)
				break;

			/*
			 * The lock changed and we need to retry or we
			 * lost a race to the thread unlocking the
			 * umtx.  Note that the UMUTEX_RB_OWNERDEAD
			 * value for owner is impossible there.
			 */
			continue;
		}

		umtxq_lock(&uq->uq_key);

		/* We set the contested bit, sleep. */
		MPASS(old == owner);
		error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED,
		    "umtxpi", timeout == NULL ? NULL : &timo,
		    (flags & USYNC_PROCESS_SHARED) != 0);
		if (error != 0)
			continue;

		error = thread_check_susp(td, false);
		if (error != 0)
			break;
	}

	umtxq_lock(&uq->uq_key);
	umtx_pi_unref(pi);
	umtxq_unlock(&uq->uq_key);

	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Unlock a PI mutex.
 */
static int
do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	uint32_t id, new_owner, old, owner;
	int count, error;

	id = td->td_tid;

usrloop:
	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	new_owner = umtx_unlock_val(flags, rb);

	/* This should be done in userland */
	if ((owner & UMUTEX_CONTESTED) == 0) {
		error = casueword32(&m->m_owner, owner, &old, new_owner);
		if (error == -1)
			return (EFAULT);
		if (error == 1) {
			error = thread_check_susp(td, true);
			if (error != 0)
				return (error);
			goto usrloop;
		}
		if (old == owner)
			return (0);
		owner = old;
	}

	/* We should only ever be in here for contested locks */
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	error = umtx_pi_drop(td, &key, rb, &count);
	if (error != 0) {
		umtxq_unbusy(&key);
		umtxq_unlock(&key);
		umtx_key_release(&key);
		/* userland messed the mutex */
		return (error);
	}
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is at most one thread waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	if (count > 1)
		new_owner |= UMUTEX_CONTESTED;
again:
	error = casueword32(&m->m_owner, owner, &old, new_owner);
	if (error == 1) {
		error = thread_check_susp(td, false);
		if (error == 0)
			goto again;
	}
	umtxq_unbusy_unlocked(&key);
	umtx_key_release(&key);
	if (error == -1)
		return (EFAULT);
	if (error == 0 && old != owner)
		return (EINVAL);
	return (error);
}
/*
 * Lock a PP mutex.
 */
static int
do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq, *uq2;
	struct umtx_pi *pi;
	uint32_t ceiling;
	uint32_t owner, id;
	int error, pri, old_inherited_pri, su, rv;

	id = td->td_tid;
	uq = td->td_umtxq;
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);

	if (timeout != NULL)
		umtx_abs_timeout_init2(&timo, timeout);

	su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
	for (;;) {
		old_inherited_pri = uq->uq_inherited_pri;
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		rv = fueword32(&m->m_ceilings[0], &ceiling);
		if (rv == -1) {
			error = EFAULT;
			goto out;
		}
		ceiling = RTP_PRIO_MAX - ceiling;
		if (ceiling > RTP_PRIO_MAX) {
			error = EINVAL;
			goto out;
		}

		mtx_lock(&umtx_lock);
		if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
			mtx_unlock(&umtx_lock);
			error = EINVAL;
			goto out;
		}
		if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
			uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
			thread_lock(td);
			if (uq->uq_inherited_pri < UPRI(td))
				sched_lend_user_prio(td, uq->uq_inherited_pri);
			thread_unlock(td);
		}
		mtx_unlock(&umtx_lock);

		rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    id | UMUTEX_CONTESTED);
		/* The address was invalid. */
		if (rv == -1) {
			error = EFAULT;
			break;
		}
		if (rv == 0) {
			MPASS(owner == UMUTEX_CONTESTED);
			error = 0;
			break;
		}
		/* rv == 1 */
		if (owner == UMUTEX_RB_OWNERDEAD) {
			rv = casueword32(&m->m_owner, UMUTEX_RB_OWNERDEAD,
			    &owner, id | UMUTEX_CONTESTED);
			if (rv == -1) {
				error = EFAULT;
				break;
			}
			if (rv == 0) {
				MPASS(owner == UMUTEX_RB_OWNERDEAD);
				error = EOWNERDEAD; /* success */
				break;
			}

			/*
			 * rv == 1; only check for suspension if we
			 * had not already caught a signal.  If we
			 * get an error from the check, the same
			 * condition is checked by the umtxq_sleep()
			 * call below, so we should obliterate the
			 * error to not skip the last loop iteration.
			 */
			if (error == 0) {
				error = thread_check_susp(td, false);
				if (error == 0) {
					if (try != 0)
						error = EBUSY;
					else
						continue;
				}
				error = 0;
			}
		} else if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
			break;
		} else if (try != 0) {
			error = EBUSY;
			break;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		umtxq_lock(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		error = umtxq_sleep(uq, "umtxpp", timeout == NULL ?
		    NULL : &timo);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);

		mtx_lock(&umtx_lock);
		uq->uq_inherited_pri = old_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}

	if (error != 0 && error != EOWNERDEAD) {
		mtx_lock(&umtx_lock);
		uq->uq_inherited_pri = old_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}

out:
	umtxq_unbusy_unlocked(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Unlock a PP mutex.
 */
static int
do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
	struct umtx_key key;
	struct umtx_q *uq, *uq2;
	struct umtx_pi *pi;
	uint32_t id, owner, rceiling;
	int error, pri, new_inherited_pri, su;

	id = td->td_tid;
	uq = td->td_umtxq;
	su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);

	/*
	 * Make sure we own this mtx.
	 */
	error = fueword32(&m->m_owner, &owner);
	if (error == -1)
		return (EFAULT);

	if ((owner & ~UMUTEX_CONTESTED) != id)
		return (EPERM);

	error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
	if (error != 0)
		return (error);

	if (rceiling == -1)
		new_inherited_pri = PRI_MAX;
	else {
		rceiling = RTP_PRIO_MAX - rceiling;
		if (rceiling > RTP_PRIO_MAX)
			return (EINVAL);
		new_inherited_pri = PRI_MIN_REALTIME + rceiling;
	}

	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	umtxq_unlock(&key);
	/*
	 * For a priority-protected mutex, always set the unlocked state
	 * to UMUTEX_CONTESTED so that userland always enters the kernel
	 * to lock the mutex.  This is necessary because thread priorities
	 * have to be adjusted for such mutexes.
	 */
	error = suword32(&m->m_owner, umtx_unlock_val(flags, rb) |
	    UMUTEX_CONTESTED);

	umtxq_lock(&key);
	if (error == 0)
		umtxq_signal(&key, 1);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);

	if (error == -1)
		error = EFAULT;
	else {
		mtx_lock(&umtx_lock);
		if (su != 0)
			uq->uq_inherited_pri = new_inherited_pri;
		pri = PRI_MAX;
		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
			uq2 = TAILQ_FIRST(&pi->pi_blocked);
			if (uq2 != NULL) {
				if (pri > UPRI(uq2->uq_thread))
					pri = UPRI(uq2->uq_thread);
			}
		}
		if (pri > uq->uq_inherited_pri)
			pri = uq->uq_inherited_pri;
		thread_lock(td);
		sched_lend_user_prio(td, pri);
		thread_unlock(td);
		mtx_unlock(&umtx_lock);
	}
	umtx_key_release(&key);
	return (error);
}
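/*
 * Illustrative sketch (not part of the original file): the ceiling
 * arithmetic used by the PP code above.  Userland stores the ceiling
 * POSIX-style (larger value = more important) while kernel priorities
 * grow the other way (smaller value = more important), so the value is
 * flipped with RTP_PRIO_MAX - ceiling and anchored at PRI_MIN_REALTIME.
 * Constants come from <sys/priority.h> and <sys/rtprio.h>.
 */
#if 0	/* example only */
#include <stdio.h>
#include <sys/priority.h>
#include <sys/rtprio.h>

int
main(void)
{
	unsigned int ceiling = RTP_PRIO_MAX;	/* strongest ceiling */

	/* Maps to PRI_MIN_REALTIME, the most important realtime level. */
	printf("kernel pri %u\n",
	    PRI_MIN_REALTIME + (RTP_PRIO_MAX - ceiling));
	return (0);
}
#endif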
static int
do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
    uint32_t *old_ceiling)
{
	struct umtx_q *uq;
	uint32_t flags, id, owner, save_ceiling;
	int error, rv, rv1;

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);
	if (ceiling > RTP_PRIO_MAX)
		return (EINVAL);
	id = td->td_tid;
	uq = td->td_umtxq;
	if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
	    TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
	    &uq->uq_key)) != 0)
		return (error);
	for (;;) {
		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);

		rv = fueword32(&m->m_ceilings[0], &save_ceiling);
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
		    id | UMUTEX_CONTESTED);
		if (rv == -1) {
			error = EFAULT;
			break;
		}

		if (rv == 0) {
			MPASS(owner == UMUTEX_CONTESTED);
			rv = suword32(&m->m_ceilings[0], ceiling);
			rv1 = suword32(&m->m_owner, UMUTEX_CONTESTED);
			error = (rv == 0 && rv1 == 0) ? 0 : EFAULT;
			break;
		}

		if ((owner & ~UMUTEX_CONTESTED) == id) {
			rv = suword32(&m->m_ceilings[0], ceiling);
			error = rv == 0 ? 0 : EFAULT;
			break;
		}

		if (owner == UMUTEX_RB_OWNERDEAD) {
			error = EOWNERDEAD;
			break;
		} else if (owner == UMUTEX_RB_NOTRECOV) {
			error = ENOTRECOVERABLE;
			break;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error != 0)
			break;

		/*
		 * We set the contested bit, sleep. Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		umtxq_insert(uq);
		umtxq_unbusy(&uq->uq_key);
		error = umtxq_sleep(uq, "umtxpp", NULL);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
	}
	umtxq_lock(&uq->uq_key);
	if (error == 0)
		umtxq_signal(&uq->uq_key, INT_MAX);
	umtxq_unbusy(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	if (error == 0 && old_ceiling != NULL) {
		rv = suword32(old_ceiling, save_ceiling);
		error = rv == 0 ? 0 : EFAULT;
	}
	return (error);
}
/*
 * Lock a userland POSIX mutex.
 */
static int
do_lock_umutex(struct thread *td, struct umutex *m,
    struct _umtx_time *timeout, int mode)
{
	uint32_t flags;
	int error;

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
	case 0:
		error = do_lock_normal(td, m, flags, timeout, mode);
		break;
	case UMUTEX_PRIO_INHERIT:
		error = do_lock_pi(td, m, flags, timeout, mode);
		break;
	case UMUTEX_PRIO_PROTECT:
		error = do_lock_pp(td, m, flags, timeout, mode);
		break;
	default:
		return (EINVAL);
	}
	if (timeout == NULL) {
		if (error == EINTR && mode != _UMUTEX_WAIT)
			error = ERESTART;
	} else {
		/* Timed-locking is not restarted. */
		if (error == ERESTART)
			error = EINTR;
	}
	return (error);
}
/*
 * Unlock a userland POSIX mutex.
 */
static int
do_unlock_umutex(struct thread *td, struct umutex *m, bool rb)
{
	uint32_t flags;
	int error;

	error = fueword32(&m->m_flags, &flags);
	if (error == -1)
		return (EFAULT);

	switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
	case 0:
		return (do_unlock_normal(td, m, flags, rb));
	case UMUTEX_PRIO_INHERIT:
		return (do_unlock_pi(td, m, flags, rb));
	case UMUTEX_PRIO_PROTECT:
		return (do_unlock_pp(td, m, flags, rb));
	}

	return (EINVAL);
}
static int
do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
    struct timespec *timeout, u_long wflags)
{
	struct umtx_abs_timeout timo;
	struct umtx_q *uq;
	uint32_t flags, clockid, hasw;
	int error;

	uq = td->td_umtxq;
	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);
	if (error != 0)
		return (error);

	if ((wflags & CVWAIT_CLOCKID) != 0) {
		error = fueword32(&cv->c_clockid, &clockid);
		if (error == -1) {
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}
		if (clockid < CLOCK_REALTIME ||
		    clockid >= CLOCK_THREAD_CPUTIME_ID) {
			/* hmm, only HW clock id will work. */
			umtx_key_release(&uq->uq_key);
			return (EINVAL);
		}
	} else {
		clockid = CLOCK_REALTIME;
	}

	umtxq_lock(&uq->uq_key);
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unlock(&uq->uq_key);

	/*
	 * Set c_has_waiters to 1 before releasing the user mutex; also,
	 * don't modify the cache line when unnecessary.
	 */
	error = fueword32(&cv->c_has_waiters, &hasw);
	if (error == 0 && hasw == 0)
		suword32(&cv->c_has_waiters, 1);

	umtxq_unbusy_unlocked(&uq->uq_key);

	error = do_unlock_umutex(td, m, false);

	if (timeout != NULL)
		umtx_abs_timeout_init(&timo, clockid,
		    (wflags & CVWAIT_ABSTIME) != 0, timeout);

	umtxq_lock(&uq->uq_key);
	if (error == 0) {
		error = umtxq_sleep(uq, "ucond", timeout == NULL ?
		    NULL : &timo);
	}

	if ((uq->uq_flags & UQF_UMTXQ) == 0)
		error = 0;
	else {
		/*
		 * This must be a timeout, an interruption by signal, or a
		 * spurious wakeup; clear the c_has_waiters flag when
		 * necessary.
		 */
		umtxq_busy(&uq->uq_key);
		if ((uq->uq_flags & UQF_UMTXQ) != 0) {
			int oldlen = uq->uq_cur_queue->length;

			umtxq_remove(uq);
			if (oldlen == 1) {
				umtxq_unlock(&uq->uq_key);
				suword32(&cv->c_has_waiters, 0);
				umtxq_lock(&uq->uq_key);
			}
		}
		umtxq_unbusy(&uq->uq_key);
		if (error == ERESTART)
			error = EINTR;
	}

	umtxq_unlock(&uq->uq_key);
	umtx_key_release(&uq->uq_key);
	return (error);
}
/*
 * Signal a userland condition variable.
 */
static int
do_cv_signal(struct thread *td, struct ucond *cv)
{
	struct umtx_key key;
	int error, cnt, nwake;
	uint32_t flags;

	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
		return (error);
	umtxq_lock(&key);
	umtxq_busy(&key);
	cnt = umtxq_count(&key);
	nwake = umtxq_signal(&key, 1);
	if (cnt <= nwake) {
		umtxq_unlock(&key);
		error = suword32(&cv->c_has_waiters, 0);
		if (error == -1)
			error = EFAULT;
		umtxq_lock(&key);
	}
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (error);
}
static int
do_cv_broadcast(struct thread *td, struct ucond *cv)
{
	struct umtx_key key;
	int error;
	uint32_t flags;

	error = fueword32(&cv->c_flags, &flags);
	if (error == -1)
		return (EFAULT);
	if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
		return (error);

	umtxq_lock(&key);
	umtxq_busy(&key);
	umtxq_signal(&key, INT_MAX);
	umtxq_unlock(&key);

	error = suword32(&cv->c_has_waiters, 0);
	if (error == -1)
		error = EFAULT;

	umtxq_unbusy_unlocked(&key);

	umtx_key_release(&key);
	return (error);
}
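/*
 * Illustrative sketch (not part of the original file): why c_has_waiters
 * is maintained at all.  A signaller can test it with one ordinary load
 * and skip the system call entirely when no thread can be sleeping; the
 * slow path is do_cv_signal() above.  ex_cv_signal is hypothetical.
 */
#if 0	/* example only */
#include <stdatomic.h>

struct ex_cond {
	_Atomic unsigned int has_waiters;
};

static void
ex_cv_signal(struct ex_cond *cv)
{
	if (atomic_load(&cv->has_waiters) == 0)
		return;	/* no sleeper possible: skip the syscall */
	/* Enter the kernel, e.g. _umtx_op(..., UMTX_OP_CV_SIGNAL, ...). */
}
#endif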
3035 do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag,
3036 struct _umtx_time *timeout)
3038 struct umtx_abs_timeout timo;
3040 uint32_t flags, wrflags;
3041 int32_t state, oldstate;
3042 int32_t blocked_readers;
3043 int error, error1, rv;
3046 error = fueword32(&rwlock->rw_flags, &flags);
3049 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
3053 if (timeout != NULL)
3054 umtx_abs_timeout_init2(&timo, timeout);
3056 wrflags = URWLOCK_WRITE_OWNER;
3057 if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
3058 wrflags |= URWLOCK_WRITE_WAITERS;
3061 rv = fueword32(&rwlock->rw_state, &state);
3063 umtx_key_release(&uq->uq_key);
3067 /* try to lock it */
3068 while (!(state & wrflags)) {
3069 if (__predict_false(URWLOCK_READER_COUNT(state) ==
3070 URWLOCK_MAX_READERS)) {
3071 umtx_key_release(&uq->uq_key);
3074 rv = casueword32(&rwlock->rw_state, state,
3075 &oldstate, state + 1);
3077 umtx_key_release(&uq->uq_key);
3081 MPASS(oldstate == state);
3082 umtx_key_release(&uq->uq_key);
3085 error = thread_check_susp(td, true);
3094 /* grab monitor lock */
3095 umtxq_lock(&uq->uq_key);
3096 umtxq_busy(&uq->uq_key);
3097 umtxq_unlock(&uq->uq_key);
3100 * Re-read the state, in case it changed between the try-lock above
3101 * and the check below.
3103 rv = fueword32(&rwlock->rw_state, &state);
3107 /* set read contention bit */
3108 while (error == 0 && (state & wrflags) &&
3109 !(state & URWLOCK_READ_WAITERS)) {
3110 rv = casueword32(&rwlock->rw_state, state,
3111 &oldstate, state | URWLOCK_READ_WAITERS);
3117 MPASS(oldstate == state);
3121 error = thread_check_susp(td, false);
3126 umtxq_unbusy_unlocked(&uq->uq_key);
3130 /* The state changed while we were setting the flags; restart. */
3131 if (!(state & wrflags)) {
3132 umtxq_unbusy_unlocked(&uq->uq_key);
3133 error = thread_check_susp(td, true);
3141 * The contention bit is set; before sleeping, increase the
3142 * read-waiter count.
3144 rv = fueword32(&rwlock->rw_blocked_readers,
3147 umtxq_unbusy_unlocked(&uq->uq_key);
3151 suword32(&rwlock->rw_blocked_readers, blocked_readers+1);
3153 while (state & wrflags) {
3154 umtxq_lock(&uq->uq_key);
3156 umtxq_unbusy(&uq->uq_key);
3158 error = umtxq_sleep(uq, "urdlck", timeout == NULL ?
3161 umtxq_busy(&uq->uq_key);
3163 umtxq_unlock(&uq->uq_key);
3166 rv = fueword32(&rwlock->rw_state, &state);
3173 /* Decrement the read-waiter count; this may also clear the read-contention bit. */
3174 rv = fueword32(&rwlock->rw_blocked_readers,
3177 umtxq_unbusy_unlocked(&uq->uq_key);
3181 suword32(&rwlock->rw_blocked_readers, blocked_readers-1);
3182 if (blocked_readers == 1) {
3183 rv = fueword32(&rwlock->rw_state, &state);
3185 umtxq_unbusy_unlocked(&uq->uq_key);
3190 rv = casueword32(&rwlock->rw_state, state,
3191 &oldstate, state & ~URWLOCK_READ_WAITERS);
3197 MPASS(oldstate == state);
3201 error1 = thread_check_susp(td, false);
3210 umtxq_unbusy_unlocked(&uq->uq_key);
3214 umtx_key_release(&uq->uq_key);
3215 if (error == ERESTART)
3221 do_rw_wrlock(struct thread *td, struct urwlock *rwlock, struct _umtx_time *timeout)
3223 struct umtx_abs_timeout timo;
3226 int32_t state, oldstate;
3227 int32_t blocked_writers;
3228 int32_t blocked_readers;
3229 int error, error1, rv;
3232 error = fueword32(&rwlock->rw_flags, &flags);
3235 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
3239 if (timeout != NULL)
3240 umtx_abs_timeout_init2(&timo, timeout);
3242 blocked_readers = 0;
3244 rv = fueword32(&rwlock->rw_state, &state);
3246 umtx_key_release(&uq->uq_key);
3249 while ((state & URWLOCK_WRITE_OWNER) == 0 &&
3250 URWLOCK_READER_COUNT(state) == 0) {
3251 rv = casueword32(&rwlock->rw_state, state,
3252 &oldstate, state | URWLOCK_WRITE_OWNER);
3254 umtx_key_release(&uq->uq_key);
3258 MPASS(oldstate == state);
3259 umtx_key_release(&uq->uq_key);
3263 error = thread_check_susp(td, true);
3269 if ((state & (URWLOCK_WRITE_OWNER |
3270 URWLOCK_WRITE_WAITERS)) == 0 &&
3271 blocked_readers != 0) {
3272 umtxq_lock(&uq->uq_key);
3273 umtxq_busy(&uq->uq_key);
3274 umtxq_signal_queue(&uq->uq_key, INT_MAX,
3276 umtxq_unbusy(&uq->uq_key);
3277 umtxq_unlock(&uq->uq_key);
3283 /* grab monitor lock */
3284 umtxq_lock(&uq->uq_key);
3285 umtxq_busy(&uq->uq_key);
3286 umtxq_unlock(&uq->uq_key);
3289 * Re-read the state, in case it changed between the
3290 * try-lock above and the check below.
3292 rv = fueword32(&rwlock->rw_state, &state);
3296 while (error == 0 && ((state & URWLOCK_WRITE_OWNER) ||
3297 URWLOCK_READER_COUNT(state) != 0) &&
3298 (state & URWLOCK_WRITE_WAITERS) == 0) {
3299 rv = casueword32(&rwlock->rw_state, state,
3300 &oldstate, state | URWLOCK_WRITE_WAITERS);
3306 MPASS(oldstate == state);
3310 error = thread_check_susp(td, false);
3315 umtxq_unbusy_unlocked(&uq->uq_key);
3319 if ((state & URWLOCK_WRITE_OWNER) == 0 &&
3320 URWLOCK_READER_COUNT(state) == 0) {
3321 umtxq_unbusy_unlocked(&uq->uq_key);
3322 error = thread_check_susp(td, false);
3328 rv = fueword32(&rwlock->rw_blocked_writers,
3331 umtxq_unbusy_unlocked(&uq->uq_key);
3335 suword32(&rwlock->rw_blocked_writers, blocked_writers + 1);
3337 while ((state & URWLOCK_WRITE_OWNER) ||
3338 URWLOCK_READER_COUNT(state) != 0) {
3339 umtxq_lock(&uq->uq_key);
3340 umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
3341 umtxq_unbusy(&uq->uq_key);
3343 error = umtxq_sleep(uq, "uwrlck", timeout == NULL ?
3346 umtxq_busy(&uq->uq_key);
3347 umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
3348 umtxq_unlock(&uq->uq_key);
3351 rv = fueword32(&rwlock->rw_state, &state);
3358 rv = fueword32(&rwlock->rw_blocked_writers,
3361 umtxq_unbusy_unlocked(&uq->uq_key);
3365 suword32(&rwlock->rw_blocked_writers, blocked_writers-1);
3366 if (blocked_writers == 1) {
3367 rv = fueword32(&rwlock->rw_state, &state);
3369 umtxq_unbusy_unlocked(&uq->uq_key);
3374 rv = casueword32(&rwlock->rw_state, state,
3375 &oldstate, state & ~URWLOCK_WRITE_WAITERS);
3381 MPASS(oldstate == state);
3385 error1 = thread_check_susp(td, false);
3387 * We are leaving the URWLOCK_WRITE_WAITERS flag
3388 * behind, but this should not harm correctness.
3397 rv = fueword32(&rwlock->rw_blocked_readers,
3400 umtxq_unbusy_unlocked(&uq->uq_key);
3405 blocked_readers = 0;
3407 umtxq_unbusy_unlocked(&uq->uq_key);
3410 umtx_key_release(&uq->uq_key);
3411 if (error == ERESTART)
3417 do_rw_unlock(struct thread *td, struct urwlock *rwlock)
3421 int32_t state, oldstate;
3422 int error, rv, q, count;
3425 error = fueword32(&rwlock->rw_flags, &flags);
3428 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
3432 error = fueword32(&rwlock->rw_state, &state);
3437 if (state & URWLOCK_WRITE_OWNER) {
3439 rv = casueword32(&rwlock->rw_state, state,
3440 &oldstate, state & ~URWLOCK_WRITE_OWNER);
3447 if (!(oldstate & URWLOCK_WRITE_OWNER)) {
3451 error = thread_check_susp(td, true);
3457 } else if (URWLOCK_READER_COUNT(state) != 0) {
3459 rv = casueword32(&rwlock->rw_state, state,
3460 &oldstate, state - 1);
3467 if (URWLOCK_READER_COUNT(oldstate) == 0) {
3471 error = thread_check_susp(td, true);
3484 if (!(flags & URWLOCK_PREFER_READER)) {
3485 if (state & URWLOCK_WRITE_WAITERS) {
3487 q = UMTX_EXCLUSIVE_QUEUE;
3488 } else if (state & URWLOCK_READ_WAITERS) {
3490 q = UMTX_SHARED_QUEUE;
3493 if (state & URWLOCK_READ_WAITERS) {
3495 q = UMTX_SHARED_QUEUE;
3496 } else if (state & URWLOCK_WRITE_WAITERS) {
3498 q = UMTX_EXCLUSIVE_QUEUE;
3503 umtxq_lock(&uq->uq_key);
3504 umtxq_busy(&uq->uq_key);
3505 umtxq_signal_queue(&uq->uq_key, count, q);
3506 umtxq_unbusy(&uq->uq_key);
3507 umtxq_unlock(&uq->uq_key);
3510 umtx_key_release(&uq->uq_key);
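/*
 * A minimal sketch of how userland reaches these handlers (illustrative;
 * libthr tries a CAS fast path on rw_state first and enters the kernel
 * only on contention).  As for the other timed operations, uaddr1
 * carries the size of the timeout object and uaddr2 its address:
 *
 *	(void)_umtx_op(rw, UMTX_OP_RW_RDLOCK, fflag,
 *	    (void *)sizeof(struct _umtx_time), &timeout);
 *	(void)_umtx_op(rw, UMTX_OP_RW_UNLOCK, 0, NULL, NULL);
 */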
3514 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
3516 do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
3518 struct umtx_abs_timeout timo;
3520 uint32_t flags, count, count1;
3524 error = fueword32(&sem->_flags, &flags);
3527 error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
3531 if (timeout != NULL)
3532 umtx_abs_timeout_init2(&timo, timeout);
3535 umtxq_lock(&uq->uq_key);
3536 umtxq_busy(&uq->uq_key);
3538 umtxq_unlock(&uq->uq_key);
3539 rv = casueword32(&sem->_has_waiters, 0, &count1, 1);
3541 rv1 = fueword32(&sem->_count, &count);
3542 if (rv == -1 || (rv == 0 && (rv1 == -1 || count != 0)) ||
3543 (rv == 1 && count1 == 0)) {
3544 umtxq_lock(&uq->uq_key);
3545 umtxq_unbusy(&uq->uq_key);
3547 umtxq_unlock(&uq->uq_key);
3549 rv = thread_check_susp(td, true);
3557 error = rv == -1 ? EFAULT : 0;
3560 umtxq_lock(&uq->uq_key);
3561 umtxq_unbusy(&uq->uq_key);
3563 error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);
3565 if ((uq->uq_flags & UQF_UMTXQ) == 0)
3569 /* A relative timeout cannot be restarted. */
3570 if (error == ERESTART && timeout != NULL &&
3571 (timeout->_flags & UMTX_ABSTIME) == 0)
3574 umtxq_unlock(&uq->uq_key);
3576 umtx_key_release(&uq->uq_key);
3581 * Signal a userland semaphore.
3584 do_sem_wake(struct thread *td, struct _usem *sem)
3586 struct umtx_key key;
3590 error = fueword32(&sem->_flags, &flags);
3593 if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
3597 cnt = umtxq_count(&key);
3600 * A count greater than zero means the memory is still being
3601 * referenced by user code, so we can safely update the
3602 * _has_waiters flag.
3606 error = suword32(&sem->_has_waiters, 0);
3611 umtxq_signal(&key, 1);
3615 umtx_key_release(&key);
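/*
 * For reference: the _usem2 count word packs the semaphore value and a
 * waiters bit (values as defined in sys/umtx.h), which is what the
 * USEM_COUNT() and USEM_HAS_WAITERS tests below rely on:
 *
 *	USEM_HAS_WAITERS	0x80000000U
 *	USEM_MAX_COUNT		0x7fffffffU
 *	USEM_COUNT(x)		((x) & USEM_MAX_COUNT)
 */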
3621 do_sem2_wait(struct thread *td, struct _usem2 *sem, struct _umtx_time *timeout)
3623 struct umtx_abs_timeout timo;
3625 uint32_t count, flags;
3629 flags = fuword32(&sem->_flags);
3630 if (timeout != NULL)
3631 umtx_abs_timeout_init2(&timo, timeout);
3634 error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
3637 umtxq_lock(&uq->uq_key);
3638 umtxq_busy(&uq->uq_key);
3640 umtxq_unlock(&uq->uq_key);
3641 rv = fueword32(&sem->_count, &count);
3643 umtxq_lock(&uq->uq_key);
3644 umtxq_unbusy(&uq->uq_key);
3646 umtxq_unlock(&uq->uq_key);
3647 umtx_key_release(&uq->uq_key);
3651 if (USEM_COUNT(count) != 0) {
3652 umtxq_lock(&uq->uq_key);
3653 umtxq_unbusy(&uq->uq_key);
3655 umtxq_unlock(&uq->uq_key);
3656 umtx_key_release(&uq->uq_key);
3659 if (count == USEM_HAS_WAITERS)
3661 rv = casueword32(&sem->_count, 0, &count, USEM_HAS_WAITERS);
3664 umtxq_lock(&uq->uq_key);
3665 umtxq_unbusy(&uq->uq_key);
3667 umtxq_unlock(&uq->uq_key);
3668 umtx_key_release(&uq->uq_key);
3671 rv = thread_check_susp(td, true);
3676 umtxq_lock(&uq->uq_key);
3677 umtxq_unbusy(&uq->uq_key);
3679 error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);
3681 if ((uq->uq_flags & UQF_UMTXQ) == 0)
3685 if (timeout != NULL && (timeout->_flags & UMTX_ABSTIME) == 0) {
3686 /* A relative timeout cannot be restarted. */
3687 if (error == ERESTART)
3689 if (error == EINTR) {
3690 kern_clock_gettime(curthread, timo.clockid,
3692 timespecsub(&timo.end, &timo.cur,
3693 &timeout->_timeout);
3697 umtxq_unlock(&uq->uq_key);
3698 umtx_key_release(&uq->uq_key);
3703 * Signal a userland semaphore.
3706 do_sem2_wake(struct thread *td, struct _usem2 *sem)
3708 struct umtx_key key;
3710 uint32_t count, flags;
3712 rv = fueword32(&sem->_flags, &flags);
3715 if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
3719 cnt = umtxq_count(&key);
3722 * If this was the last sleeping thread, clear the waiters flag in _count.
3727 rv = fueword32(&sem->_count, &count);
3728 while (rv != -1 && count & USEM_HAS_WAITERS) {
3729 rv = casueword32(&sem->_count, count, &count,
3730 count & ~USEM_HAS_WAITERS);
3732 rv = thread_check_susp(td, true);
3745 umtxq_signal(&key, 1);
3749 umtx_key_release(&key);
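/*
 * A sketch of the matching userland fast paths (illustrative pseudocode;
 * the real consumer is the sem(3) implementation).  The post side needs
 * the syscall only when the waiters bit was set, and the wait side only
 * when the count could not be decremented:
 *
 *	post:	old = atomic_fetchadd_32(&sem->_count, 1);
 *		if ((old & USEM_HAS_WAITERS) != 0)
 *			(void)_umtx_op(sem, UMTX_OP_SEM2_WAKE, 0, NULL, NULL);
 *
 *	wait:	if (!dec_if_positive(&sem->_count))
 *			(void)_umtx_op(sem, UMTX_OP_SEM2_WAIT, 0, NULL, NULL);
 *
 * (dec_if_positive is a hypothetical stand-in for the CAS loop that
 * decrements USEM_COUNT only while it is nonzero.)
 */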
3753 #ifdef COMPAT_FREEBSD10
3755 freebsd10__umtx_lock(struct thread *td, struct freebsd10__umtx_lock_args *uap)
3757 return (do_lock_umtx(td, uap->umtx, td->td_tid, 0));
3761 freebsd10__umtx_unlock(struct thread *td,
3762 struct freebsd10__umtx_unlock_args *uap)
3764 return (do_unlock_umtx(td, uap->umtx, td->td_tid));
3769 umtx_copyin_timeout(const void *uaddr, struct timespec *tsp)
3773 error = copyin(uaddr, tsp, sizeof(*tsp));
3775 if (tsp->tv_sec < 0 ||
3776 tsp->tv_nsec >= 1000000000 ||
3784 umtx_copyin_umtx_time(const void *uaddr, size_t size, struct _umtx_time *tp)
3788 if (size <= sizeof(tp->_timeout)) {
3789 tp->_clockid = CLOCK_REALTIME;
3791 error = copyin(uaddr, &tp->_timeout, sizeof(tp->_timeout));
3793 error = copyin(uaddr, tp, sizeof(*tp));
3796 if (tp->_timeout.tv_sec < 0 ||
3797 tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0)
3803 umtx_copyin_robust_lists(const void *uaddr, size_t size,
3804 struct umtx_robust_lists_params *rb)
3807 if (size > sizeof(*rb))
3809 return (copyin(uaddr, rb, size));
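/*
 * The *_umtx_time copyin helpers (above, and the 32-bit variants below)
 * accept two layouts, selected by the size the caller passed in uaddr1:
 * a bare struct timespec (treated as a relative CLOCK_REALTIME timeout)
 * or the full structure:
 *
 *	struct _umtx_time {
 *		struct timespec	_timeout;
 *		uint32_t	_flags;		(UMTX_ABSTIME or 0)
 *		uint32_t	_clockid;
 *	};
 */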
3813 umtx_copyout_timeout(void *uaddr, size_t sz, struct timespec *tsp)
3817 * Should be guaranteed by the caller: sz == uaddr1 - sizeof(_umtx_time),
3818 * and we are only called if sz >= sizeof(timespec), as supplied in the umtx_copyops.
3821 KASSERT(sz >= sizeof(*tsp),
3822 ("umtx_copyops specifies incorrect sizes"));
3824 return (copyout(tsp, uaddr, sizeof(*tsp)));
3827 #ifdef COMPAT_FREEBSD10
3829 __umtx_op_lock_umtx(struct thread *td, struct _umtx_op_args *uap,
3830 const struct umtx_copyops *ops)
3832 struct timespec *ts, timeout;
3835 /* Allow a null timespec (wait forever). */
3836 if (uap->uaddr2 == NULL)
3839 error = ops->copyin_timeout(uap->uaddr2, &timeout);
3844 #ifdef COMPAT_FREEBSD32
3846 return (do_lock_umtx32(td, uap->obj, uap->val, ts));
3848 return (do_lock_umtx(td, uap->obj, uap->val, ts));
3852 __umtx_op_unlock_umtx(struct thread *td, struct _umtx_op_args *uap,
3853 const struct umtx_copyops *ops)
3855 #ifdef COMPAT_FREEBSD32
3857 return (do_unlock_umtx32(td, uap->obj, uap->val));
3859 return (do_unlock_umtx(td, uap->obj, uap->val));
3861 #endif /* COMPAT_FREEBSD10 */
3863 #if !defined(COMPAT_FREEBSD10)
3865 __umtx_op_unimpl(struct thread *td __unused, struct _umtx_op_args *uap __unused,
3866 const struct umtx_copyops *ops __unused)
3868 return (EOPNOTSUPP);
3870 #endif /* COMPAT_FREEBSD10 */
3873 __umtx_op_wait(struct thread *td, struct _umtx_op_args *uap,
3874 const struct umtx_copyops *ops)
3876 struct _umtx_time timeout, *tm_p;
3879 if (uap->uaddr2 == NULL)
3882 error = ops->copyin_umtx_time(
3883 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3888 return (do_wait(td, uap->obj, uap->val, tm_p, ops->compat32, 0));
3892 __umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap,
3893 const struct umtx_copyops *ops)
3895 struct _umtx_time timeout, *tm_p;
3898 if (uap->uaddr2 == NULL)
3901 error = ops->copyin_umtx_time(
3902 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3907 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
3911 __umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap,
3912 const struct umtx_copyops *ops)
3914 struct _umtx_time *tm_p, timeout;
3917 if (uap->uaddr2 == NULL)
3920 error = ops->copyin_umtx_time(
3921 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3926 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
3930 __umtx_op_wake(struct thread *td, struct _umtx_op_args *uap,
3931 const struct umtx_copyops *ops __unused)
3934 return (kern_umtx_wake(td, uap->obj, uap->val, 0));
3937 #define BATCH_SIZE 128
3939 __umtx_op_nwake_private_native(struct thread *td, struct _umtx_op_args *uap)
3941 char *uaddrs[BATCH_SIZE], **upp;
3942 int count, error, i, pos, tocopy;
3944 upp = (char **)uap->obj;
3946 for (count = uap->val, pos = 0; count > 0; count -= tocopy,
3948 tocopy = MIN(count, BATCH_SIZE);
3949 error = copyin(upp + pos, uaddrs, tocopy * sizeof(char *));
3952 for (i = 0; i < tocopy; ++i) {
3953 kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
3961 __umtx_op_nwake_private_compat32(struct thread *td, struct _umtx_op_args *uap)
3963 uint32_t uaddrs[BATCH_SIZE], *upp;
3964 int count, error, i, pos, tocopy;
3966 upp = (uint32_t *)uap->obj;
3968 for (count = uap->val, pos = 0; count > 0; count -= tocopy,
3970 tocopy = MIN(count, BATCH_SIZE);
3971 error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t));
3974 for (i = 0; i < tocopy; ++i) {
3975 kern_umtx_wake(td, (void *)(uintptr_t)uaddrs[i],
3984 __umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap,
3985 const struct umtx_copyops *ops)
3989 return (__umtx_op_nwake_private_compat32(td, uap));
3990 return (__umtx_op_nwake_private_native(td, uap));
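/*
 * A sketch of the userland call (illustrative): libthr queues deferred
 * wakeups and releases them in one syscall, passing an array of umtx
 * addresses and its element count:
 *
 *	(void)_umtx_op(addr_array, UMTX_OP_NWAKE_PRIVATE, naddrs,
 *	    NULL, NULL);
 */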
3994 __umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap,
3995 const struct umtx_copyops *ops __unused)
3998 return (kern_umtx_wake(td, uap->obj, uap->val, 1));
4002 __umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap,
4003 const struct umtx_copyops *ops)
4005 struct _umtx_time *tm_p, timeout;
4008 /* Allow a null timespec (wait forever). */
4009 if (uap->uaddr2 == NULL)
4012 error = ops->copyin_umtx_time(
4013 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
4018 return (do_lock_umutex(td, uap->obj, tm_p, 0));
4022 __umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap,
4023 const struct umtx_copyops *ops __unused)
4026 return (do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY));
4030 __umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap,
4031 const struct umtx_copyops *ops)
4033 struct _umtx_time *tm_p, timeout;
4036 /* Allow a null timespec (wait forever). */
4037 if (uap->uaddr2 == NULL)
4040 error = ops->copyin_umtx_time(
4041 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
4046 return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
4050 __umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap,
4051 const struct umtx_copyops *ops __unused)
4054 return (do_wake_umutex(td, uap->obj));
4058 __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap,
4059 const struct umtx_copyops *ops __unused)
4062 return (do_unlock_umutex(td, uap->obj, false));
4066 __umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap,
4067 const struct umtx_copyops *ops __unused)
4070 return (do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1));
4074 __umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap,
4075 const struct umtx_copyops *ops)
4077 struct timespec *ts, timeout;
4080 /* Allow a null timespec (wait forever). */
4081 if (uap->uaddr2 == NULL)
4084 error = ops->copyin_timeout(uap->uaddr2, &timeout);
4089 return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
4093 __umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap,
4094 const struct umtx_copyops *ops __unused)
4097 return (do_cv_signal(td, uap->obj));
4101 __umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap,
4102 const struct umtx_copyops *ops __unused)
4105 return (do_cv_broadcast(td, uap->obj));
4109 __umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap,
4110 const struct umtx_copyops *ops)
4112 struct _umtx_time timeout;
4115 /* Allow a null timespec (wait forever). */
4116 if (uap->uaddr2 == NULL) {
4117 error = do_rw_rdlock(td, uap->obj, uap->val, 0);
4119 error = ops->copyin_umtx_time(uap->uaddr2,
4120 (size_t)uap->uaddr1, &timeout);
4123 error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
4129 __umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap,
4130 const struct umtx_copyops *ops)
4132 struct _umtx_time timeout;
4135 /* Allow a null timespec (wait forever). */
4136 if (uap->uaddr2 == NULL) {
4137 error = do_rw_wrlock(td, uap->obj, 0);
4139 error = ops->copyin_umtx_time(uap->uaddr2,
4140 (size_t)uap->uaddr1, &timeout);
4144 error = do_rw_wrlock(td, uap->obj, &timeout);
4150 __umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap,
4151 const struct umtx_copyops *ops __unused)
4154 return (do_rw_unlock(td, uap->obj));
4157 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
4159 __umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap,
4160 const struct umtx_copyops *ops)
4162 struct _umtx_time *tm_p, timeout;
4165 /* Allow a null timespec (wait forever). */
4166 if (uap->uaddr2 == NULL)
4169 error = ops->copyin_umtx_time(
4170 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
4175 return (do_sem_wait(td, uap->obj, tm_p));
4179 __umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap,
4180 const struct umtx_copyops *ops __unused)
4183 return (do_sem_wake(td, uap->obj));
4188 __umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap,
4189 const struct umtx_copyops *ops __unused)
4192 return (do_wake2_umutex(td, uap->obj, uap->val));
4196 __umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap,
4197 const struct umtx_copyops *ops)
4199 struct _umtx_time *tm_p, timeout;
4203 /* Allow a null timespec (wait forever). */
4204 if (uap->uaddr2 == NULL) {
4208 uasize = (size_t)uap->uaddr1;
4209 error = ops->copyin_umtx_time(uap->uaddr2, uasize, &timeout);
4214 error = do_sem2_wait(td, uap->obj, tm_p);
4215 if (error == EINTR && uap->uaddr2 != NULL &&
4216 (timeout._flags & UMTX_ABSTIME) == 0 &&
4217 uasize >= ops->umtx_time_sz + ops->timespec_sz) {
4218 error = ops->copyout_timeout(
4219 (void *)((uintptr_t)uap->uaddr2 + ops->umtx_time_sz),
4220 uasize - ops->umtx_time_sz, &timeout._timeout);
4230 __umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap,
4231 const struct umtx_copyops *ops __unused)
4234 return (do_sem2_wake(td, uap->obj));
4237 #define USHM_OBJ_UMTX(o) \
4238 ((struct umtx_shm_obj_list *)(&(o)->umtx_data))
4240 #define USHMF_REG_LINKED 0x0001
4241 #define USHMF_OBJ_LINKED 0x0002
4242 struct umtx_shm_reg {
4243 TAILQ_ENTRY(umtx_shm_reg) ushm_reg_link; /* link in the global registry */
4244 LIST_ENTRY(umtx_shm_reg) ushm_obj_link; /* link in the vm object's list */
4245 struct umtx_key ushm_key; /* shared key naming the backing page */
4246 struct ucred *ushm_cred; /* creator credentials, for RLIMIT_UMTXP accounting */
4247 struct shmfd *ushm_obj; /* anonymous POSIX shm backing object */
4252 LIST_HEAD(umtx_shm_obj_list, umtx_shm_reg);
4253 TAILQ_HEAD(umtx_shm_reg_head, umtx_shm_reg);
4255 static uma_zone_t umtx_shm_reg_zone;
4256 static struct umtx_shm_reg_head umtx_shm_registry[UMTX_CHAINS];
4257 static struct mtx umtx_shm_lock;
4258 static struct umtx_shm_reg_head umtx_shm_reg_delfree =
4259 TAILQ_HEAD_INITIALIZER(umtx_shm_reg_delfree);
4261 static void umtx_shm_free_reg(struct umtx_shm_reg *reg);
4264 umtx_shm_reg_delfree_tq(void *context __unused, int pending __unused)
4266 struct umtx_shm_reg_head d;
4267 struct umtx_shm_reg *reg, *reg1;
4269 TAILQ_INIT(&d);
4270 mtx_lock(&umtx_shm_lock);
4271 TAILQ_CONCAT(&d, &umtx_shm_reg_delfree, ushm_reg_link);
4272 mtx_unlock(&umtx_shm_lock);
4273 TAILQ_FOREACH_SAFE(reg, &d, ushm_reg_link, reg1) {
4274 TAILQ_REMOVE(&d, reg, ushm_reg_link);
4275 umtx_shm_free_reg(reg);
4279 static struct task umtx_shm_reg_delfree_task =
4280 TASK_INITIALIZER(0, umtx_shm_reg_delfree_tq, NULL);
4282 static struct umtx_shm_reg *
4283 umtx_shm_find_reg_locked(const struct umtx_key *key)
4285 struct umtx_shm_reg *reg;
4286 struct umtx_shm_reg_head *reg_head;
4288 KASSERT(key->shared, ("umtx_shm_find_reg_locked: private key"));
4289 mtx_assert(&umtx_shm_lock, MA_OWNED);
4290 reg_head = &umtx_shm_registry[key->hash];
4291 TAILQ_FOREACH(reg, reg_head, ushm_reg_link) {
4292 KASSERT(reg->ushm_key.shared,
4293 ("non-shared key on reg %p %d", reg, reg->ushm_key.shared));
4294 if (reg->ushm_key.info.shared.object ==
4295 key->info.shared.object &&
4296 reg->ushm_key.info.shared.offset ==
4297 key->info.shared.offset) {
4298 KASSERT(reg->ushm_key.type == TYPE_SHM, ("TYPE_USHM"));
4299 KASSERT(reg->ushm_refcnt > 0,
4300 ("reg %p refcnt 0 onlist", reg));
4301 KASSERT((reg->ushm_flags & USHMF_REG_LINKED) != 0,
4302 ("reg %p not linked", reg));
4310 static struct umtx_shm_reg *
4311 umtx_shm_find_reg(const struct umtx_key *key)
4313 struct umtx_shm_reg *reg;
4315 mtx_lock(&umtx_shm_lock);
4316 reg = umtx_shm_find_reg_locked(key);
4317 mtx_unlock(&umtx_shm_lock);
4322 umtx_shm_free_reg(struct umtx_shm_reg *reg)
4325 chgumtxcnt(reg->ushm_cred->cr_ruidinfo, -1, 0);
4326 crfree(reg->ushm_cred);
4327 shm_drop(reg->ushm_obj);
4328 uma_zfree(umtx_shm_reg_zone, reg);
4332 umtx_shm_unref_reg_locked(struct umtx_shm_reg *reg, bool force)
4336 mtx_assert(&umtx_shm_lock, MA_OWNED);
4337 KASSERT(reg->ushm_refcnt > 0, ("ushm_reg %p refcnt 0", reg));
4338 reg->ushm_refcnt--;
4339 res = reg->ushm_refcnt == 0;
4341 if ((reg->ushm_flags & USHMF_REG_LINKED) != 0) {
4342 TAILQ_REMOVE(&umtx_shm_registry[reg->ushm_key.hash],
4343 reg, ushm_reg_link);
4344 reg->ushm_flags &= ~USHMF_REG_LINKED;
4346 if ((reg->ushm_flags & USHMF_OBJ_LINKED) != 0) {
4347 LIST_REMOVE(reg, ushm_obj_link);
4348 reg->ushm_flags &= ~USHMF_OBJ_LINKED;
4355 umtx_shm_unref_reg(struct umtx_shm_reg *reg, bool force)
4361 object = reg->ushm_obj->shm_object;
4362 VM_OBJECT_WLOCK(object);
4363 object->flags |= OBJ_UMTXDEAD;
4364 VM_OBJECT_WUNLOCK(object);
4366 mtx_lock(&umtx_shm_lock);
4367 dofree = umtx_shm_unref_reg_locked(reg, force);
4368 mtx_unlock(&umtx_shm_lock);
4370 umtx_shm_free_reg(reg);
4374 umtx_shm_object_init(vm_object_t object)
4377 LIST_INIT(USHM_OBJ_UMTX(object));
4381 umtx_shm_object_terminated(vm_object_t object)
4383 struct umtx_shm_reg *reg, *reg1;
4386 if (LIST_EMPTY(USHM_OBJ_UMTX(object)))
4390 mtx_lock(&umtx_shm_lock);
4391 LIST_FOREACH_SAFE(reg, USHM_OBJ_UMTX(object), ushm_obj_link, reg1) {
4392 if (umtx_shm_unref_reg_locked(reg, true)) {
4393 TAILQ_INSERT_TAIL(&umtx_shm_reg_delfree, reg,
4398 mtx_unlock(&umtx_shm_lock);
4400 taskqueue_enqueue(taskqueue_thread, &umtx_shm_reg_delfree_task);
4404 umtx_shm_create_reg(struct thread *td, const struct umtx_key *key,
4405 struct umtx_shm_reg **res)
4407 struct umtx_shm_reg *reg, *reg1;
4411 reg = umtx_shm_find_reg(key);
4416 cred = td->td_ucred;
4417 if (!chgumtxcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_UMTXP)))
4419 reg = uma_zalloc(umtx_shm_reg_zone, M_WAITOK | M_ZERO);
4420 reg->ushm_refcnt = 1;
4421 bcopy(key, &reg->ushm_key, sizeof(*key));
4422 reg->ushm_obj = shm_alloc(td->td_ucred, O_RDWR, false);
4423 reg->ushm_cred = crhold(cred);
4424 error = shm_dotruncate(reg->ushm_obj, PAGE_SIZE);
4426 umtx_shm_free_reg(reg);
4429 mtx_lock(&umtx_shm_lock);
4430 reg1 = umtx_shm_find_reg_locked(key);
4432 mtx_unlock(&umtx_shm_lock);
4433 umtx_shm_free_reg(reg);
4438 TAILQ_INSERT_TAIL(&umtx_shm_registry[key->hash], reg, ushm_reg_link);
4439 LIST_INSERT_HEAD(USHM_OBJ_UMTX(key->info.shared.object), reg,
4441 reg->ushm_flags = USHMF_REG_LINKED | USHMF_OBJ_LINKED;
4442 mtx_unlock(&umtx_shm_lock);
4448 umtx_shm_alive(struct thread *td, void *addr)
4451 vm_map_entry_t entry;
4458 map = &td->td_proc->p_vmspace->vm_map;
4459 res = vm_map_lookup(&map, (uintptr_t)addr, VM_PROT_READ, &entry,
4460 &object, &pindex, &prot, &wired);
4461 if (res != KERN_SUCCESS)
4466 ret = (object->flags & OBJ_UMTXDEAD) != 0 ? ENOTTY : 0;
4467 vm_map_lookup_done(map, entry);
4476 umtx_shm_reg_zone = uma_zcreate("umtx_shm", sizeof(struct umtx_shm_reg),
4477 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
4478 mtx_init(&umtx_shm_lock, "umtxshm", NULL, MTX_DEF);
4479 for (i = 0; i < nitems(umtx_shm_registry); i++)
4480 TAILQ_INIT(&umtx_shm_registry[i]);
4484 umtx_shm(struct thread *td, void *addr, u_int flags)
4486 struct umtx_key key;
4487 struct umtx_shm_reg *reg;
4491 if (__bitcount(flags & (UMTX_SHM_CREAT | UMTX_SHM_LOOKUP |
4492 UMTX_SHM_DESTROY | UMTX_SHM_ALIVE)) != 1)
4494 if ((flags & UMTX_SHM_ALIVE) != 0)
4495 return (umtx_shm_alive(td, addr));
4496 error = umtx_key_get(addr, TYPE_SHM, PROCESS_SHARE, &key);
4499 KASSERT(key.shared == 1, ("non-shared key"));
4500 if ((flags & UMTX_SHM_CREAT) != 0) {
4501 error = umtx_shm_create_reg(td, &key, &reg);
4503 reg = umtx_shm_find_reg(&key);
4507 umtx_key_release(&key);
4510 KASSERT(reg != NULL, ("no reg"));
4511 if ((flags & UMTX_SHM_DESTROY) != 0) {
4512 umtx_shm_unref_reg(reg, true);
4516 error = mac_posixshm_check_open(td->td_ucred,
4517 reg->ushm_obj, FFLAGS(O_RDWR));
4520 error = shm_access(reg->ushm_obj, td->td_ucred,
4524 error = falloc_caps(td, &fp, &fd, O_CLOEXEC, NULL);
4526 shm_hold(reg->ushm_obj);
4527 finit(fp, FFLAGS(O_RDWR), DTYPE_SHM, reg->ushm_obj,
4529 td->td_retval[0] = fd;
4533 umtx_shm_unref_reg(reg, false);
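/*
 * A sketch of the userland view (illustrative): addr names a word in the
 * caller's address space that serves as the lookup key, and a successful
 * UMTX_SHM_CREAT or UMTX_SHM_LOOKUP returns an anonymous POSIX shm file
 * descriptor:
 *
 *	fd = _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_CREAT, key_addr, NULL);
 */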
4538 __umtx_op_shm(struct thread *td, struct _umtx_op_args *uap,
4539 const struct umtx_copyops *ops __unused)
4542 return (umtx_shm(td, uap->uaddr1, uap->val));
4546 __umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap,
4547 const struct umtx_copyops *ops)
4549 struct umtx_robust_lists_params rb;
4552 if (ops->compat32) {
4553 if ((td->td_pflags2 & TDP2_COMPAT32RB) == 0 &&
4554 (td->td_rb_list != 0 || td->td_rbp_list != 0 ||
4555 td->td_rb_inact != 0))
4557 } else if ((td->td_pflags2 & TDP2_COMPAT32RB) != 0) {
4561 bzero(&rb, sizeof(rb));
4562 error = ops->copyin_robust_lists(uap->uaddr1, uap->val, &rb);
4567 td->td_pflags2 |= TDP2_COMPAT32RB;
4569 td->td_rb_list = rb.robust_list_offset;
4570 td->td_rbp_list = rb.robust_priv_list_offset;
4571 td->td_rb_inact = rb.robust_inact_offset;
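/*
 * A sketch of the registration call (illustrative; libthr issues it once
 * per thread, with val carrying the structure size for versioning; the
 * list-head symbol here is a hypothetical stand-in):
 *
 *	struct umtx_robust_lists_params rb = {
 *		.robust_list_offset = (uintptr_t)&thread_rb_head,
 *	};
 *	(void)_umtx_op(NULL, UMTX_OP_ROBUST_LISTS, sizeof(rb), &rb, NULL);
 */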
4575 #if defined(__i386__) || defined(__amd64__)
4577 * Provide the standard 32-bit definitions for x86, since native/compat32 use a
4578 * 32-bit time_t there. Other architectures just need the i386 definitions
4579 * along with their standard compat32.
4581 struct timespecx32 {
4586 struct umtx_timex32 {
4587 struct timespecx32 _timeout;
4593 #define timespeci386 timespec32
4594 #define umtx_timei386 umtx_time32
4596 #else /* !__i386__ && !__amd64__ */
4597 /* 32-bit architectures can emulate i386, so define these almost everywhere. */
4598 struct timespeci386 {
4603 struct umtx_timei386 {
4604 struct timespeci386 _timeout;
4609 #if defined(__LP64__)
4610 #define timespecx32 timespec32
4611 #define umtx_timex32 umtx_time32
4616 umtx_copyin_robust_lists32(const void *uaddr, size_t size,
4617 struct umtx_robust_lists_params *rbp)
4619 struct umtx_robust_lists_params_compat32 rb32;
4622 if (size > sizeof(rb32))
4624 bzero(&rb32, sizeof(rb32));
4625 error = copyin(uaddr, &rb32, size);
4628 CP(rb32, *rbp, robust_list_offset);
4629 CP(rb32, *rbp, robust_priv_list_offset);
4630 CP(rb32, *rbp, robust_inact_offset);
4636 umtx_copyin_timeouti386(const void *uaddr, struct timespec *tsp)
4638 struct timespeci386 ts32;
4641 error = copyin(uaddr, &ts32, sizeof(ts32));
4643 if (ts32.tv_sec < 0 ||
4644 ts32.tv_nsec >= 1000000000 ||
4648 CP(ts32, *tsp, tv_sec);
4649 CP(ts32, *tsp, tv_nsec);
4656 umtx_copyin_umtx_timei386(const void *uaddr, size_t size, struct _umtx_time *tp)
4658 struct umtx_timei386 t32;
4661 t32._clockid = CLOCK_REALTIME;
4663 if (size <= sizeof(t32._timeout))
4664 error = copyin(uaddr, &t32._timeout, sizeof(t32._timeout));
4666 error = copyin(uaddr, &t32, sizeof(t32));
4669 if (t32._timeout.tv_sec < 0 ||
4670 t32._timeout.tv_nsec >= 1000000000 || t32._timeout.tv_nsec < 0)
4672 TS_CP(t32, *tp, _timeout);
4673 CP(t32, *tp, _flags);
4674 CP(t32, *tp, _clockid);
4679 umtx_copyout_timeouti386(void *uaddr, size_t sz, struct timespec *tsp)
4681 struct timespeci386 remain32 = {
4682 .tv_sec = tsp->tv_sec,
4683 .tv_nsec = tsp->tv_nsec,
4687 * Should be guaranteed by the caller: sz == uaddr1 - sizeof(_umtx_time),
4688 * and we are only called if sz >= sizeof(timespec), as supplied in the umtx_copyops.
4691 KASSERT(sz >= sizeof(remain32),
4692 ("umtx_copyops specifies incorrect sizes"));
4694 return (copyout(&remain32, uaddr, sizeof(remain32)));
4696 #endif /* !__i386__ */
4698 #if defined(__i386__) || defined(__LP64__)
4700 umtx_copyin_timeoutx32(const void *uaddr, struct timespec *tsp)
4702 struct timespecx32 ts32;
4705 error = copyin(uaddr, &ts32, sizeof(ts32));
4707 if (ts32.tv_sec < 0 ||
4708 ts32.tv_nsec >= 1000000000 ||
4712 CP(ts32, *tsp, tv_sec);
4713 CP(ts32, *tsp, tv_nsec);
4720 umtx_copyin_umtx_timex32(const void *uaddr, size_t size, struct _umtx_time *tp)
4722 struct umtx_timex32 t32;
4725 t32._clockid = CLOCK_REALTIME;
4727 if (size <= sizeof(t32._timeout))
4728 error = copyin(uaddr, &t32._timeout, sizeof(t32._timeout));
4730 error = copyin(uaddr, &t32, sizeof(t32));
4733 if (t32._timeout.tv_sec < 0 ||
4734 t32._timeout.tv_nsec >= 1000000000 || t32._timeout.tv_nsec < 0)
4736 TS_CP(t32, *tp, _timeout);
4737 CP(t32, *tp, _flags);
4738 CP(t32, *tp, _clockid);
4743 umtx_copyout_timeoutx32(void *uaddr, size_t sz, struct timespec *tsp)
4745 struct timespecx32 remain32 = {
4746 .tv_sec = tsp->tv_sec,
4747 .tv_nsec = tsp->tv_nsec,
4751 * Should be guaranteed by the caller: sz == uaddr1 - sizeof(_umtx_time),
4752 * and we are only called if sz >= sizeof(timespec), as supplied in the umtx_copyops.
4755 KASSERT(sz >= sizeof(remain32),
4756 ("umtx_copyops specifies incorrect sizes"));
4758 return (copyout(&remain32, uaddr, sizeof(remain32)));
4760 #endif /* __i386__ || __LP64__ */
4762 typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap,
4763 const struct umtx_copyops *umtx_ops);
4765 static const _umtx_op_func op_table[] = {
4766 #ifdef COMPAT_FREEBSD10
4767 [UMTX_OP_LOCK] = __umtx_op_lock_umtx,
4768 [UMTX_OP_UNLOCK] = __umtx_op_unlock_umtx,
4770 [UMTX_OP_LOCK] = __umtx_op_unimpl,
4771 [UMTX_OP_UNLOCK] = __umtx_op_unimpl,
4773 [UMTX_OP_WAIT] = __umtx_op_wait,
4774 [UMTX_OP_WAKE] = __umtx_op_wake,
4775 [UMTX_OP_MUTEX_TRYLOCK] = __umtx_op_trylock_umutex,
4776 [UMTX_OP_MUTEX_LOCK] = __umtx_op_lock_umutex,
4777 [UMTX_OP_MUTEX_UNLOCK] = __umtx_op_unlock_umutex,
4778 [UMTX_OP_SET_CEILING] = __umtx_op_set_ceiling,
4779 [UMTX_OP_CV_WAIT] = __umtx_op_cv_wait,
4780 [UMTX_OP_CV_SIGNAL] = __umtx_op_cv_signal,
4781 [UMTX_OP_CV_BROADCAST] = __umtx_op_cv_broadcast,
4782 [UMTX_OP_WAIT_UINT] = __umtx_op_wait_uint,
4783 [UMTX_OP_RW_RDLOCK] = __umtx_op_rw_rdlock,
4784 [UMTX_OP_RW_WRLOCK] = __umtx_op_rw_wrlock,
4785 [UMTX_OP_RW_UNLOCK] = __umtx_op_rw_unlock,
4786 [UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private,
4787 [UMTX_OP_WAKE_PRIVATE] = __umtx_op_wake_private,
4788 [UMTX_OP_MUTEX_WAIT] = __umtx_op_wait_umutex,
4789 [UMTX_OP_MUTEX_WAKE] = __umtx_op_wake_umutex,
4790 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
4791 [UMTX_OP_SEM_WAIT] = __umtx_op_sem_wait,
4792 [UMTX_OP_SEM_WAKE] = __umtx_op_sem_wake,
4794 [UMTX_OP_SEM_WAIT] = __umtx_op_unimpl,
4795 [UMTX_OP_SEM_WAKE] = __umtx_op_unimpl,
4797 [UMTX_OP_NWAKE_PRIVATE] = __umtx_op_nwake_private,
4798 [UMTX_OP_MUTEX_WAKE2] = __umtx_op_wake2_umutex,
4799 [UMTX_OP_SEM2_WAIT] = __umtx_op_sem2_wait,
4800 [UMTX_OP_SEM2_WAKE] = __umtx_op_sem2_wake,
4801 [UMTX_OP_SHM] = __umtx_op_shm,
4802 [UMTX_OP_ROBUST_LISTS] = __umtx_op_robust_lists,
4805 static const struct umtx_copyops umtx_native_ops = {
4806 .copyin_timeout = umtx_copyin_timeout,
4807 .copyin_umtx_time = umtx_copyin_umtx_time,
4808 .copyin_robust_lists = umtx_copyin_robust_lists,
4809 .copyout_timeout = umtx_copyout_timeout,
4810 .timespec_sz = sizeof(struct timespec),
4811 .umtx_time_sz = sizeof(struct _umtx_time),
4815 static const struct umtx_copyops umtx_native_opsi386 = {
4816 .copyin_timeout = umtx_copyin_timeouti386,
4817 .copyin_umtx_time = umtx_copyin_umtx_timei386,
4818 .copyin_robust_lists = umtx_copyin_robust_lists32,
4819 .copyout_timeout = umtx_copyout_timeouti386,
4820 .timespec_sz = sizeof(struct timespeci386),
4821 .umtx_time_sz = sizeof(struct umtx_timei386),
4826 #if defined(__i386__) || defined(__LP64__)
4827 /* i386 can emulate other 32-bit archs, too! */
4828 static const struct umtx_copyops umtx_native_opsx32 = {
4829 .copyin_timeout = umtx_copyin_timeoutx32,
4830 .copyin_umtx_time = umtx_copyin_umtx_timex32,
4831 .copyin_robust_lists = umtx_copyin_robust_lists32,
4832 .copyout_timeout = umtx_copyout_timeoutx32,
4833 .timespec_sz = sizeof(struct timespecx32),
4834 .umtx_time_sz = sizeof(struct umtx_timex32),
4838 #ifdef COMPAT_FREEBSD32
4840 #define umtx_native_ops32 umtx_native_opsi386
4842 #define umtx_native_ops32 umtx_native_opsx32
4844 #endif /* COMPAT_FREEBSD32 */
4845 #endif /* __i386__ || __LP64__ */
4847 #define UMTX_OP__FLAGS (UMTX_OP__32BIT | UMTX_OP__I386)
4850 kern__umtx_op(struct thread *td, void *obj, int op, unsigned long val,
4851 void *uaddr1, void *uaddr2, const struct umtx_copyops *ops)
4853 struct _umtx_op_args uap = {
4855 .op = op & ~UMTX_OP__FLAGS,
4861 if (uap.op >= nitems(op_table))
4863 return ((*op_table[uap.op])(td, &uap, ops));
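/*
 * For reference, the corresponding userland entry point is the
 * _umtx_op(2) syscall:
 *
 *	int _umtx_op(void *obj, int op, u_long val,
 *	    void *uaddr, void *uaddr2);
 *
 * e.g. a bare futex-style wait/wake pair on a 32-bit word (sketch):
 *
 *	(void)_umtx_op(&word, UMTX_OP_WAIT_UINT_PRIVATE, expected,
 *	    NULL, NULL);
 *	(void)_umtx_op(&word, UMTX_OP_WAKE_PRIVATE, 1, NULL, NULL);
 */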
4867 sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
4869 const struct umtx_copyops *umtx_ops;
4871 umtx_ops = &umtx_native_ops;
4873 if ((uap->op & (UMTX_OP__32BIT | UMTX_OP__I386)) != 0) {
4874 if ((uap->op & UMTX_OP__I386) != 0)
4875 umtx_ops = &umtx_native_opsi386;
4877 umtx_ops = &umtx_native_opsx32;
4879 #elif !defined(__i386__)
4880 /* We consider UMTX_OP__32BIT a nop on !i386 ILP32. */
4881 if ((uap->op & UMTX_OP__I386) != 0)
4882 umtx_ops = &umtx_native_opsi386;
4884 /* Likewise, UMTX_OP__I386 is a nop on i386. */
4885 if ((uap->op & UMTX_OP__32BIT) != 0)
4886 umtx_ops = &umtx_native_opsx32;
4888 return (kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr1,
4889 uap->uaddr2, umtx_ops));
4892 #ifdef COMPAT_FREEBSD32
4893 #ifdef COMPAT_FREEBSD10
4895 freebsd10_freebsd32__umtx_lock(struct thread *td,
4896 struct freebsd10_freebsd32__umtx_lock_args *uap)
4898 return (do_lock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid, NULL));
4902 freebsd10_freebsd32__umtx_unlock(struct thread *td,
4903 struct freebsd10_freebsd32__umtx_unlock_args *uap)
4905 return (do_unlock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid));
4907 #endif /* COMPAT_FREEBSD10 */
4910 freebsd32__umtx_op(struct thread *td, struct freebsd32__umtx_op_args *uap)
4913 return (kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr1,
4914 uap->uaddr2, &umtx_native_ops32));
4916 #endif /* COMPAT_FREEBSD32 */
4919 umtx_thread_init(struct thread *td)
4922 td->td_umtxq = umtxq_alloc();
4923 td->td_umtxq->uq_thread = td;
4927 umtx_thread_fini(struct thread *td)
4930 umtxq_free(td->td_umtxq);
4934 * Called when a new thread is created, e.g. by fork().
4937 umtx_thread_alloc(struct thread *td)
4942 uq->uq_inherited_pri = PRI_MAX;
4944 KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
4945 KASSERT(uq->uq_thread == td, ("uq_thread != td"));
4946 KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
4947 KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
4953 * Clear the robust lists for all of the process's threads; do not
4954 * delay the cleanup to thread exit, since the relevant address space
4955 * is being destroyed right now.
4958 umtx_exec(struct proc *p)
4962 KASSERT(p == curproc, ("need curproc"));
4963 KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
4964 (p->p_flag & P_STOPPED_SINGLE) != 0,
4965 ("curproc must be single-threaded"));
4967 * There is no need to lock the list, as only this thread can be accessing it.
4970 FOREACH_THREAD_IN_PROC(p, td) {
4971 KASSERT(td == curthread ||
4972 ((td->td_flags & TDF_BOUNDARY) != 0 && TD_IS_SUSPENDED(td)),
4973 ("running thread %p %p", p, td));
4974 umtx_thread_cleanup(td);
4975 td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
4983 umtx_thread_exit(struct thread *td)
4986 umtx_thread_cleanup(td);
4990 umtx_read_uptr(struct thread *td, uintptr_t ptr, uintptr_t *res, bool compat32)
4997 error = fueword32((void *)ptr, &res32);
5001 error = fueword((void *)ptr, &res1);
5011 umtx_read_rb_list(struct thread *td, struct umutex *m, uintptr_t *rb_list,
5014 struct umutex32 m32;
5017 memcpy(&m32, m, sizeof(m32));
5018 *rb_list = m32.m_rb_lnk;
5020 *rb_list = m->m_rb_lnk;
5025 umtx_handle_rb(struct thread *td, uintptr_t rbp, uintptr_t *rb_list, bool inact,
5031 KASSERT(td->td_proc == curproc, ("need current vmspace"));
5032 error = copyin((void *)rbp, &m, sizeof(m));
5035 if (rb_list != NULL)
5036 umtx_read_rb_list(td, &m, rb_list, compat32);
5037 if ((m.m_flags & UMUTEX_ROBUST) == 0)
5039 if ((m.m_owner & ~UMUTEX_CONTESTED) != td->td_tid)
5040 /* inact is cleared after unlock; allow the inconsistency. */
5041 return (inact ? 0 : EINVAL);
5042 return (do_unlock_umutex(td, (struct umutex *)rbp, true));
5046 umtx_cleanup_rb_list(struct thread *td, uintptr_t rb_list, uintptr_t *rb_inact,
5047 const char *name, bool compat32)
5055 error = umtx_read_uptr(td, rb_list, &rbp, compat32);
5056 for (i = 0; error == 0 && rbp != 0 && i < umtx_max_rb; i++) {
5057 if (rbp == *rb_inact) {
5062 error = umtx_handle_rb(td, rbp, &rbp, inact, compat32);
5064 if (i == umtx_max_rb && umtx_verbose_rb) {
5065 uprintf("comm %s pid %d: reached umtx %smax rb %d\n",
5066 td->td_proc->p_comm, td->td_proc->p_pid, name, umtx_max_rb);
5068 if (error != 0 && umtx_verbose_rb) {
5069 uprintf("comm %s pid %d: handling %srb error %d\n",
5070 td->td_proc->p_comm, td->td_proc->p_pid, name, error);
5075 * Clean up umtx data.
5078 umtx_thread_cleanup(struct thread *td)
5086 * Disown pi mutexes.
5090 if (uq->uq_inherited_pri != PRI_MAX ||
5091 !TAILQ_EMPTY(&uq->uq_pi_contested)) {
5092 mtx_lock(&umtx_lock);
5093 uq->uq_inherited_pri = PRI_MAX;
5094 while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
5095 pi->pi_owner = NULL;
5096 TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
5098 mtx_unlock(&umtx_lock);
5100 sched_lend_user_prio_cond(td, PRI_MAX);
5103 compat32 = (td->td_pflags2 & TDP2_COMPAT32RB) != 0;
5104 td->td_pflags2 &= ~TDP2_COMPAT32RB;
5106 if (td->td_rb_inact == 0 && td->td_rb_list == 0 && td->td_rbp_list == 0)
5110 * Handle terminated robust mutexes. Must be done after the
5111 * robust-PI disown, otherwise unlock could see unowned entries.
5114 rb_inact = td->td_rb_inact;
5115 if (rb_inact != 0)
5116 (void)umtx_read_uptr(td, rb_inact, &rb_inact, compat32);
5117 umtx_cleanup_rb_list(td, td->td_rb_list, &rb_inact, "", compat32);
5118 umtx_cleanup_rb_list(td, td->td_rbp_list, &rb_inact, "priv ", compat32);
5119 if (rb_inact != 0)
5120 (void)umtx_handle_rb(td, rb_inact, NULL, true, compat32);