 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_umtx_profiling.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/eventhandler.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <machine/cpu.h>

#ifdef COMPAT_FREEBSD32
#include <compat/freebsd32/freebsd32_proto.h>
#define _UMUTEX_WAIT		2

/* Priority inheritance mutex info. */
        struct thread		*pi_owner;

        /* List entry to link umtx held by a thread */
        TAILQ_ENTRY(umtx_pi)	pi_link;

        /* List entry in the hash */
        TAILQ_ENTRY(umtx_pi)	pi_hashlink;

        /* List of waiters */
        TAILQ_HEAD(,umtx_q)	pi_blocked;

        /* Identifies a userland lock object */
        struct umtx_key		pi_key;

/* A userland synchronization object user. */
        /* Linked list for the hash. */
        TAILQ_ENTRY(umtx_q)	uq_link;

        struct umtx_key		uq_key;

#define UQF_UMTXQ	0x0001

        /* The waiting thread. */
        struct thread		*uq_thread;

        /*
         * Blocked on a PI mutex.  Reads may use either the chain lock
         * or umtx_lock; writes must hold both the chain lock and
         * umtx_lock.
         */
        struct umtx_pi		*uq_pi_blocked;

        /* On the blocked list */
        TAILQ_ENTRY(umtx_q)	uq_lockq;

        /* Contested PI mutexes owned by this thread */
        TAILQ_HEAD(,umtx_pi)	uq_pi_contested;

        /* Priority inherited from a PP mutex */
        u_char			uq_inherited_pri;

        /* Spare queue ready to be reused */
        struct umtxq_queue	*uq_spare_queue;

        /* The queue we are on */
        struct umtxq_queue	*uq_cur_queue;

TAILQ_HEAD(umtxq_head, umtx_q);

/* Per-key wait queue */
        struct umtxq_head	head;
        LIST_ENTRY(umtxq_queue)	link;

LIST_HEAD(umtxq_list, umtxq_queue);

/* Userland lock object's wait-queue chain */
        /* Lock for this chain. */
        /* List of sleep queues. */
        struct umtxq_list	uc_queue[2];
#define UMTX_SHARED_QUEUE	0
#define UMTX_EXCLUSIVE_QUEUE	1

        LIST_HEAD(, umtxq_queue) uc_spare_queue;

        /* Chain lock waiters */
        /* All PIs in the list */
        TAILQ_HEAD(,umtx_pi)	uc_pi_list;

#ifdef UMTX_PROFILING

#define UMTXQ_LOCKED_ASSERT(uc)	mtx_assert(&(uc)->uc_lock, MA_OWNED)
#define UMTXQ_BUSY_ASSERT(uc)	KASSERT((uc)->uc_busy != 0, ("umtx chain is not busy"))
/*
 * Don't propagate time-sharing priority; there is a security concern:
 * a user could simply create a PI mutex, let thread A lock it, and
 * let another thread B block on it.  Because B is sleeping, its
 * priority would be boosted, which would boost A's priority via
 * priority propagation as well, and A's priority would then never be
 * lowered even if A were using 100% CPU.  This is unfair to other
 * processes.
 */
#define UPRI(td)	(((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\
			  (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\
			 PRI_MAX_TIMESHARE : (td)->td_user_pri)
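
/*
 * Editor's worked example (not in the original source): UPRI() clamps
 * any time-sharing user priority to PRI_MAX_TIMESHARE, so blocking on
 * a PI mutex never lends a boosted time-sharing priority.  Assuming
 * the typical values PRI_MIN_TIMESHARE == 120 and PRI_MAX_TIMESHARE ==
 * 223 (lower is better), a thread with td_user_pri == 130 is treated
 * as 223 for propagation purposes, while a real-time thread with
 * td_user_pri == 100 keeps its 100 and does propagate it.
 */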
#define GOLDEN_RATIO_PRIME	2654404609U
#define UMTX_CHAINS		512
#define UMTX_SHIFTS		(__WORD_BIT - 9)

#define GET_SHARE(flags)	\
    (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)

#define BUSY_SPINS		200

static uma_zone_t		umtx_pi_zone;
static struct umtxq_chain	umtxq_chains[2][UMTX_CHAINS];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
static int			umtx_pi_allocated;

static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx debug");
SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
    &umtx_pi_allocated, 0, "Allocated umtx_pi");

#ifdef UMTX_PROFILING
static long max_length;
SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length");
static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD, 0, "umtx chain stats");

static void umtxq_sysinit(void *);
static void umtxq_hash(struct umtx_key *key);
static struct umtxq_chain *umtxq_getchain(struct umtx_key *key);
static void umtxq_lock(struct umtx_key *key);
static void umtxq_unlock(struct umtx_key *key);
static void umtxq_busy(struct umtx_key *key);
static void umtxq_unbusy(struct umtx_key *key);
static void umtxq_insert_queue(struct umtx_q *uq, int q);
static void umtxq_remove_queue(struct umtx_q *uq, int q);
static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *);
static int umtxq_count(struct umtx_key *key);
static struct umtx_pi *umtx_pi_alloc(int);
static void umtx_pi_free(struct umtx_pi *pi);
static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags);
static void umtx_thread_cleanup(struct thread *td);
static void umtx_exec_hook(void *arg __unused, struct proc *p __unused,
    struct image_params *imgp __unused);
SYSINIT(umtx, SI_SUB_EVENTHANDLER + 1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);

#define umtxq_signal(key, nwake)	umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
#define umtxq_insert(uq)	umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
#define umtxq_remove(uq)	umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)

static struct mtx umtx_lock;

#ifdef UMTX_PROFILING
umtx_init_profiling(void)
        struct sysctl_oid *chain_oid;

        for (i = 0; i < UMTX_CHAINS; ++i) {
                snprintf(chain_name, sizeof(chain_name), "%d", i);
                chain_oid = SYSCTL_ADD_NODE(NULL,
                    SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
                    chain_name, CTLFLAG_RD, NULL, "umtx hash stats");
                SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL);
                SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL);

umtxq_sysinit(void *arg __unused)
        umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        for (i = 0; i < 2; ++i) {
                for (j = 0; j < UMTX_CHAINS; ++j) {
                        mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
                            MTX_DEF | MTX_DUPOK);
                        LIST_INIT(&umtxq_chains[i][j].uc_queue[0]);
                        LIST_INIT(&umtxq_chains[i][j].uc_queue[1]);
                        LIST_INIT(&umtxq_chains[i][j].uc_spare_queue);
                        TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
                        umtxq_chains[i][j].uc_busy = 0;
                        umtxq_chains[i][j].uc_waiters = 0;
#ifdef UMTX_PROFILING
                        umtxq_chains[i][j].length = 0;
                        umtxq_chains[i][j].max_length = 0;
#ifdef UMTX_PROFILING
        umtx_init_profiling();
        mtx_init(&umtx_lock, "umtx lock", NULL, MTX_SPIN);
        EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
            EVENTHANDLER_PRI_ANY);

        uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
        uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX, M_WAITOK | M_ZERO);
        TAILQ_INIT(&uq->uq_spare_queue->head);
        TAILQ_INIT(&uq->uq_pi_contested);
        uq->uq_inherited_pri = PRI_MAX;

umtxq_free(struct umtx_q *uq)
        MPASS(uq->uq_spare_queue != NULL);
        free(uq->uq_spare_queue, M_UMTX);

umtxq_hash(struct umtx_key *key)
        unsigned n = (uintptr_t)key->info.both.a + key->info.both.b;
        key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
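
/*
 * Editor's worked example (not in the original source): this is a
 * Fibonacci-style multiplicative hash.  With __WORD_BIT == 32,
 * UMTX_SHIFTS == 23, so for a key with info.both.a == 0x1000 and
 * info.both.b == 0x10:
 *
 *	n    = 0x1010
 *	hash = ((0x1010 * 2654404609U) >> 23) % 512
 *
 * The multiplication scrambles the address bits, the shift keeps the
 * well-mixed top 9 bits, and the modulo (a no-op for 9 bits) selects
 * one of the UMTX_CHAINS chains.
 */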
static inline struct umtxq_chain *
umtxq_getchain(struct umtx_key *key)
        if (key->type <= TYPE_SEM)
                return (&umtxq_chains[1][key->hash]);
        return (&umtxq_chains[0][key->hash]);

umtxq_lock(struct umtx_key *key)
        struct umtxq_chain *uc;

        uc = umtxq_getchain(key);
        mtx_lock(&uc->uc_lock);

umtxq_unlock(struct umtx_key *key)
        struct umtxq_chain *uc;

        uc = umtxq_getchain(key);
        mtx_unlock(&uc->uc_lock);
/*
 * Set the chain to the busy state when a following operation
 * may block (a kernel mutex cannot be used).
 */
umtxq_busy(struct umtx_key *key)
        struct umtxq_chain *uc;

        uc = umtxq_getchain(key);
        mtx_assert(&uc->uc_lock, MA_OWNED);
                int count = BUSY_SPINS;

                while (uc->uc_busy && --count > 0)
                while (uc->uc_busy) {
                        msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
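                /*
                 * Editor's note (not in the original source): the busy
                 * flag is handed off with a brief spin first, on the
                 * theory that the current holder usually clears it
                 * quickly; only after BUSY_SPINS iterations does the
                 * thread give up and msleep() on the chain, to be
                 * woken when umtxq_unbusy() releases it.
                 */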
umtxq_unbusy(struct umtx_key *key)
        struct umtxq_chain *uc;

        uc = umtxq_getchain(key);
        mtx_assert(&uc->uc_lock, MA_OWNED);
        KASSERT(uc->uc_busy != 0, ("not busy"));

static struct umtxq_queue *
umtxq_queue_lookup(struct umtx_key *key, int q)
        struct umtxq_queue *uh;
        struct umtxq_chain *uc;

        uc = umtxq_getchain(key);
        UMTXQ_LOCKED_ASSERT(uc);
        LIST_FOREACH(uh, &uc->uc_queue[q], link) {
                if (umtx_key_match(&uh->key, key))

umtxq_insert_queue(struct umtx_q *uq, int q)
        struct umtxq_queue *uh;
        struct umtxq_chain *uc;

        uc = umtxq_getchain(&uq->uq_key);
        UMTXQ_LOCKED_ASSERT(uc);
        KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue"));
        uh = umtxq_queue_lookup(&uq->uq_key, q);
                LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
                uh = uq->uq_spare_queue;
                uh->key = uq->uq_key;
                LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
        uq->uq_spare_queue = NULL;

        TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
#ifdef UMTX_PROFILING
        if (uc->length > uc->max_length) {
                uc->max_length = uc->length;
                if (uc->max_length > max_length)
                        max_length = uc->max_length;
        uq->uq_flags |= UQF_UMTXQ;
        uq->uq_cur_queue = uh;

umtxq_remove_queue(struct umtx_q *uq, int q)
        struct umtxq_chain *uc;
        struct umtxq_queue *uh;

        uc = umtxq_getchain(&uq->uq_key);
        UMTXQ_LOCKED_ASSERT(uc);
        if (uq->uq_flags & UQF_UMTXQ) {
                uh = uq->uq_cur_queue;
                TAILQ_REMOVE(&uh->head, uq, uq_link);
#ifdef UMTX_PROFILING
                uq->uq_flags &= ~UQF_UMTXQ;
                if (TAILQ_EMPTY(&uh->head)) {
                        KASSERT(uh->length == 0,
                            ("inconsistent umtxq_queue length"));
                        LIST_REMOVE(uh, link);
                        uh = LIST_FIRST(&uc->uc_spare_queue);
                        KASSERT(uh != NULL, ("uc_spare_queue is empty"));
                        LIST_REMOVE(uh, link);
                uq->uq_spare_queue = uh;
                uq->uq_cur_queue = NULL;
/*
 * Check if there are multiple waiters.
 */
umtxq_count(struct umtx_key *key)
        struct umtxq_chain *uc;
        struct umtxq_queue *uh;

        uc = umtxq_getchain(key);
        UMTXQ_LOCKED_ASSERT(uc);
        uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);

/*
 * Check if there are multiple PI waiters and return the first one.
 */
umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
        struct umtxq_chain *uc;
        struct umtxq_queue *uh;

        uc = umtxq_getchain(key);
        UMTXQ_LOCKED_ASSERT(uc);
        uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
                *first = TAILQ_FIRST(&uh->head);

/*
 * Wake up threads waiting on a userland object.
 */
umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
        struct umtxq_chain *uc;
        struct umtxq_queue *uh;

        uc = umtxq_getchain(key);
        UMTXQ_LOCKED_ASSERT(uc);
        uh = umtxq_queue_lookup(key, q);
        while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
                umtxq_remove_queue(uq, q);

/*
 * Wake up the specified thread.
 */
umtxq_signal_thread(struct umtx_q *uq)
        struct umtxq_chain *uc;

        uc = umtxq_getchain(&uq->uq_key);
        UMTXQ_LOCKED_ASSERT(uc);
tstohz(const struct timespec *tsp)
        TIMESPEC_TO_TIMEVAL(&tv, tsp);

abs_timeout_init(struct abs_timeout *timo, int clockid, int absolute,
    const struct timespec *timeout)
        timo->clockid = clockid;
                kern_clock_gettime(curthread, clockid, &timo->end);
                timo->cur = timo->end;
                timespecadd(&timo->end, timeout);
                timo->end = *timeout;
                kern_clock_gettime(curthread, clockid, &timo->cur);

abs_timeout_init2(struct abs_timeout *timo, const struct _umtx_time *umtxtime)
        abs_timeout_init(timo, umtxtime->_clockid,
            (umtxtime->_flags & UMTX_ABSTIME) != 0,
            &umtxtime->_timeout);

abs_timeout_update(struct abs_timeout *timo)
        kern_clock_gettime(curthread, timo->clockid, &timo->cur);
        return (timespeccmp(&timo->cur, &timo->end, >=));

abs_timeout_gethz(struct abs_timeout *timo)
        timespecsub(&tts, &timo->cur);
        return (tstohz(&tts));
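
/*
 * Editor's sketch (not in the original source) of how a deadline
 * becomes an msleep() argument: for a relative timeout, end is set to
 * now + timeout, and abs_timeout_gethz() later converts the remaining
 * interval into scheduler ticks.  For example, with hz == 1000, a
 * remaining interval of { tv_sec = 1, tv_nsec = 500000000 } yields
 * roughly 1500 ticks; the conversion rounds up so a nonzero remainder
 * never sleeps too short.
 */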
/*
 * Put the thread to sleep; before sleeping, check whether the
 * thread was removed from the umtx queue.
 */
umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *timo)
        struct umtxq_chain *uc;

        uc = umtxq_getchain(&uq->uq_key);
        UMTXQ_LOCKED_ASSERT(uc);
                if (!(uq->uq_flags & UQF_UMTXQ))
                error = msleep(uq, &uc->uc_lock, PCATCH, wmesg,
                    timo == NULL ? 0 : abs_timeout_gethz(timo));
                if (error != EWOULDBLOCK)
                umtxq_unlock(&uq->uq_key);
                if (abs_timeout_update(timo)) {
                        umtxq_lock(&uq->uq_key);
                umtxq_lock(&uq->uq_key);
/*
 * Convert a userspace address into a unique logical address.
 */
umtx_key_get(void *addr, int type, int share, struct umtx_key *key)
        struct thread *td = curthread;
        vm_map_entry_t entry;

        if (share == THREAD_SHARE) {
                key->info.private.vs = td->td_proc->p_vmspace;
                key->info.private.addr = (uintptr_t)addr;
                MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
                map = &td->td_proc->p_vmspace->vm_map;
                if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
                    &entry, &key->info.shared.object, &pindex, &prot,
                    &wired) != KERN_SUCCESS) {
                if ((share == PROCESS_SHARE) ||
                    (share == AUTO_SHARE &&
                     VM_INHERIT_SHARE == entry->inheritance)) {
                        key->info.shared.offset = entry->offset + entry->start -
                        vm_object_reference(key->info.shared.object);
                        key->info.private.vs = td->td_proc->p_vmspace;
                        key->info.private.addr = (uintptr_t)addr;
                vm_map_lookup_done(map, entry);
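
/*
 * Editor's sketch (not in the original source): two umtx words at the
 * same userspace address in different processes compare equal only if
 * they resolve to the same logical key.  A THREAD_SHARE key is just
 * (vmspace, address), so it is private to one process; a PROCESS_SHARE
 * key is (vm_object, offset), so processes mapping the same object see
 * the same key; AUTO_SHARE picks between the two based on whether the
 * backing mapping is inherited as shared.
 */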
umtx_key_release(struct umtx_key *key)
                vm_object_deallocate(key->info.shared.object);

/*
 * Lock a umtx object.
 */
do_lock_umtx(struct thread *td, struct umtx *umtx, u_long id,
    const struct timespec *timeout)
        struct abs_timeout timo;

                abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout);

        /*
         * Care must be exercised when dealing with the umtx structure.
         * It can fault on any access.
         */
                /*
                 * Try the uncontested case.  This should be done in userland.
                 */
                owner = casuword(&umtx->u_owner, UMTX_UNOWNED, id);

                /* The acquire succeeded. */
                if (owner == UMTX_UNOWNED)

                /* The address was invalid. */

                /* If no one owns it but it is contested, try to acquire it. */
                if (owner == UMTX_CONTESTED) {
                        owner = casuword(&umtx->u_owner,
                            UMTX_CONTESTED, id | UMTX_CONTESTED);
                        if (owner == UMTX_CONTESTED)

                        /* The address was invalid. */

                        /* If this failed the lock has changed, restart. */

                /*
                 * If we caught a signal, we have retried and now
                 * exit immediately.
                 */
                if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK,
                    AUTO_SHARE, &uq->uq_key)) != 0)

                umtxq_lock(&uq->uq_key);
                umtxq_busy(&uq->uq_key);
                umtxq_unbusy(&uq->uq_key);
                umtxq_unlock(&uq->uq_key);

                /*
                 * Set the contested bit so that a release in user space
                 * knows to use the system call for unlock.  If this fails
                 * either someone else has acquired the lock or it has been
                 * released.
                 */
                old = casuword(&umtx->u_owner, owner, owner | UMTX_CONTESTED);

                /* The address was invalid. */
                        umtxq_lock(&uq->uq_key);
                        umtxq_unlock(&uq->uq_key);
                        umtx_key_release(&uq->uq_key);

                /*
                 * We set the contested bit, so sleep.  Otherwise the lock
                 * changed and we need to retry, or we lost a race to the
                 * thread unlocking the umtx.
                 */
                umtxq_lock(&uq->uq_key);
                error = umtxq_sleep(uq, "umtx", timeout == NULL ? NULL :
                    &timo);
                umtxq_unlock(&uq->uq_key);
                umtx_key_release(&uq->uq_key);

        if (timeout == NULL) {
                /* Mutex locking is restarted if it is interrupted. */
                /* Timed locking is not restarted. */
                if (error == ERESTART)
/*
 * Unlock a umtx object.
 */
do_unlock_umtx(struct thread *td, struct umtx *umtx, u_long id)
        /*
         * Make sure we own this mtx.
         */
        owner = fuword(__DEVOLATILE(u_long *, &umtx->u_owner));
        if ((owner & ~UMTX_CONTESTED) != id)

        /* This should be done in userland. */
        if ((owner & UMTX_CONTESTED) == 0) {
                old = casuword(&umtx->u_owner, owner, UMTX_UNOWNED);

        /* We should only ever be in here for contested locks. */
        if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK, AUTO_SHARE,

        count = umtxq_count(&key);

        /*
         * When unlocking the umtx, it must be marked as unowned if
         * zero or one thread is waiting for it; otherwise, it must
         * be marked as contested.
         */
        old = casuword(&umtx->u_owner, owner,
            count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
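
        /*
         * Editor's note (not in the original source): with count <= 1
         * we are about to wake the last (or only) waiter, so no other
         * thread remains queued and the word can go back to the
         * uncontested state, letting future unlocks stay in userland.
         * With two or more waiters the contested bit must stay set so
         * the next unlock re-enters the kernel to wake the rest.
         */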
                umtxq_signal(&key, 1);

        umtx_key_release(&key);

#ifdef COMPAT_FREEBSD32
/*
 * Lock a umtx object (32-bit variant).
 */
do_lock_umtx32(struct thread *td, uint32_t *m, uint32_t id,
    const struct timespec *timeout)
        struct abs_timeout timo;

                abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout);

        /*
         * Care must be exercised when dealing with the umtx structure.
         * It can fault on any access.
         */
                /*
                 * Try the uncontested case.  This should be done in userland.
                 */
                owner = casuword32(m, UMUTEX_UNOWNED, id);

                /* The acquire succeeded. */
                if (owner == UMUTEX_UNOWNED)

                /* The address was invalid. */

                /* If no one owns it but it is contested, try to acquire it. */
                if (owner == UMUTEX_CONTESTED) {
                        owner = casuword32(m,
                            UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
                        if (owner == UMUTEX_CONTESTED)

                        /* The address was invalid. */

                        /* If this failed the lock has changed, restart. */

                /*
                 * If we caught a signal, we have retried and now
                 * exit immediately.
                 */
                if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK,
                    AUTO_SHARE, &uq->uq_key)) != 0)

                umtxq_lock(&uq->uq_key);
                umtxq_busy(&uq->uq_key);
                umtxq_unbusy(&uq->uq_key);
                umtxq_unlock(&uq->uq_key);

                /*
                 * Set the contested bit so that a release in user space
                 * knows to use the system call for unlock.  If this fails
                 * either someone else has acquired the lock or it has been
                 * released.
                 */
                old = casuword32(m, owner, owner | UMUTEX_CONTESTED);

                /* The address was invalid. */
                        umtxq_lock(&uq->uq_key);
                        umtxq_unlock(&uq->uq_key);
                        umtx_key_release(&uq->uq_key);

                /*
                 * We set the contested bit, so sleep.  Otherwise the lock
                 * changed and we need to retry, or we lost a race to the
                 * thread unlocking the umtx.
                 */
                umtxq_lock(&uq->uq_key);
                error = umtxq_sleep(uq, "umtx", timeout == NULL ?
                    NULL : &timo);
                umtxq_unlock(&uq->uq_key);
                umtx_key_release(&uq->uq_key);

        if (timeout == NULL) {
                /* Mutex locking is restarted if it is interrupted. */
                /* Timed locking is not restarted. */
                if (error == ERESTART)

/*
 * Unlock a umtx object (32-bit variant).
 */
do_unlock_umtx32(struct thread *td, uint32_t *m, uint32_t id)
        /*
         * Make sure we own this mtx.
         */
        if ((owner & ~UMUTEX_CONTESTED) != id)

        /* This should be done in userland. */
        if ((owner & UMUTEX_CONTESTED) == 0) {
                old = casuword32(m, owner, UMUTEX_UNOWNED);

        /* We should only ever be in here for contested locks. */
        if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK, AUTO_SHARE,

        count = umtxq_count(&key);

        /*
         * When unlocking the umtx, it must be marked as unowned if
         * zero or one thread is waiting for it; otherwise, it must
         * be marked as contested.
         */
        old = casuword32(m, owner,
            count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
                umtxq_signal(&key, 1);

        umtx_key_release(&key);
/*
 * Fetch and compare the value; sleep on the address if the value has
 * not changed.
 */
do_wait(struct thread *td, void *addr, u_long id,
    struct _umtx_time *timeout, int compat32, int is_private)
        struct abs_timeout timo;

        if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
            is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)

        if (timeout != NULL)
                abs_timeout_init2(&timo, timeout);

        umtxq_lock(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);
                tmp = (unsigned int)fuword32(addr);
        umtxq_lock(&uq->uq_key);
                error = umtxq_sleep(uq, "uwait", timeout == NULL ?
                    NULL : &timo);
                if ((uq->uq_flags & UQF_UMTXQ) == 0)
        umtxq_unlock(&uq->uq_key);
        umtx_key_release(&uq->uq_key);
        if (error == ERESTART)
/*
 * Wake up threads sleeping on the specified address.
 */
kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
        struct umtx_key key;

        if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
            is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
        ret = umtxq_signal(&key, n_wake);
        umtx_key_release(&key);
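
/*
 * Editor's usage sketch (not in the original source): together these
 * primitives back a futex-style wait/wake protocol via _umtx_op(2).
 * A minimal (hypothetical) userland consumer looks like:
 *
 *	while (word == BUSY)
 *		_umtx_op(&word, UMTX_OP_WAIT_UINT, BUSY, NULL, NULL);
 *	...
 *	word = FREE;
 *	_umtx_op(&word, UMTX_OP_WAKE, 1, NULL, NULL);
 *
 * The kernel queues the waiter before re-checking the word, so a wake
 * issued after the store either finds the waiter already queued or
 * the waiter sees the new value and never sleeps.
 */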
/*
 * Lock a PTHREAD_PRIO_NONE protocol POSIX mutex.
 */
do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int mode)
        struct abs_timeout timo;
        uint32_t owner, old, id;

        if (timeout != NULL)
                abs_timeout_init2(&timo, timeout);

        /*
         * Care must be exercised when dealing with the umtx structure.
         * It can fault on any access.
         */
                owner = fuword32(__DEVOLATILE(void *, &m->m_owner));
                if (mode == _UMUTEX_WAIT) {
                        if (owner == UMUTEX_UNOWNED || owner == UMUTEX_CONTESTED)
                        /*
                         * Try the uncontested case.  This should be done in userland.
                         */
                        owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id);

                        /* The acquire succeeded. */
                        if (owner == UMUTEX_UNOWNED)

                        /* The address was invalid. */

                        /* If no one owns it but it is contested, try to acquire it. */
                        if (owner == UMUTEX_CONTESTED) {
                                owner = casuword32(&m->m_owner,
                                    UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
                                if (owner == UMUTEX_CONTESTED)

                                /* The address was invalid. */

                                /* If this failed the lock has changed, restart. */

                if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
                    (owner & ~UMUTEX_CONTESTED) == id)

                if (mode == _UMUTEX_TRY)

                /*
                 * If we caught a signal, we have retried and now
                 * exit immediately.
                 */
                if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
                    GET_SHARE(flags), &uq->uq_key)) != 0)

                umtxq_lock(&uq->uq_key);
                umtxq_busy(&uq->uq_key);
                umtxq_unlock(&uq->uq_key);

                /*
                 * Set the contested bit so that a release in user space
                 * knows to use the system call for unlock.  If this fails
                 * either someone else has acquired the lock or it has been
                 * released.
                 */
                old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED);

                /* The address was invalid. */
                        umtxq_lock(&uq->uq_key);
                        umtxq_unbusy(&uq->uq_key);
                        umtxq_unlock(&uq->uq_key);
                        umtx_key_release(&uq->uq_key);

                /*
                 * We set the contested bit, so sleep.  Otherwise the lock
                 * changed and we need to retry, or we lost a race to the
                 * thread unlocking the umtx.
                 */
                umtxq_lock(&uq->uq_key);
                umtxq_unbusy(&uq->uq_key);
                error = umtxq_sleep(uq, "umtxn", timeout == NULL ?
                    NULL : &timo);
                umtxq_unlock(&uq->uq_key);
                umtx_key_release(&uq->uq_key);

/*
 * Unlock a PTHREAD_PRIO_NONE protocol POSIX mutex.
 */
do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags)
        struct umtx_key key;
        uint32_t owner, old, id;

        /*
         * Make sure we own this mtx.
         */
        owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
        if ((owner & ~UMUTEX_CONTESTED) != id)

        if ((owner & UMUTEX_CONTESTED) == 0) {
                old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED);

        /* We should only ever be in here for contested locks. */
        if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),

        count = umtxq_count(&key);

        /*
         * When unlocking the umtx, it must be marked as unowned if
         * zero or one thread is waiting for it; otherwise, it must
         * be marked as contested.
         */
        old = casuword32(&m->m_owner, owner,
            count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
                umtxq_signal(&key, 1);

        umtx_key_release(&key);

/*
 * Check if the mutex is available and wake up a waiter;
 * this is for simple mutexes only.
 */
do_wake_umutex(struct thread *td, struct umutex *m)
        struct umtx_key key;

        owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
        if ((owner & ~UMUTEX_CONTESTED) != 0)

        flags = fuword32(&m->m_flags);

        /* We should only ever be in here for contested locks. */
        if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),

        count = umtxq_count(&key);
                owner = casuword32(&m->m_owner, UMUTEX_CONTESTED, UMUTEX_UNOWNED);

        if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0)
                umtxq_signal(&key, 1);

        umtx_key_release(&key);

/*
 * Check if the mutex has waiters and try to fix the contention bit.
 */
do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
        struct umtx_key key;
        uint32_t owner, old;

        switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
                type = TYPE_NORMAL_UMUTEX;
        case UMUTEX_PRIO_INHERIT:
                type = TYPE_PI_UMUTEX;
        case UMUTEX_PRIO_PROTECT:
                type = TYPE_PP_UMUTEX;
        if ((error = umtx_key_get(m, type, GET_SHARE(flags),

        count = umtxq_count(&key);

        /*
         * Only repair the contention bit if there is a waiter; this
         * means the mutex is still being referenced by userland code,
         * otherwise don't update any memory.
         */
                owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
                while ((owner & UMUTEX_CONTESTED) == 0) {
                        old = casuword32(&m->m_owner, owner,
                            owner | UMUTEX_CONTESTED);
        } else if (count == 1) {
                owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
                while ((owner & ~UMUTEX_CONTESTED) != 0 &&
                    (owner & UMUTEX_CONTESTED) == 0) {
                        old = casuword32(&m->m_owner, owner,
                            owner | UMUTEX_CONTESTED);

                umtxq_signal(&key, INT_MAX);
        else if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0)
                umtxq_signal(&key, 1);

        umtx_key_release(&key);
static inline struct umtx_pi *
umtx_pi_alloc(int flags)
        pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
        TAILQ_INIT(&pi->pi_blocked);
        atomic_add_int(&umtx_pi_allocated, 1);

umtx_pi_free(struct umtx_pi *pi)
        uma_zfree(umtx_pi_zone, pi);
        atomic_add_int(&umtx_pi_allocated, -1);
/*
 * Adjust the thread's position on a pi_state after its priority has
 * been changed.
 */
umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
        struct umtx_q *uq, *uq1, *uq2;

        mtx_assert(&umtx_lock, MA_OWNED);

        /*
         * Check if the thread needs to be moved on the blocked chain.
         * It needs to be moved if either its priority is lower than
         * the previous thread's or higher than the next thread's.
         */
        uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
        uq2 = TAILQ_NEXT(uq, uq_lockq);
        if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
            (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
                /*
                 * Remove the thread from the blocked chain and determine
                 * where it should be moved to.
                 */
                TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
                TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
                        td1 = uq1->uq_thread;
                        MPASS(td1->td_proc->p_magic == P_MAGIC);
                        if (UPRI(td1) > UPRI(td))

                        TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
                        TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);

/*
 * Propagate priority when a thread is blocked on a POSIX
 * PI mutex.
 */
umtx_propagate_priority(struct thread *td)
        mtx_assert(&umtx_lock, MA_OWNED);
        pi = uq->uq_pi_blocked;

                if (td == NULL || td == curthread)

                MPASS(td->td_proc != NULL);
                MPASS(td->td_proc->p_magic == P_MAGIC);

                if (td->td_lend_user_pri > pri)
                        sched_lend_user_prio(td, pri);

                /*
                 * Pick up the lock that td is blocked on.
                 */
                pi = uq->uq_pi_blocked;

                /* Resort td on the list if needed. */
                umtx_pi_adjust_thread(pi, td);

/*
 * Unpropagate priority for a PI mutex when a thread blocked on
 * it is interrupted by a signal or resumed by others.
 */
umtx_repropagate_priority(struct umtx_pi *pi)
        struct umtx_q *uq, *uq_owner;
        struct umtx_pi *pi2;

        mtx_assert(&umtx_lock, MA_OWNED);

        while (pi != NULL && pi->pi_owner != NULL) {
                uq_owner = pi->pi_owner->td_umtxq;
                TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
                        uq = TAILQ_FIRST(&pi2->pi_blocked);
                                if (pri > UPRI(uq->uq_thread))
                                        pri = UPRI(uq->uq_thread);
                if (pri > uq_owner->uq_inherited_pri)
                        pri = uq_owner->uq_inherited_pri;
                thread_lock(pi->pi_owner);
                sched_lend_user_prio(pi->pi_owner, pri);
                thread_unlock(pi->pi_owner);
                if ((pi = uq_owner->uq_pi_blocked) != NULL)
                        umtx_pi_adjust_thread(pi, uq_owner->uq_thread);
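
/*
 * Editor's worked example (not in the original source): suppose thread
 * A (UPRI 200) owns PI mutexes M1 and M2, with B (UPRI 150) blocked on
 * M1 and C (UPRI 140) blocked on M2, so A runs at the lent priority
 * 140.  If C is interrupted by a signal and leaves M2's blocked list,
 * umtx_repropagate_priority() recomputes A's lent priority as the
 * minimum (numerically lowest, i.e. best) of the top waiters of all
 * PI mutexes A still owns, plus A's own inherited priority: A drops
 * back to 150, not all the way to 200, because B is still blocked.
 */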
/*
 * Insert a PI mutex into the owned list.
 */
umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
        struct umtx_q *uq_owner;

        uq_owner = owner->td_umtxq;
        mtx_assert(&umtx_lock, MA_OWNED);
        if (pi->pi_owner != NULL)
                panic("pi_owner != NULL");
        pi->pi_owner = owner;
        TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);

/*
 * Claim ownership of a PI mutex.
 */
umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
        struct umtx_q *uq, *uq_owner;

        uq_owner = owner->td_umtxq;
        mtx_lock_spin(&umtx_lock);
        if (pi->pi_owner == owner) {
                mtx_unlock_spin(&umtx_lock);

        if (pi->pi_owner != NULL) {
                /*
                 * Userland may have already messed up the mutex, sigh.
                 */
                mtx_unlock_spin(&umtx_lock);
        umtx_pi_setowner(pi, owner);
        uq = TAILQ_FIRST(&pi->pi_blocked);
                pri = UPRI(uq->uq_thread);
                if (pri < UPRI(owner))
                        sched_lend_user_prio(owner, pri);
                thread_unlock(owner);
        mtx_unlock_spin(&umtx_lock);

/*
 * Adjust a thread's position in the queue of the PI mutex it is
 * blocked on; this may trigger a new round of priority propagation.
 */
umtx_pi_adjust(struct thread *td, u_char oldpri)
        mtx_lock_spin(&umtx_lock);
        /*
         * Pick up the lock that td is blocked on.
         */
        pi = uq->uq_pi_blocked;
                umtx_pi_adjust_thread(pi, td);
                umtx_repropagate_priority(pi);
        mtx_unlock_spin(&umtx_lock);

/*
 * Sleep on a PI mutex.
 */
umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi,
    uint32_t owner, const char *wmesg, struct abs_timeout *timo)
        struct umtxq_chain *uc;
        struct thread *td, *td1;

        KASSERT(td == curthread, ("inconsistent uq_thread"));
        uc = umtxq_getchain(&uq->uq_key);
        UMTXQ_LOCKED_ASSERT(uc);
        UMTXQ_BUSY_ASSERT(uc);

        mtx_lock_spin(&umtx_lock);
        if (pi->pi_owner == NULL) {
                mtx_unlock_spin(&umtx_lock);
                /* XXX Only look up threads in the current process. */
                td1 = tdfind(owner, curproc->p_pid);
                mtx_lock_spin(&umtx_lock);
                        if (pi->pi_owner == NULL)
                                umtx_pi_setowner(pi, td1);
                        PROC_UNLOCK(td1->td_proc);

        TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
                pri = UPRI(uq1->uq_thread);

                TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
                TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);

        uq->uq_pi_blocked = pi;
        td->td_flags |= TDF_UPIBLOCKED;
        umtx_propagate_priority(td);
        mtx_unlock_spin(&umtx_lock);
        umtxq_unbusy(&uq->uq_key);

        error = umtxq_sleep(uq, wmesg, timo);

        mtx_lock_spin(&umtx_lock);
        uq->uq_pi_blocked = NULL;
        td->td_flags &= ~TDF_UPIBLOCKED;
        TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
        umtx_repropagate_priority(pi);
        mtx_unlock_spin(&umtx_lock);
        umtxq_unlock(&uq->uq_key);
/*
 * Increase the reference count of a PI mutex.
 */
umtx_pi_ref(struct umtx_pi *pi)
        struct umtxq_chain *uc;

        uc = umtxq_getchain(&pi->pi_key);
        UMTXQ_LOCKED_ASSERT(uc);

/*
 * Decrease the reference count of a PI mutex; when the count drops
 * to zero, its memory is freed.
 */
umtx_pi_unref(struct umtx_pi *pi)
        struct umtxq_chain *uc;

        uc = umtxq_getchain(&pi->pi_key);
        UMTXQ_LOCKED_ASSERT(uc);
        KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
        if (--pi->pi_refcount == 0) {
                mtx_lock_spin(&umtx_lock);
                if (pi->pi_owner != NULL) {
                        TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested,
                        pi->pi_owner = NULL;
                KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
                    ("blocked queue not empty"));
                mtx_unlock_spin(&umtx_lock);
                TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);

/*
 * Find a PI mutex in the hash table.
 */
static struct umtx_pi *
umtx_pi_lookup(struct umtx_key *key)
        struct umtxq_chain *uc;

        uc = umtxq_getchain(key);
        UMTXQ_LOCKED_ASSERT(uc);
        TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
                if (umtx_key_match(&pi->pi_key, key)) {

/*
 * Insert a PI mutex into the hash table.
 */
umtx_pi_insert(struct umtx_pi *pi)
        struct umtxq_chain *uc;

        uc = umtxq_getchain(&pi->pi_key);
        UMTXQ_LOCKED_ASSERT(uc);
        TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
        struct abs_timeout timo;
        struct umtx_pi *pi, *new_pi;
        uint32_t id, owner, old;

        if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags),

        if (timeout != NULL)
                abs_timeout_init2(&timo, timeout);

        umtxq_lock(&uq->uq_key);
        pi = umtx_pi_lookup(&uq->uq_key);
                new_pi = umtx_pi_alloc(M_NOWAIT);
                if (new_pi == NULL) {
                        umtxq_unlock(&uq->uq_key);
                        new_pi = umtx_pi_alloc(M_WAITOK);
                        umtxq_lock(&uq->uq_key);
                        pi = umtx_pi_lookup(&uq->uq_key);
                                umtx_pi_free(new_pi);
                if (new_pi != NULL) {
                        new_pi->pi_key = uq->uq_key;
                        umtx_pi_insert(new_pi);
        umtxq_unlock(&uq->uq_key);

        /*
         * Care must be exercised when dealing with the umtx structure.
         * It can fault on any access.
         */
                /*
                 * Try the uncontested case.  This should be done in userland.
                 */
                owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id);

                /* The acquire succeeded. */
                if (owner == UMUTEX_UNOWNED) {

                /* The address was invalid. */

                /* If no one owns it but it is contested, try to acquire it. */
                if (owner == UMUTEX_CONTESTED) {
                        owner = casuword32(&m->m_owner,
                            UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
                        if (owner == UMUTEX_CONTESTED) {
                                umtxq_lock(&uq->uq_key);
                                umtxq_busy(&uq->uq_key);
                                error = umtx_pi_claim(pi, td);
                                umtxq_unbusy(&uq->uq_key);
                                umtxq_unlock(&uq->uq_key);

                        /* The address was invalid. */

                        /* If this failed the lock has changed, restart. */

                if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
                    (owner & ~UMUTEX_CONTESTED) == id) {

                /*
                 * If we caught a signal, we have retried and now
                 * exit immediately.
                 */
                umtxq_lock(&uq->uq_key);
                umtxq_busy(&uq->uq_key);
                umtxq_unlock(&uq->uq_key);

                /*
                 * Set the contested bit so that a release in user space
                 * knows to use the system call for unlock.  If this fails
                 * either someone else has acquired the lock or it has been
                 * released.
                 */
                old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED);

                /* The address was invalid. */
                        umtxq_lock(&uq->uq_key);
                        umtxq_unbusy(&uq->uq_key);
                        umtxq_unlock(&uq->uq_key);

                umtxq_lock(&uq->uq_key);
                /*
                 * We set the contested bit, so sleep.  Otherwise the lock
                 * changed and we need to retry, or we lost a race to the
                 * thread unlocking the umtx.
                 */
                error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED,
                    "umtxpi", timeout == NULL ? NULL : &timo);
                        umtxq_unbusy(&uq->uq_key);
                        umtxq_unlock(&uq->uq_key);

        umtxq_lock(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);
        umtx_key_release(&uq->uq_key);
/*
 * Unlock a PI mutex.
 */
do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
        struct umtx_key key;
        struct umtx_q *uq_first, *uq_first2, *uq_me;
        struct umtx_pi *pi, *pi2;
        uint32_t owner, old, id;

        /*
         * Make sure we own this mtx.
         */
        owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
        if ((owner & ~UMUTEX_CONTESTED) != id)

        /* This should be done in userland. */
        if ((owner & UMUTEX_CONTESTED) == 0) {
                old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED);

        /* We should only ever be in here for contested locks. */
        if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags),

        count = umtxq_count_pi(&key, &uq_first);
        if (uq_first != NULL) {
                mtx_lock_spin(&umtx_lock);
                pi = uq_first->uq_pi_blocked;
                KASSERT(pi != NULL, ("pi == NULL?"));
                if (pi->pi_owner != curthread) {
                        mtx_unlock_spin(&umtx_lock);
                        umtx_key_release(&key);
                        /* Userland messed up the mutex. */
                uq_me = curthread->td_umtxq;
                pi->pi_owner = NULL;
                TAILQ_REMOVE(&uq_me->uq_pi_contested, pi, pi_link);
                /* Get the highest-priority thread that is still sleeping. */
                uq_first = TAILQ_FIRST(&pi->pi_blocked);
                while (uq_first != NULL &&
                    (uq_first->uq_flags & UQF_UMTXQ) == 0) {
                        uq_first = TAILQ_NEXT(uq_first, uq_lockq);
                TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
                        uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
                        if (uq_first2 != NULL) {
                                if (pri > UPRI(uq_first2->uq_thread))
                                        pri = UPRI(uq_first2->uq_thread);
                thread_lock(curthread);
                sched_lend_user_prio(curthread, pri);
                thread_unlock(curthread);
                mtx_unlock_spin(&umtx_lock);
                        umtxq_signal_thread(uq_first);

        /*
         * When unlocking the umtx, it must be marked as unowned if
         * zero or one thread is waiting for it; otherwise, it must
         * be marked as contested.
         */
        old = casuword32(&m->m_owner, owner,
            count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);

        umtx_key_release(&key);
do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
        struct abs_timeout timo;
        struct umtx_q *uq, *uq2;
        int error, pri, old_inherited_pri, su;

        if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),

        if (timeout != NULL)
                abs_timeout_init2(&timo, timeout);

        su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
        old_inherited_pri = uq->uq_inherited_pri;
        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);
                ceiling = RTP_PRIO_MAX - fuword32(&m->m_ceilings[0]);
                if (ceiling > RTP_PRIO_MAX) {

                mtx_lock_spin(&umtx_lock);
                if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
                        mtx_unlock_spin(&umtx_lock);
                if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
                        uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
                        if (uq->uq_inherited_pri < UPRI(td))
                                sched_lend_user_prio(td, uq->uq_inherited_pri);
                mtx_unlock_spin(&umtx_lock);

                owner = casuword32(&m->m_owner,
                    UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
                if (owner == UMUTEX_CONTESTED) {

                /* The address was invalid. */

                if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
                    (owner & ~UMUTEX_CONTESTED) == id) {

                /*
                 * If we caught a signal, we have retried and now
                 * exit immediately.
                 */
                umtxq_lock(&uq->uq_key);
                umtxq_unbusy(&uq->uq_key);
                error = umtxq_sleep(uq, "umtxpp", timeout == NULL ?
                    NULL : &timo);
                umtxq_unlock(&uq->uq_key);

                mtx_lock_spin(&umtx_lock);
                uq->uq_inherited_pri = old_inherited_pri;
                TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
                        uq2 = TAILQ_FIRST(&pi->pi_blocked);
                                if (pri > UPRI(uq2->uq_thread))
                                        pri = UPRI(uq2->uq_thread);
                if (pri > uq->uq_inherited_pri)
                        pri = uq->uq_inherited_pri;
                sched_lend_user_prio(td, pri);
                mtx_unlock_spin(&umtx_lock);

        mtx_lock_spin(&umtx_lock);
        uq->uq_inherited_pri = old_inherited_pri;
        TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
                uq2 = TAILQ_FIRST(&pi->pi_blocked);
                        if (pri > UPRI(uq2->uq_thread))
                                pri = UPRI(uq2->uq_thread);
        if (pri > uq->uq_inherited_pri)
                pri = uq->uq_inherited_pri;
        sched_lend_user_prio(td, pri);
        mtx_unlock_spin(&umtx_lock);

        umtxq_lock(&uq->uq_key);
        umtxq_unbusy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);
        umtx_key_release(&uq->uq_key);
/*
 * Unlock a PP mutex.
 */
do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags)
        struct umtx_key key;
        struct umtx_q *uq, *uq2;
        int error, pri, new_inherited_pri, su;

        su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);

        /*
         * Make sure we own this mtx.
         */
        owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
        if ((owner & ~UMUTEX_CONTESTED) != id)

        error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
                new_inherited_pri = PRI_MAX;
                rceiling = RTP_PRIO_MAX - rceiling;
                if (rceiling > RTP_PRIO_MAX)
                new_inherited_pri = PRI_MIN_REALTIME + rceiling;
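
        /*
         * Editor's worked example (not in the original source):
         * userland stores POSIX-style ceilings (higher value = higher
         * priority), which are flipped into kernel priorities (lower
         * value = higher priority).  Assuming the typical values
         * RTP_PRIO_MAX == 31 and PRI_MIN_REALTIME == 48, a stored
         * ceiling of 31 maps to rceiling = 31 - 31 = 0 and an
         * inherited priority of 48 (the strongest real-time level),
         * while a stored ceiling of 0 maps to 48 + 31 = 79.
         */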
        if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),

        /*
         * For a priority-protected mutex, always set the unlocked state
         * to UMUTEX_CONTESTED, so that userland always enters the kernel
         * to lock the mutex.  This is necessary because the thread
         * priority has to be adjusted for such a mutex.
         */
        error = suword32(__DEVOLATILE(uint32_t *, &m->m_owner),

                umtxq_signal(&key, 1);

        mtx_lock_spin(&umtx_lock);
                uq->uq_inherited_pri = new_inherited_pri;
        TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
                uq2 = TAILQ_FIRST(&pi->pi_blocked);
                        if (pri > UPRI(uq2->uq_thread))
                                pri = UPRI(uq2->uq_thread);
        if (pri > uq->uq_inherited_pri)
                pri = uq->uq_inherited_pri;
        sched_lend_user_prio(td, pri);
        mtx_unlock_spin(&umtx_lock);
        umtx_key_release(&key);
do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
    uint32_t *old_ceiling)
        uint32_t save_ceiling;

        flags = fuword32(&m->m_flags);
        if ((flags & UMUTEX_PRIO_PROTECT) == 0)
        if (ceiling > RTP_PRIO_MAX)

        if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),

        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);
                save_ceiling = fuword32(&m->m_ceilings[0]);

                owner = casuword32(&m->m_owner,
                    UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
                if (owner == UMUTEX_CONTESTED) {
                        suword32(&m->m_ceilings[0], ceiling);
                        suword32(__DEVOLATILE(uint32_t *, &m->m_owner),

                /* The address was invalid. */

                if ((owner & ~UMUTEX_CONTESTED) == id) {
                        suword32(&m->m_ceilings[0], ceiling);

                /*
                 * If we caught a signal, we have retried and now
                 * exit immediately.
                 */

                /*
                 * We set the contested bit, so sleep.  Otherwise the lock
                 * changed and we need to retry, or we lost a race to the
                 * thread unlocking the umtx.
                 */
                umtxq_lock(&uq->uq_key);
                umtxq_unbusy(&uq->uq_key);
                error = umtxq_sleep(uq, "umtxpp", NULL);
                umtxq_unlock(&uq->uq_key);

        umtxq_lock(&uq->uq_key);
                umtxq_signal(&uq->uq_key, INT_MAX);
        umtxq_unbusy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);
        umtx_key_release(&uq->uq_key);
        if (error == 0 && old_ceiling != NULL)
                suword32(old_ceiling, save_ceiling);
/*
 * Lock a userland POSIX mutex.
 */
do_lock_umutex(struct thread *td, struct umutex *m,
    struct _umtx_time *timeout, int mode)
        flags = fuword32(&m->m_flags);

        switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
                error = do_lock_normal(td, m, flags, timeout, mode);
        case UMUTEX_PRIO_INHERIT:
                error = do_lock_pi(td, m, flags, timeout, mode);
        case UMUTEX_PRIO_PROTECT:
                error = do_lock_pp(td, m, flags, timeout, mode);

        if (timeout == NULL) {
                if (error == EINTR && mode != _UMUTEX_WAIT)
                /* Timed locking is not restarted. */
                if (error == ERESTART)

/*
 * Unlock a userland POSIX mutex.
 */
do_unlock_umutex(struct thread *td, struct umutex *m)
        flags = fuword32(&m->m_flags);

        switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
                return (do_unlock_normal(td, m, flags));
        case UMUTEX_PRIO_INHERIT:
                return (do_unlock_pi(td, m, flags));
        case UMUTEX_PRIO_PROTECT:
                return (do_unlock_pp(td, m, flags));
do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
    struct timespec *timeout, u_long wflags)
        struct abs_timeout timo;

        flags = fuword32(&cv->c_flags);
        error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);

        if ((wflags & CVWAIT_CLOCKID) != 0) {
                clockid = fuword32(&cv->c_clockid);
                if (clockid < CLOCK_REALTIME ||
                    clockid >= CLOCK_THREAD_CPUTIME_ID) {
                        /* Hmm, only hardware clock ids will work. */
                clockid = CLOCK_REALTIME;

        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        /*
         * Set c_has_waiters to 1 before releasing the user mutex; also
         * don't touch the cache line when unnecessary.
         */
        if (fuword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters)) == 0)
                suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 1);

        umtxq_lock(&uq->uq_key);
        umtxq_unbusy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        error = do_unlock_umutex(td, m);

        if (timeout != NULL)
                abs_timeout_init(&timo, clockid, ((wflags & CVWAIT_ABSTIME) != 0),
                    timeout);

        umtxq_lock(&uq->uq_key);
                error = umtxq_sleep(uq, "ucond", timeout == NULL ?
                    NULL : &timo);

        if ((uq->uq_flags & UQF_UMTXQ) == 0)
                /*
                 * This must be a timeout, an interruption by a signal,
                 * or a spurious wakeup; clear the c_has_waiters flag
                 * when necessary.
                 */
                umtxq_busy(&uq->uq_key);
                if ((uq->uq_flags & UQF_UMTXQ) != 0) {
                        int oldlen = uq->uq_cur_queue->length;
                        umtxq_unlock(&uq->uq_key);
                                suword32(
                                    __DEVOLATILE(uint32_t *,
                                    &cv->c_has_waiters), 0);
                        umtxq_lock(&uq->uq_key);
                umtxq_unbusy(&uq->uq_key);
                if (error == ERESTART)

        umtxq_unlock(&uq->uq_key);
        umtx_key_release(&uq->uq_key);
/*
 * Signal a userland condition variable.
 */
do_cv_signal(struct thread *td, struct ucond *cv)
        struct umtx_key key;
        int error, cnt, nwake;

        flags = fuword32(&cv->c_flags);
        if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
        cnt = umtxq_count(&key);
        nwake = umtxq_signal(&key, 1);
                    __DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
        umtx_key_release(&key);

do_cv_broadcast(struct thread *td, struct ucond *cv)
        struct umtx_key key;

        flags = fuword32(&cv->c_flags);
        if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
        umtxq_signal(&key, INT_MAX);
        error = suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
        umtx_key_release(&key);
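
/*
 * Editor's note (not in the original source) on the urwlock state
 * word used below: the low bits count read holders and the high bits
 * are flags, roughly
 *
 *	URWLOCK_WRITE_OWNER	- a writer holds the lock
 *	URWLOCK_WRITE_WAITERS	- writers are queued
 *	URWLOCK_READ_WAITERS	- readers are queued
 *	URWLOCK_READER_COUNT(s)	- number of read holders
 *
 * so a single 32-bit CAS can atomically take a read slot, claim write
 * ownership, or publish a waiter bit.
 */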
do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag, struct _umtx_time *timeout)
        struct abs_timeout timo;
        uint32_t flags, wrflags;
        int32_t state, oldstate;
        int32_t blocked_readers;

        flags = fuword32(&rwlock->rw_flags);
        error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);

        if (timeout != NULL)
                abs_timeout_init2(&timo, timeout);

        wrflags = URWLOCK_WRITE_OWNER;
        if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
                wrflags |= URWLOCK_WRITE_WAITERS;

                state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
                /* Try to lock it. */
                while (!(state & wrflags)) {
                        if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) {
                                umtx_key_release(&uq->uq_key);
                        oldstate = casuword32(&rwlock->rw_state, state, state + 1);
                        if (oldstate == state) {
                                umtx_key_release(&uq->uq_key);

                /* Grab the monitor lock. */
                umtxq_lock(&uq->uq_key);
                umtxq_busy(&uq->uq_key);
                umtxq_unlock(&uq->uq_key);

                /*
                 * Re-read the state in case it changed between the
                 * try-lock above and the check below.
                 */
                state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));

                /* Set the read contention bit. */
                while ((state & wrflags) && !(state & URWLOCK_READ_WAITERS)) {
                        oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_READ_WAITERS);
                        if (oldstate == state)

                /* The state changed while setting the flags; restart. */
                if (!(state & wrflags)) {
                        umtxq_lock(&uq->uq_key);
                        umtxq_unbusy(&uq->uq_key);
                        umtxq_unlock(&uq->uq_key);

                /*
                 * The contention bit is set; before sleeping, increase
                 * the read-waiter count.
                 */
                blocked_readers = fuword32(&rwlock->rw_blocked_readers);
                suword32(&rwlock->rw_blocked_readers, blocked_readers + 1);

                while (state & wrflags) {
                        umtxq_lock(&uq->uq_key);
                        umtxq_unbusy(&uq->uq_key);
                        error = umtxq_sleep(uq, "urdlck", timeout == NULL ?
                            NULL : &timo);
                        umtxq_busy(&uq->uq_key);
                        umtxq_unlock(&uq->uq_key);
                        state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));

                /* Decrease the read-waiter count, and maybe clear the read contention bit. */
                blocked_readers = fuword32(&rwlock->rw_blocked_readers);
                suword32(&rwlock->rw_blocked_readers, blocked_readers - 1);
                if (blocked_readers == 1) {
                        state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
                                oldstate = casuword32(&rwlock->rw_state, state,
                                    state & ~URWLOCK_READ_WAITERS);
                                if (oldstate == state)

        umtxq_lock(&uq->uq_key);
        umtxq_unbusy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        umtx_key_release(&uq->uq_key);
        if (error == ERESTART)
do_rw_wrlock(struct thread *td, struct urwlock *rwlock, struct _umtx_time *timeout)
        struct abs_timeout timo;
        int32_t state, oldstate;
        int32_t blocked_writers;
        int32_t blocked_readers;

        flags = fuword32(&rwlock->rw_flags);
        error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);

        if (timeout != NULL)
                abs_timeout_init2(&timo, timeout);

        blocked_readers = 0;
                state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
                while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
                        oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_WRITE_OWNER);
                        if (oldstate == state) {
                                umtx_key_release(&uq->uq_key);

                if (!(state & (URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS)) &&
                    blocked_readers != 0) {
                        umtxq_lock(&uq->uq_key);
                        umtxq_busy(&uq->uq_key);
                        umtxq_signal_queue(&uq->uq_key, INT_MAX, UMTX_SHARED_QUEUE);
                        umtxq_unbusy(&uq->uq_key);
                        umtxq_unlock(&uq->uq_key);

                /* Grab the monitor lock. */
                umtxq_lock(&uq->uq_key);
                umtxq_busy(&uq->uq_key);
                umtxq_unlock(&uq->uq_key);

                /*
                 * Re-read the state in case it changed between the
                 * try-lock above and the check below.
                 */
                state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));

                while (((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) &&
                    (state & URWLOCK_WRITE_WAITERS) == 0) {
                        oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_WRITE_WAITERS);
                        if (oldstate == state)

                if (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
                        umtxq_lock(&uq->uq_key);
                        umtxq_unbusy(&uq->uq_key);
                        umtxq_unlock(&uq->uq_key);

                blocked_writers = fuword32(&rwlock->rw_blocked_writers);
                suword32(&rwlock->rw_blocked_writers, blocked_writers + 1);

                while ((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) {
                        umtxq_lock(&uq->uq_key);
                        umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
                        umtxq_unbusy(&uq->uq_key);
                        error = umtxq_sleep(uq, "uwrlck", timeout == NULL ?
                            NULL : &timo);
                        umtxq_busy(&uq->uq_key);
                        umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
                        umtxq_unlock(&uq->uq_key);
                        state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));

                blocked_writers = fuword32(&rwlock->rw_blocked_writers);
                suword32(&rwlock->rw_blocked_writers, blocked_writers - 1);
                if (blocked_writers == 1) {
                        state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
                                oldstate = casuword32(&rwlock->rw_state, state,
                                    state & ~URWLOCK_WRITE_WAITERS);
                                if (oldstate == state)
                        blocked_readers = fuword32(&rwlock->rw_blocked_readers);
                        blocked_readers = 0;

        umtxq_lock(&uq->uq_key);
        umtxq_unbusy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        umtx_key_release(&uq->uq_key);
        if (error == ERESTART)
do_rw_unlock(struct thread *td, struct urwlock *rwlock)
        int32_t state, oldstate;
        int error, q, count;

        flags = fuword32(&rwlock->rw_flags);
        error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);

        state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
        if (state & URWLOCK_WRITE_OWNER) {
                        oldstate = casuword32(&rwlock->rw_state, state,
                            state & ~URWLOCK_WRITE_OWNER);
                        if (oldstate != state) {
                                if (!(oldstate & URWLOCK_WRITE_OWNER)) {
        } else if (URWLOCK_READER_COUNT(state) != 0) {
                        oldstate = casuword32(&rwlock->rw_state, state,
                            state - 1);
                        if (oldstate != state) {
                                if (URWLOCK_READER_COUNT(oldstate) == 0) {

        if (!(flags & URWLOCK_PREFER_READER)) {
                if (state & URWLOCK_WRITE_WAITERS) {
                        q = UMTX_EXCLUSIVE_QUEUE;
                } else if (state & URWLOCK_READ_WAITERS) {
                        q = UMTX_SHARED_QUEUE;
                if (state & URWLOCK_READ_WAITERS) {
                        q = UMTX_SHARED_QUEUE;
                } else if (state & URWLOCK_WRITE_WAITERS) {
                        q = UMTX_EXCLUSIVE_QUEUE;

                umtxq_lock(&uq->uq_key);
                umtxq_busy(&uq->uq_key);
                umtxq_signal_queue(&uq->uq_key, count, q);
                umtxq_unbusy(&uq->uq_key);
                umtxq_unlock(&uq->uq_key);

        umtx_key_release(&uq->uq_key);
do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
        struct abs_timeout timo;
        uint32_t flags, count;

        flags = fuword32(&sem->_flags);
        error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);

        if (timeout != NULL)
                abs_timeout_init2(&timo, timeout);

        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);
        casuword32(__DEVOLATILE(uint32_t *, &sem->_has_waiters), 0, 1);
        count = fuword32(__DEVOLATILE(uint32_t *, &sem->_count));
                umtxq_lock(&uq->uq_key);
                umtxq_unbusy(&uq->uq_key);
                umtxq_unlock(&uq->uq_key);
                umtx_key_release(&uq->uq_key);

        umtxq_lock(&uq->uq_key);
        umtxq_unbusy(&uq->uq_key);

        error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);

        if ((uq->uq_flags & UQF_UMTXQ) == 0)
                if (error == ERESTART)
        umtxq_unlock(&uq->uq_key);
        umtx_key_release(&uq->uq_key);
/*
 * Wake up a userland semaphore waiter.
 */
do_sem_wake(struct thread *td, struct _usem *sem)
        struct umtx_key key;

        flags = fuword32(&sem->_flags);
        if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
        cnt = umtxq_count(&key);
                umtxq_signal(&key, 1);
        /*
         * Check if the count is greater than 0; this means the memory
         * is still being referenced by user code, so we can safely
         * update the _has_waiters flag.
         */
                    __DEVOLATILE(uint32_t *, &sem->_has_waiters), 0);
        umtx_key_release(&key);
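
/*
 * Editor's sketch (not in the original source) of the semaphore
 * handshake: do_sem_wait() sets _has_waiters *before* re-reading
 * _count, while a userland post increments _count and only calls into
 * the kernel when _has_waiters is set.  Ordering both sides this way
 * means a post that races with a new waiter either observes
 * _has_waiters and issues the wake, or the waiter observes the new
 * _count and never sleeps, so wakeups are not lost.
 */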
sys__umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
    /* struct umtx *umtx */
        return (do_lock_umtx(td, uap->umtx, td->td_tid, 0));

sys__umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
    /* struct umtx *umtx */
        return (do_unlock_umtx(td, uap->umtx, td->td_tid));

umtx_copyin_timeout(const void *addr, struct timespec *tsp)
        error = copyin(addr, tsp, sizeof(struct timespec));
        if (tsp->tv_sec < 0 ||
            tsp->tv_nsec >= 1000000000 ||

umtx_copyin_umtx_time(const void *addr, size_t size, struct _umtx_time *tp)
        if (size <= sizeof(struct timespec)) {
                tp->_clockid = CLOCK_REALTIME;
                error = copyin(addr, &tp->_timeout, sizeof(struct timespec));
                error = copyin(addr, tp, sizeof(struct _umtx_time));
        if (tp->_timeout.tv_sec < 0 ||
            tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0)
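
/*
 * Editor's note (not in the original source): umtx_copyin_umtx_time()
 * accepts two layouts for backward compatibility, distinguished by
 * the size that userland passes in uaddr1.  A plain struct timespec
 * is treated as a relative CLOCK_REALTIME timeout, while a full
 * struct _umtx_time additionally carries _clockid and _flags (e.g.
 * UMTX_ABSTIME for an absolute deadline).  Either way the timespec is
 * validated here before use.
 */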
2955 __umtx_op_lock_umtx(struct thread *td, struct _umtx_op_args *uap)
2957 struct timespec *ts, timeout;
2960 /* Allow a null timespec (wait forever). */
2961 if (uap->uaddr2 == NULL)
2964 error = umtx_copyin_timeout(uap->uaddr2, &timeout);
2969 return (do_lock_umtx(td, uap->obj, uap->val, ts));
2973 __umtx_op_unlock_umtx(struct thread *td, struct _umtx_op_args *uap)
2975 return (do_unlock_umtx(td, uap->obj, uap->val));

static int
__umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout, *tm_p;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 0, 0));
}

static int
__umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout, *tm_p;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
}

static int
__umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
}

static int
__umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
{
	return (kern_umtx_wake(td, uap->obj, uap->val, 0));
}

#define	BATCH_SIZE	128
static int
__umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap)
{
	int count = uap->val;
	void *uaddrs[BATCH_SIZE];
	char **upp = (char **)uap->obj;
	int tocopy;
	int error = 0;
	int i, pos = 0;

	/* Copy in and wake at most BATCH_SIZE addresses per pass. */
	while (count > 0) {
		tocopy = count;
		if (tocopy > BATCH_SIZE)
			tocopy = BATCH_SIZE;
		error = copyin(upp + pos, uaddrs, tocopy * sizeof(char *));
		if (error != 0)
			break;
		for (i = 0; i < tocopy; ++i)
			kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
		count -= tocopy;
		pos += tocopy;
	}
	return (error);
}
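
/*
 * Illustrative caller (assumed, not from this file): a threading
 * library that must release many private waiters at once can hand
 * the whole pointer array to the kernel in a single syscall:
 *
 *	void *chans[n];
 *	... fill chans[0..n-1] with wait channel addresses ...
 *	_umtx_op(chans, UMTX_OP_NWAKE_PRIVATE, n, NULL, NULL);
 *
 * Copying at most BATCH_SIZE pointers per iteration keeps the
 * on-stack uaddrs[] bounded no matter how large uap->val is.
 */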

static int
__umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
{
	return (kern_umtx_wake(td, uap->obj, uap->val, 1));
}

static int
__umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, 0));
}

static int
__umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	return (do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY));
}

static int
__umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
}

static int
__umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	return (do_wake_umutex(td, uap->obj));
}

static int
__umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	return (do_unlock_umutex(td, uap->obj));
}

static int
__umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap)
{
	return (do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1));
}

static int
__umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
}

static int
__umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap)
{
	return (do_cv_signal(td, uap->obj));
}

static int
__umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap)
{
	return (do_cv_broadcast(td, uap->obj));
}

static int
__umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_rdlock(td, uap->obj, uap->val, NULL);
	} else {
		error = umtx_copyin_umtx_time(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_wrlock(td, uap->obj, NULL);
	} else {
		error = umtx_copyin_umtx_time(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_wrlock(td, uap->obj, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
{
	return (do_rw_unlock(td, uap->obj));
}

static int
__umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_sem_wait(td, uap->obj, tm_p));
}

static int
__umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap)
{
	return (do_sem_wake(td, uap->obj));
}

static int
__umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	return (do_wake2_umutex(td, uap->obj, uap->val));
}

typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap);

static _umtx_op_func op_table[] = {
	__umtx_op_lock_umtx,		/* UMTX_OP_LOCK */
	__umtx_op_unlock_umtx,		/* UMTX_OP_UNLOCK */
	__umtx_op_wait,			/* UMTX_OP_WAIT */
	__umtx_op_wake,			/* UMTX_OP_WAKE */
	__umtx_op_trylock_umutex,	/* UMTX_OP_MUTEX_TRYLOCK */
	__umtx_op_lock_umutex,		/* UMTX_OP_MUTEX_LOCK */
	__umtx_op_unlock_umutex,	/* UMTX_OP_MUTEX_UNLOCK */
	__umtx_op_set_ceiling,		/* UMTX_OP_SET_CEILING */
	__umtx_op_cv_wait,		/* UMTX_OP_CV_WAIT */
	__umtx_op_cv_signal,		/* UMTX_OP_CV_SIGNAL */
	__umtx_op_cv_broadcast,		/* UMTX_OP_CV_BROADCAST */
	__umtx_op_wait_uint,		/* UMTX_OP_WAIT_UINT */
	__umtx_op_rw_rdlock,		/* UMTX_OP_RW_RDLOCK */
	__umtx_op_rw_wrlock,		/* UMTX_OP_RW_WRLOCK */
	__umtx_op_rw_unlock,		/* UMTX_OP_RW_UNLOCK */
	__umtx_op_wait_uint_private,	/* UMTX_OP_WAIT_UINT_PRIVATE */
	__umtx_op_wake_private,		/* UMTX_OP_WAKE_PRIVATE */
	__umtx_op_wait_umutex,		/* UMTX_OP_UMUTEX_WAIT */
	__umtx_op_wake_umutex,		/* UMTX_OP_UMUTEX_WAKE */
	__umtx_op_sem_wait,		/* UMTX_OP_SEM_WAIT */
	__umtx_op_sem_wake,		/* UMTX_OP_SEM_WAKE */
	__umtx_op_nwake_private,	/* UMTX_OP_NWAKE_PRIVATE */
	__umtx_op_wake2_umutex		/* UMTX_OP_UMUTEX_WAKE2 */
};

int
sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
{
	if ((unsigned)uap->op < UMTX_OP_MAX)
		return (*op_table[uap->op])(td, uap);
	return (EINVAL);
}
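
/*
 * Example dispatch from userland (a sketch; libc exposes this
 * syscall as _umtx_op(2)):
 *
 *	uint32_t chan = 0;
 *	// Sleeps while chan == 0; NULL size/timeout means wait forever.
 *	_umtx_op(&chan, UMTX_OP_WAIT_UINT_PRIVATE, 0, NULL, NULL);
 *
 * Because op indexes op_table directly, the (unsigned) cast rejects
 * both negative and out-of-range opcodes with a single comparison.
 */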

#ifdef COMPAT_FREEBSD32
int
freebsd32_umtx_lock(struct thread *td, struct freebsd32_umtx_lock_args *uap)
    /* struct umtx *umtx */
{
	return (do_lock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid, NULL));
}

int
freebsd32_umtx_unlock(struct thread *td, struct freebsd32_umtx_unlock_args *uap)
    /* struct umtx *umtx */
{
	return (do_unlock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid));
}

struct timespec32 {
	int32_t tv_sec;
	int32_t tv_nsec;
};

struct umtx_time32 {
	struct	timespec32	timeout;
	uint32_t		flags;
	uint32_t		clockid;
};

static inline int
umtx_copyin_timeout32(void *addr, struct timespec *tsp)
{
	struct timespec32 ts32;
	int error;

	error = copyin(addr, &ts32, sizeof(struct timespec32));
	if (error == 0) {
		if (ts32.tv_sec < 0 ||
		    ts32.tv_nsec >= 1000000000 ||
		    ts32.tv_nsec < 0)
			error = EINVAL;
		else {
			tsp->tv_sec = ts32.tv_sec;
			tsp->tv_nsec = ts32.tv_nsec;
		}
	}
	return (error);
}

static inline int
umtx_copyin_umtx_time32(const void *addr, size_t size, struct _umtx_time *tp)
{
	struct umtx_time32 t32;
	int error;

	t32.clockid = CLOCK_REALTIME;
	t32.flags = 0;
	if (size <= sizeof(struct timespec32))
		error = copyin(addr, &t32.timeout, sizeof(struct timespec32));
	else
		error = copyin(addr, &t32, sizeof(struct umtx_time32));
	if (error != 0)
		return (error);
	if (t32.timeout.tv_sec < 0 ||
	    t32.timeout.tv_nsec >= 1000000000 || t32.timeout.tv_nsec < 0)
		return (EINVAL);
	tp->_timeout.tv_sec = t32.timeout.tv_sec;
	tp->_timeout.tv_nsec = t32.timeout.tv_nsec;
	tp->_flags = t32.flags;
	tp->_clockid = t32.clockid;
	return (0);
}
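
/*
 * Sketch of what a 32-bit caller passes for the extended form
 * (hypothetical initializer; field names are struct umtx_time32
 * above):
 *
 *	struct umtx_time32 ut32 = {
 *		.timeout = { .tv_sec = 1, .tv_nsec = 0 },
 *		.flags	 = UMTX_ABSTIME,
 *		.clockid = CLOCK_MONOTONIC,
 *	};
 *	_umtx_op(obj, UMTX_OP_WAIT, val, (void *)sizeof(ut32), &ut32);
 *
 * As in the native helper, a bare struct timespec32 (size <= 8)
 * selects a relative CLOCK_REALTIME timeout.
 */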

static int
__umtx_op_lock_umtx_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return (do_lock_umtx32(td, uap->obj, uap->val, ts));
}

static int
__umtx_op_unlock_umtx_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	return (do_unlock_umtx32(td, uap->obj, (uint32_t)uap->val));
}

static int
__umtx_op_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
}

static int
__umtx_op_lock_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		/* The 32-bit layout must be converted, not copied as-is. */
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, 0));
}

static int
__umtx_op_wait_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
}

static int
__umtx_op_cv_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
}

static int
__umtx_op_rw_rdlock_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_rdlock(td, uap->obj, uap->val, NULL);
	} else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_wrlock_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_wrlock(td, uap->obj, NULL);
	} else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_wrlock(td, uap->obj, &timeout);
	}
	return (error);
}

static int
__umtx_op_wait_uint_private_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(
		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
}

static int
__umtx_op_sem_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct _umtx_time *tm_p, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		tm_p = NULL;
	else {
		error = umtx_copyin_umtx_time32(uap->uaddr2,
		    (size_t)uap->uaddr1, &timeout);
		if (error != 0)
			return (error);
		tm_p = &timeout;
	}
	return (do_sem_wait(td, uap->obj, tm_p));
}

static int
__umtx_op_nwake_private32(struct thread *td, struct _umtx_op_args *uap)
{
	int count = uap->val;
	uint32_t uaddrs[BATCH_SIZE];
	/*
	 * The user array holds 32-bit pointers, so it must be walked
	 * with a 4-byte stride, not with a host pointer type.
	 */
	uint32_t *upp = (uint32_t *)uap->obj;
	int tocopy;
	int error = 0;
	int i, pos = 0;

	while (count > 0) {
		tocopy = count;
		if (tocopy > BATCH_SIZE)
			tocopy = BATCH_SIZE;
		error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t));
		if (error != 0)
			break;
		for (i = 0; i < tocopy; ++i)
			kern_umtx_wake(td, (void *)(intptr_t)uaddrs[i],
			    INT_MAX, 1);
		count -= tocopy;
		pos += tocopy;
	}
	return (error);
}

static _umtx_op_func op_table_compat32[] = {
	__umtx_op_lock_umtx_compat32,	/* UMTX_OP_LOCK */
	__umtx_op_unlock_umtx_compat32,	/* UMTX_OP_UNLOCK */
	__umtx_op_wait_compat32,	/* UMTX_OP_WAIT */
	__umtx_op_wake,			/* UMTX_OP_WAKE */
	__umtx_op_trylock_umutex,	/* UMTX_OP_MUTEX_TRYLOCK */
	__umtx_op_lock_umutex_compat32,	/* UMTX_OP_MUTEX_LOCK */
	__umtx_op_unlock_umutex,	/* UMTX_OP_MUTEX_UNLOCK */
	__umtx_op_set_ceiling,		/* UMTX_OP_SET_CEILING */
	__umtx_op_cv_wait_compat32,	/* UMTX_OP_CV_WAIT */
	__umtx_op_cv_signal,		/* UMTX_OP_CV_SIGNAL */
	__umtx_op_cv_broadcast,		/* UMTX_OP_CV_BROADCAST */
	__umtx_op_wait_compat32,	/* UMTX_OP_WAIT_UINT */
	__umtx_op_rw_rdlock_compat32,	/* UMTX_OP_RW_RDLOCK */
	__umtx_op_rw_wrlock_compat32,	/* UMTX_OP_RW_WRLOCK */
	__umtx_op_rw_unlock,		/* UMTX_OP_RW_UNLOCK */
	__umtx_op_wait_uint_private_compat32, /* UMTX_OP_WAIT_UINT_PRIVATE */
	__umtx_op_wake_private,		/* UMTX_OP_WAKE_PRIVATE */
	__umtx_op_wait_umutex_compat32, /* UMTX_OP_UMUTEX_WAIT */
	__umtx_op_wake_umutex,		/* UMTX_OP_UMUTEX_WAKE */
	__umtx_op_sem_wait_compat32,	/* UMTX_OP_SEM_WAIT */
	__umtx_op_sem_wake,		/* UMTX_OP_SEM_WAKE */
	__umtx_op_nwake_private32,	/* UMTX_OP_NWAKE_PRIVATE */
	__umtx_op_wake2_umutex		/* UMTX_OP_UMUTEX_WAKE2 */
};

int
freebsd32_umtx_op(struct thread *td, struct freebsd32_umtx_op_args *uap)
{
	if ((unsigned)uap->op < UMTX_OP_MAX)
		return (*op_table_compat32[uap->op])(td,
		    (struct _umtx_op_args *)uap);
	return (EINVAL);
}
#endif /* COMPAT_FREEBSD32 */

void
umtx_thread_init(struct thread *td)
{
	td->td_umtxq = umtxq_alloc();
	td->td_umtxq->uq_thread = td;
}

void
umtx_thread_fini(struct thread *td)
{
	umtxq_free(td->td_umtxq);
}

/*
 * Called when a new thread is created, e.g. during fork().
 */
void
umtx_thread_alloc(struct thread *td)
{
	struct umtx_q *uq;

	uq = td->td_umtxq;
	uq->uq_inherited_pri = PRI_MAX;

	KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
	KASSERT(uq->uq_thread == td, ("uq_thread != td"));
	KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
	KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
}

/*
 * exec() hook.
 */
static void
umtx_exec_hook(void *arg __unused, struct proc *p __unused,
    struct image_params *imgp __unused)
{
	umtx_thread_cleanup(curthread);
}

/*
 * thread_exit() hook.
 */
void
umtx_thread_exit(struct thread *td)
{
	umtx_thread_cleanup(td);
}

/*
 * Clean up per-thread umtx state: disown any priority-inheritance
 * mutexes still contested against this thread and drop any priority
 * that was lent to it.
 */
static void
umtx_thread_cleanup(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;

	if ((uq = td->td_umtxq) == NULL)
		return;

	mtx_lock_spin(&umtx_lock);
	uq->uq_inherited_pri = PRI_MAX;
	while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
		pi->pi_owner = NULL;
		TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
	}
	mtx_unlock_spin(&umtx_lock);
	thread_lock(td);
	sched_lend_user_prio(td, PRI_MAX);
	thread_unlock(td);
}