2 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
3 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include "opt_compat.h"
32 #include "opt_umtx_profiling.h"
34 #include <sys/param.h>
35 #include <sys/kernel.h>
36 #include <sys/limits.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
42 #include <sys/sched.h>
44 #include <sys/sysctl.h>
45 #include <sys/sysent.h>
46 #include <sys/systm.h>
47 #include <sys/sysproto.h>
48 #include <sys/syscallsubr.h>
49 #include <sys/eventhandler.h>
53 #include <vm/vm_param.h>
55 #include <vm/vm_map.h>
56 #include <vm/vm_object.h>
58 #include <machine/cpu.h>
60 #ifdef COMPAT_FREEBSD32
61 #include <compat/freebsd32/freebsd32_proto.h>
65 #define _UMUTEX_WAIT 2
67 /* Priority inheritance mutex info. */
70 struct thread *pi_owner;
75 /* List entry to link PI mutexes held by a thread. */
76 TAILQ_ENTRY(umtx_pi) pi_link;
78 /* List entry in hash */
79 TAILQ_ENTRY(umtx_pi) pi_hashlink;
81 /* List for waiters */
82 TAILQ_HEAD(,umtx_q) pi_blocked;
84 /* Identify a userland lock object */
85 struct umtx_key pi_key;
88 /* A userland synchronization object user. */
90 /* Linked list for the hash. */
91 TAILQ_ENTRY(umtx_q) uq_link;
94 struct umtx_key uq_key;
98 #define UQF_UMTXQ 0x0001
100 /* The waiting thread. */
101 struct thread *uq_thread;
104 * Blocked on a PI mutex.  Reads can be done holding either the chain
105 * lock or umtx_lock; writes must hold both the chain lock and
106 * umtx_lock.
108 struct umtx_pi *uq_pi_blocked;
110 /* On blocked list */
111 TAILQ_ENTRY(umtx_q) uq_lockq;
114 /* PI mutexes owned by us that other threads contend for. */
114 TAILQ_HEAD(,umtx_pi) uq_pi_contested;
116 /* Inherited priority from PP mutex */
117 u_char uq_inherited_pri;
119 /* Spare queue ready to be reused */
120 struct umtxq_queue *uq_spare_queue;
122 /* The queue we are on. */
123 struct umtxq_queue *uq_cur_queue;
126 TAILQ_HEAD(umtxq_head, umtx_q);
128 /* Per-key wait-queue */
130 struct umtxq_head head;
132 LIST_ENTRY(umtxq_queue) link;
136 LIST_HEAD(umtxq_list, umtxq_queue);
138 /* Userland lock object's wait-queue chain */
140 /* Lock for this chain. */
143 /* List of sleep queues. */
144 struct umtxq_list uc_queue[2];
145 #define UMTX_SHARED_QUEUE 0
146 #define UMTX_EXCLUSIVE_QUEUE 1
148 LIST_HEAD(, umtxq_queue) uc_spare_queue;
153 /* Chain lock waiters */
156 /* All PI mutexes in the list. */
157 TAILQ_HEAD(,umtx_pi) uc_pi_list;
159 #ifdef UMTX_PROFILING
165 #define UMTXQ_LOCKED_ASSERT(uc) mtx_assert(&(uc)->uc_lock, MA_OWNED)
166 #define UMTXQ_BUSY_ASSERT(uc) KASSERT((uc)->uc_busy != 0, ("umtx chain is not busy"))
169 * Don't propagate time-sharing priority; there is a security reason:
170 * a user can simply create a PI mutex, let thread A lock it, and let
171 * another thread B block on it.  Because B is sleeping, its priority
172 * would be boosted, and A's priority would be boosted as well through
173 * priority propagation and would never be lowered, even if A were
174 * using 100% CPU.  This would be unfair to other processes.
177 #define UPRI(td) (((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\
178 (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\
179 PRI_MAX_TIMESHARE : (td)->td_user_pri)
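/*
 * Worked example (illustrative): with the clamp above, a time-sharing
 * thread whose user priority lies anywhere in
 * [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE] contributes PRI_MAX_TIMESHARE,
 * so it can never lend a boosted priority to a PI-mutex owner; real-time
 * priorities pass through UPRI() unchanged.
 */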
181 #define GOLDEN_RATIO_PRIME 2654404609U
182 #define UMTX_CHAINS 512
183 #define UMTX_SHIFTS (__WORD_BIT - 9)
185 #define GET_SHARE(flags) \
186 (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)
188 #define BUSY_SPINS 200
196 static uma_zone_t umtx_pi_zone;
197 static struct umtxq_chain umtxq_chains[2][UMTX_CHAINS];
198 static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
199 static int umtx_pi_allocated;
201 static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx debug");
202 SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
203 &umtx_pi_allocated, 0, "Allocated umtx_pi");
205 #ifdef UMTX_PROFILING
206 static long max_length;
207 SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length");
208 static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD, 0, "umtx chain stats");
211 static void umtxq_sysinit(void *);
212 static void umtxq_hash(struct umtx_key *key);
213 static struct umtxq_chain *umtxq_getchain(struct umtx_key *key);
214 static void umtxq_lock(struct umtx_key *key);
215 static void umtxq_unlock(struct umtx_key *key);
216 static void umtxq_busy(struct umtx_key *key);
217 static void umtxq_unbusy(struct umtx_key *key);
218 static void umtxq_insert_queue(struct umtx_q *uq, int q);
219 static void umtxq_remove_queue(struct umtx_q *uq, int q);
220 static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *);
221 static int umtxq_count(struct umtx_key *key);
222 static struct umtx_pi *umtx_pi_alloc(int);
223 static void umtx_pi_free(struct umtx_pi *pi);
224 static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags);
225 static void umtx_thread_cleanup(struct thread *td);
226 static void umtx_exec_hook(void *arg __unused, struct proc *p __unused,
227 struct image_params *imgp __unused);
228 SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);
230 #define umtxq_signal(key, nwake) umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
231 #define umtxq_insert(uq) umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
232 #define umtxq_remove(uq) umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)
234 static struct mtx umtx_lock;
236 #ifdef UMTX_PROFILING
238 umtx_init_profiling(void)
240 struct sysctl_oid *chain_oid;
244 for (i = 0; i < UMTX_CHAINS; ++i) {
245 snprintf(chain_name, sizeof(chain_name), "%d", i);
246 chain_oid = SYSCTL_ADD_NODE(NULL,
247 SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
248 chain_name, CTLFLAG_RD, NULL, "umtx hash stats");
249 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
250 "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL);
251 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
252 "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL);
258 umtxq_sysinit(void *arg __unused)
262 umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
263 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
264 for (i = 0; i < 2; ++i) {
265 for (j = 0; j < UMTX_CHAINS; ++j) {
266 mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
267 MTX_DEF | MTX_DUPOK);
268 LIST_INIT(&umtxq_chains[i][j].uc_queue[0]);
269 LIST_INIT(&umtxq_chains[i][j].uc_queue[1]);
270 LIST_INIT(&umtxq_chains[i][j].uc_spare_queue);
271 TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
272 umtxq_chains[i][j].uc_busy = 0;
273 umtxq_chains[i][j].uc_waiters = 0;
274 #ifdef UMTX_PROFILING
275 umtxq_chains[i][j].length = 0;
276 umtxq_chains[i][j].max_length = 0;
280 #ifdef UMTX_PROFILING
281 umtx_init_profiling();
283 mtx_init(&umtx_lock, "umtx lock", NULL, MTX_SPIN);
284 EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
285 EVENTHANDLER_PRI_ANY);
293 uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
294 uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX, M_WAITOK | M_ZERO);
295 TAILQ_INIT(&uq->uq_spare_queue->head);
296 TAILQ_INIT(&uq->uq_pi_contested);
297 uq->uq_inherited_pri = PRI_MAX;
302 umtxq_free(struct umtx_q *uq)
304 MPASS(uq->uq_spare_queue != NULL);
305 free(uq->uq_spare_queue, M_UMTX);
310 umtxq_hash(struct umtx_key *key)
312 unsigned n = (uintptr_t)key->info.both.a + key->info.both.b;
313 key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
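/*
 * A minimal userland sketch of the same chain selection (illustrative
 * only; assumes a 32-bit unsigned int and the constants defined above):
 *
 *	unsigned
 *	chain_index(uintptr_t a, uintptr_t b)
 *	{
 *		unsigned n = (unsigned)(a + b);
 *
 *		return (((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS);
 *	}
 *
 * Keys that collide simply share a chain and its mutex; correctness relies
 * on umtx_key_match(), not on hash uniqueness.
 */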
316 static inline struct umtxq_chain *
317 umtxq_getchain(struct umtx_key *key)
319 if (key->type <= TYPE_SEM)
320 return (&umtxq_chains[1][key->hash]);
321 return (&umtxq_chains[0][key->hash]);
328 umtxq_lock(struct umtx_key *key)
330 struct umtxq_chain *uc;
332 uc = umtxq_getchain(key);
333 mtx_lock(&uc->uc_lock);
340 umtxq_unlock(struct umtx_key *key)
342 struct umtxq_chain *uc;
344 uc = umtxq_getchain(key);
345 mtx_unlock(&uc->uc_lock);
349 * Set the chain to the busy state when a following operation
350 * may block (a kernel mutex cannot be used).
353 umtxq_busy(struct umtx_key *key)
355 struct umtxq_chain *uc;
357 uc = umtxq_getchain(key);
358 mtx_assert(&uc->uc_lock, MA_OWNED);
362 int count = BUSY_SPINS;
365 while (uc->uc_busy && --count > 0)
371 while (uc->uc_busy) {
373 msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
384 umtxq_unbusy(struct umtx_key *key)
386 struct umtxq_chain *uc;
388 uc = umtxq_getchain(key);
389 mtx_assert(&uc->uc_lock, MA_OWNED);
390 KASSERT(uc->uc_busy != 0, ("not busy"));
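/*
 * Typical calling pattern for the busy protocol, as used throughout this
 * file (a sketch, not a new interface):
 *
 *	umtxq_lock(key);
 *	umtxq_busy(key);	(may spin or msleep; the chain lock may be dropped)
 *	umtxq_unlock(key);
 *	... touch userland memory, which may fault or sleep ...
 *	umtxq_lock(key);
 *	umtxq_unbusy(key);
 *	umtxq_unlock(key);
 */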
396 static struct umtxq_queue *
397 umtxq_queue_lookup(struct umtx_key *key, int q)
399 struct umtxq_queue *uh;
400 struct umtxq_chain *uc;
402 uc = umtxq_getchain(key);
403 UMTXQ_LOCKED_ASSERT(uc);
404 LIST_FOREACH(uh, &uc->uc_queue[q], link) {
405 if (umtx_key_match(&uh->key, key))
413 umtxq_insert_queue(struct umtx_q *uq, int q)
415 struct umtxq_queue *uh;
416 struct umtxq_chain *uc;
418 uc = umtxq_getchain(&uq->uq_key);
419 UMTXQ_LOCKED_ASSERT(uc);
420 KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue"));
421 uh = umtxq_queue_lookup(&uq->uq_key, q);
423 LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
425 uh = uq->uq_spare_queue;
426 uh->key = uq->uq_key;
427 LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
429 uq->uq_spare_queue = NULL;
431 TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
433 #ifdef UMTX_PROFILING
435 if (uc->length > uc->max_length) {
436 uc->max_length = uc->length;
437 if (uc->max_length > max_length)
438 max_length = uc->max_length;
441 uq->uq_flags |= UQF_UMTXQ;
442 uq->uq_cur_queue = uh;
447 umtxq_remove_queue(struct umtx_q *uq, int q)
449 struct umtxq_chain *uc;
450 struct umtxq_queue *uh;
452 uc = umtxq_getchain(&uq->uq_key);
453 UMTXQ_LOCKED_ASSERT(uc);
454 if (uq->uq_flags & UQF_UMTXQ) {
455 uh = uq->uq_cur_queue;
456 TAILQ_REMOVE(&uh->head, uq, uq_link);
458 #ifdef UMTX_PROFILING
461 uq->uq_flags &= ~UQF_UMTXQ;
462 if (TAILQ_EMPTY(&uh->head)) {
463 KASSERT(uh->length == 0,
464 ("inconsistent umtxq_queue length"));
465 LIST_REMOVE(uh, link);
467 uh = LIST_FIRST(&uc->uc_spare_queue);
468 KASSERT(uh != NULL, ("uc_spare_queue is empty"));
469 LIST_REMOVE(uh, link);
471 uq->uq_spare_queue = uh;
472 uq->uq_cur_queue = NULL;
477 * Check if there are multiple waiters
480 umtxq_count(struct umtx_key *key)
482 struct umtxq_chain *uc;
483 struct umtxq_queue *uh;
485 uc = umtxq_getchain(key);
486 UMTXQ_LOCKED_ASSERT(uc);
487 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
494 * Check if there are multiple PI waiters and return the first one.
498 umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
500 struct umtxq_chain *uc;
501 struct umtxq_queue *uh;
504 uc = umtxq_getchain(key);
505 UMTXQ_LOCKED_ASSERT(uc);
506 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
508 *first = TAILQ_FIRST(&uh->head);
515 * Wake up threads waiting on a userland object.
519 umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
521 struct umtxq_chain *uc;
522 struct umtxq_queue *uh;
527 uc = umtxq_getchain(key);
528 UMTXQ_LOCKED_ASSERT(uc);
529 uh = umtxq_queue_lookup(key, q);
531 while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
532 umtxq_remove_queue(uq, q);
543 * Wake up specified thread.
546 umtxq_signal_thread(struct umtx_q *uq)
548 struct umtxq_chain *uc;
550 uc = umtxq_getchain(&uq->uq_key);
551 UMTXQ_LOCKED_ASSERT(uc);
557 tstohz(const struct timespec *tsp)
561 TIMESPEC_TO_TIMEVAL(&tv, tsp);
566 abs_timeout_init(struct abs_timeout *timo, int clockid, int absolute,
567 const struct timespec *timeout)
570 timo->clockid = clockid;
572 kern_clock_gettime(curthread, clockid, &timo->end);
573 timo->cur = timo->end;
574 timespecadd(&timo->end, timeout);
576 timo->end = *timeout;
577 kern_clock_gettime(curthread, clockid, &timo->cur);
582 abs_timeout_init2(struct abs_timeout *timo, const struct _umtx_time *umtxtime)
585 abs_timeout_init(timo, umtxtime->_clockid,
586 (umtxtime->_flags & UMTX_ABSTIME) != 0,
587 &umtxtime->_timeout);
591 abs_timeout_update(struct abs_timeout *timo)
593 kern_clock_gettime(curthread, timo->clockid, &timo->cur);
597 abs_timeout_gethz(struct abs_timeout *timo)
601 if (timespeccmp(&timo->end, &timo->cur, <=))
604 timespecsub(&tts, &timo->cur);
605 return (tstohz(&tts));
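/*
 * Example (illustrative): for a 500 ms relative timeout with hz = 1000,
 * abs_timeout_init() records end = now + 0.5 s, and abs_timeout_gethz()
 * later converts whatever remains into roughly 500 ticks for msleep().
 */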
609 * Put the thread into a sleep state; before sleeping, check whether the
610 * thread was removed from the umtx queue.
613 umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *abstime)
615 struct umtxq_chain *uc;
618 uc = umtxq_getchain(&uq->uq_key);
619 UMTXQ_LOCKED_ASSERT(uc);
621 if (!(uq->uq_flags & UQF_UMTXQ))
623 if (abstime != NULL) {
624 timo = abs_timeout_gethz(abstime);
629 error = msleep(uq, &uc->uc_lock, PCATCH | PDROP, wmesg, timo);
630 if (error != EWOULDBLOCK) {
631 umtxq_lock(&uq->uq_key);
635 abs_timeout_update(abstime);
636 umtxq_lock(&uq->uq_key);
642 * Convert a userspace address into a unique logical address.
645 umtx_key_get(void *addr, int type, int share, struct umtx_key *key)
647 struct thread *td = curthread;
649 vm_map_entry_t entry;
655 if (share == THREAD_SHARE) {
657 key->info.private.vs = td->td_proc->p_vmspace;
658 key->info.private.addr = (uintptr_t)addr;
660 MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
661 map = &td->td_proc->p_vmspace->vm_map;
662 if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
663 &entry, &key->info.shared.object, &pindex, &prot,
664 &wired) != KERN_SUCCESS) {
668 if ((share == PROCESS_SHARE) ||
669 (share == AUTO_SHARE &&
670 VM_INHERIT_SHARE == entry->inheritance)) {
672 key->info.shared.offset = entry->offset + entry->start -
674 vm_object_reference(key->info.shared.object);
677 key->info.private.vs = td->td_proc->p_vmspace;
678 key->info.private.addr = (uintptr_t)addr;
680 vm_map_lookup_done(map, entry);
691 umtx_key_release(struct umtx_key *key)
694 vm_object_deallocate(key->info.shared.object);
698 * Lock a umtx object.
701 do_lock_umtx(struct thread *td, struct umtx *umtx, u_long id,
702 const struct timespec *timeout)
704 struct abs_timeout timo;
712 abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout);
715 * Care must be exercised when dealing with the umtx structure.  It
716 * can fault on any access.
720 * Try the uncontested case. This should be done in userland.
722 owner = casuword(&umtx->u_owner, UMTX_UNOWNED, id);
724 /* The acquire succeeded. */
725 if (owner == UMTX_UNOWNED)
728 /* The address was invalid. */
732 /* If no one owns it but it is contested try to acquire it. */
733 if (owner == UMTX_CONTESTED) {
734 owner = casuword(&umtx->u_owner,
735 UMTX_CONTESTED, id | UMTX_CONTESTED);
737 if (owner == UMTX_CONTESTED)
740 /* The address was invalid. */
744 /* If this failed the lock has changed, restart. */
749 * If we caught a signal, we have retried and now exit immediately.
755 if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK,
756 AUTO_SHARE, &uq->uq_key)) != 0)
759 umtxq_lock(&uq->uq_key);
760 umtxq_busy(&uq->uq_key);
762 umtxq_unbusy(&uq->uq_key);
763 umtxq_unlock(&uq->uq_key);
766 * Set the contested bit so that a release in user space
767 * knows to use the system call for unlock.  If this fails,
768 * either someone else has acquired the lock or it has been released.
771 old = casuword(&umtx->u_owner, owner, owner | UMTX_CONTESTED);
773 /* The address was invalid. */
775 umtxq_lock(&uq->uq_key);
777 umtxq_unlock(&uq->uq_key);
778 umtx_key_release(&uq->uq_key);
783 * We set the contested bit, so sleep.  Otherwise the lock changed
784 * and we need to retry, or we lost a race to the thread
785 * unlocking the umtx.
787 umtxq_lock(&uq->uq_key);
789 error = umtxq_sleep(uq, "umtx", timeout == NULL ? NULL :
792 umtxq_unlock(&uq->uq_key);
793 umtx_key_release(&uq->uq_key);
796 if (timeout == NULL) {
797 /* Mutex locking is restarted if it is interrupted. */
801 /* Timed-locking is not restarted. */
802 if (error == ERESTART)
809 * Unlock a umtx object.
812 do_unlock_umtx(struct thread *td, struct umtx *umtx, u_long id)
821 * Make sure we own this mtx.
823 owner = fuword(__DEVOLATILE(u_long *, &umtx->u_owner));
827 if ((owner & ~UMTX_CONTESTED) != id)
830 /* This should be done in userland */
831 if ((owner & UMTX_CONTESTED) == 0) {
832 old = casuword(&umtx->u_owner, owner, UMTX_UNOWNED);
840 /* We should only ever be in here for contested locks */
841 if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK, AUTO_SHARE,
847 count = umtxq_count(&key);
851 * When unlocking the umtx, it must be marked as unowned if
852 * there is at most one thread waiting for it; otherwise, it
853 * must be marked as contested.
855 old = casuword(&umtx->u_owner, owner,
856 count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
858 umtxq_signal(&key, 1);
861 umtx_key_release(&key);
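/*
 * Userland counterpart (a hedged sketch, not code copied from libthr): the
 * UMTX_CONTESTED bit is what keeps the uncontested unlock out of the kernel:
 *
 *	if (atomic_cmpset_rel_long(&u->u_owner, tid, UMTX_UNOWNED))
 *		return;			(never contested: no syscall needed)
 *	_umtx_op(u, UMTX_OP_UNLOCK, tid, NULL, NULL);
 */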
869 #ifdef COMPAT_FREEBSD32
872 * Lock a umtx object.
875 do_lock_umtx32(struct thread *td, uint32_t *m, uint32_t id,
876 const struct timespec *timeout)
878 struct abs_timeout timo;
887 abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout);
890 * Care must be exercised when dealing with the umtx structure.  It
891 * can fault on any access.
895 * Try the uncontested case. This should be done in userland.
897 owner = casuword32(m, UMUTEX_UNOWNED, id);
899 /* The acquire succeeded. */
900 if (owner == UMUTEX_UNOWNED)
903 /* The address was invalid. */
907 /* If no one owns it but it is contested try to acquire it. */
908 if (owner == UMUTEX_CONTESTED) {
909 owner = casuword32(m,
910 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
911 if (owner == UMUTEX_CONTESTED)
914 /* The address was invalid. */
918 /* If this failed the lock has changed, restart. */
923 * If we caught a signal, we have retried and now exit immediately.
929 if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK,
930 AUTO_SHARE, &uq->uq_key)) != 0)
933 umtxq_lock(&uq->uq_key);
934 umtxq_busy(&uq->uq_key);
936 umtxq_unbusy(&uq->uq_key);
937 umtxq_unlock(&uq->uq_key);
940 * Set the contested bit so that a release in user space
941 * knows to use the system call for unlock.  If this fails,
942 * either someone else has acquired the lock or it has been released.
945 old = casuword32(m, owner, owner | UMUTEX_CONTESTED);
947 /* The address was invalid. */
949 umtxq_lock(&uq->uq_key);
951 umtxq_unlock(&uq->uq_key);
952 umtx_key_release(&uq->uq_key);
957 * We set the contested bit, so sleep.  Otherwise the lock changed
958 * and we need to retry, or we lost a race to the thread
959 * unlocking the umtx.
961 umtxq_lock(&uq->uq_key);
963 error = umtxq_sleep(uq, "umtx", timeout == NULL ?
966 umtxq_unlock(&uq->uq_key);
967 umtx_key_release(&uq->uq_key);
970 if (timeout == NULL) {
971 /* Mutex locking is restarted if it is interrupted. */
975 /* Timed-locking is not restarted. */
976 if (error == ERESTART)
983 * Unlock a umtx object.
986 do_unlock_umtx32(struct thread *td, uint32_t *m, uint32_t id)
995 * Make sure we own this mtx.
1001 if ((owner & ~UMUTEX_CONTESTED) != id)
1004 /* This should be done in userland */
1005 if ((owner & UMUTEX_CONTESTED) == 0) {
1006 old = casuword32(m, owner, UMUTEX_UNOWNED);
1014 /* We should only ever be in here for contested locks */
1015 if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK, AUTO_SHARE,
1021 count = umtxq_count(&key);
1025 * When unlocking the umtx, it must be marked as unowned if
1026 * there is at most one thread waiting for it; otherwise, it
1027 * must be marked as contested.
1029 old = casuword32(m, owner,
1030 count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
1032 umtxq_signal(&key, 1);
1035 umtx_key_release(&key);
1045 * Fetch and compare the value; sleep on the address if the value has not changed.
1048 do_wait(struct thread *td, void *addr, u_long id,
1049 struct _umtx_time *timeout, int compat32, int is_private)
1051 struct abs_timeout timo;
1057 if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
1058 is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
1061 if (timeout != NULL)
1062 abs_timeout_init2(&timo, timeout);
1064 umtxq_lock(&uq->uq_key);
1066 umtxq_unlock(&uq->uq_key);
1070 tmp = (unsigned int)fuword32(addr);
1071 umtxq_lock(&uq->uq_key);
1073 error = umtxq_sleep(uq, "uwait", timeout == NULL ?
1075 if ((uq->uq_flags & UQF_UMTXQ) == 0)
1079 umtxq_unlock(&uq->uq_key);
1080 umtx_key_release(&uq->uq_key);
1081 if (error == ERESTART)
1087 * Wake up threads sleeping on the specified address.
1090 kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
1092 struct umtx_key key;
1095 if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
1096 is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
1099 ret = umtxq_signal(&key, n_wake);
1101 umtx_key_release(&key);
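/*
 * Usage sketch (illustrative, assuming the UMTX_OP_WAIT_UINT_PRIVATE and
 * UMTX_OP_WAKE_PRIVATE operations from sys/umtx.h): the wait/wake pair
 * above backs a futex-style loop such as
 *
 *	while (atomic_load_acq_int(&word) == val)
 *		_umtx_op(&word, UMTX_OP_WAIT_UINT_PRIVATE, val, NULL, NULL);
 *	...
 *	atomic_store_rel_int(&word, newval);
 *	_umtx_op(&word, UMTX_OP_WAKE_PRIVATE, 1, NULL, NULL);
 */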
1106 * Lock PTHREAD_PRIO_NONE protocol POSIX mutex.
1109 do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags,
1110 struct _umtx_time *timeout, int mode)
1112 struct abs_timeout timo;
1114 uint32_t owner, old, id;
1120 if (timeout != NULL)
1121 abs_timeout_init2(&timo, timeout);
1124 * Care must be exercised when dealing with the umtx structure.  It
1125 * can fault on any access.
1128 owner = fuword32(__DEVOLATILE(void *, &m->m_owner));
1129 if (mode == _UMUTEX_WAIT) {
1130 if (owner == UMUTEX_UNOWNED || owner == UMUTEX_CONTESTED)
1134 * Try the uncontested case. This should be done in userland.
1136 owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id);
1138 /* The acquire succeeded. */
1139 if (owner == UMUTEX_UNOWNED)
1142 /* The address was invalid. */
1146 /* If no one owns it but it is contested try to acquire it. */
1147 if (owner == UMUTEX_CONTESTED) {
1148 owner = casuword32(&m->m_owner,
1149 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
1151 if (owner == UMUTEX_CONTESTED)
1154 /* The address was invalid. */
1158 /* If this failed the lock has changed, restart. */
1163 if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
1164 (owner & ~UMUTEX_CONTESTED) == id)
1167 if (mode == _UMUTEX_TRY)
1171 * If we caught a signal, we have retried and now exit immediately.
1177 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
1178 GET_SHARE(flags), &uq->uq_key)) != 0)
1181 umtxq_lock(&uq->uq_key);
1182 umtxq_busy(&uq->uq_key);
1184 umtxq_unlock(&uq->uq_key);
1187 * Set the contested bit so that a release in user space
1188 * knows to use the system call for unlock.  If this fails,
1189 * either someone else has acquired the lock or it has been released.
1192 old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED);
1194 /* The address was invalid. */
1196 umtxq_lock(&uq->uq_key);
1198 umtxq_unbusy(&uq->uq_key);
1199 umtxq_unlock(&uq->uq_key);
1200 umtx_key_release(&uq->uq_key);
1205 * We set the contested bit, so sleep.  Otherwise the lock changed
1206 * and we need to retry, or we lost a race to the thread
1207 * unlocking the umtx.
1209 umtxq_lock(&uq->uq_key);
1210 umtxq_unbusy(&uq->uq_key);
1212 error = umtxq_sleep(uq, "umtxn", timeout == NULL ?
1215 umtxq_unlock(&uq->uq_key);
1216 umtx_key_release(&uq->uq_key);
1223 * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex.
1226 do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags)
1228 struct umtx_key key;
1229 uint32_t owner, old, id;
1235 * Make sure we own this mtx.
1237 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1241 if ((owner & ~UMUTEX_CONTESTED) != id)
1244 if ((owner & UMUTEX_CONTESTED) == 0) {
1245 old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED);
1253 /* We should only ever be in here for contested locks */
1254 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
1260 count = umtxq_count(&key);
1264 * When unlocking the umtx, it must be marked as unowned if
1265 * there is at most one thread waiting for it; otherwise, it
1266 * must be marked as contested.
1268 old = casuword32(&m->m_owner, owner,
1269 count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
1271 umtxq_signal(&key, 1);
1274 umtx_key_release(&key);
1283 * Check if the mutex is available and wake up a waiter;
1284 * this applies only to a simple mutex.
1287 do_wake_umutex(struct thread *td, struct umutex *m)
1289 struct umtx_key key;
1295 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1299 if ((owner & ~UMUTEX_CONTESTED) != 0)
1302 flags = fuword32(&m->m_flags);
1304 /* We should only ever be in here for contested locks */
1305 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
1311 count = umtxq_count(&key);
1315 owner = casuword32(&m->m_owner, UMUTEX_CONTESTED, UMUTEX_UNOWNED);
1318 if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0)
1319 umtxq_signal(&key, 1);
1322 umtx_key_release(&key);
1327 * Check if the mutex has waiters and try to fix the contention bit.
1330 do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
1332 struct umtx_key key;
1333 uint32_t owner, old;
1338 switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
1340 type = TYPE_NORMAL_UMUTEX;
1342 case UMUTEX_PRIO_INHERIT:
1343 type = TYPE_PI_UMUTEX;
1345 case UMUTEX_PRIO_PROTECT:
1346 type = TYPE_PP_UMUTEX;
1351 if ((error = umtx_key_get(m, type, GET_SHARE(flags),
1358 count = umtxq_count(&key);
1361 * Only repair the contention bit if there is a waiter; this means the
1362 * mutex is still being referenced by userland code.  Otherwise, don't update any memory.
1366 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1367 while ((owner & UMUTEX_CONTESTED) == 0) {
1368 old = casuword32(&m->m_owner, owner,
1369 owner | UMUTEX_CONTESTED);
1374 } else if (count == 1) {
1375 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1376 while ((owner & ~UMUTEX_CONTESTED) != 0 &&
1377 (owner & UMUTEX_CONTESTED) == 0) {
1378 old = casuword32(&m->m_owner, owner,
1379 owner|UMUTEX_CONTESTED);
1388 umtxq_signal(&key, INT_MAX);
1390 else if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0)
1391 umtxq_signal(&key, 1);
1394 umtx_key_release(&key);
1398 static inline struct umtx_pi *
1399 umtx_pi_alloc(int flags)
1403 pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
1404 TAILQ_INIT(&pi->pi_blocked);
1405 atomic_add_int(&umtx_pi_allocated, 1);
1410 umtx_pi_free(struct umtx_pi *pi)
1412 uma_zfree(umtx_pi_zone, pi);
1413 atomic_add_int(&umtx_pi_allocated, -1);
1417 * Adjust the thread's position on a pi_state after its priority has been changed.
1421 umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
1423 struct umtx_q *uq, *uq1, *uq2;
1426 mtx_assert(&umtx_lock, MA_OWNED);
1433 * Check if the thread needs to be moved on the blocked chain.
1434 * It needs to be moved if either its priority is lower than
1435 * the previous thread or higher than the next thread.
1437 uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
1438 uq2 = TAILQ_NEXT(uq, uq_lockq);
1439 if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
1440 (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
1442 * Remove thread from blocked chain and determine where
1443 * it should be moved to.
1445 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1446 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1447 td1 = uq1->uq_thread;
1448 MPASS(td1->td_proc->p_magic == P_MAGIC);
1449 if (UPRI(td1) > UPRI(td))
1454 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1456 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1462 * Propagate priority when a thread is blocked on a POSIX PI mutex.
1466 umtx_propagate_priority(struct thread *td)
1472 mtx_assert(&umtx_lock, MA_OWNED);
1475 pi = uq->uq_pi_blocked;
1481 if (td == NULL || td == curthread)
1484 MPASS(td->td_proc != NULL);
1485 MPASS(td->td_proc->p_magic == P_MAGIC);
1488 if (td->td_lend_user_pri > pri)
1489 sched_lend_user_prio(td, pri);
1497 * Pick up the lock that td is blocked on.
1500 pi = uq->uq_pi_blocked;
1503 /* Resort td on the list if needed. */
1504 umtx_pi_adjust_thread(pi, td);
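/*
 * Worked example (illustrative): if thread B with UPRI 120 blocks on a PI
 * mutex owned by A with UPRI 160, the walk above lends priority 120 to A;
 * if A is itself blocked on another PI mutex owned by C, the loop continues
 * and C is lent 120 as well.  Time-sharing waiters are clamped by UPRI()
 * first, so they can never push an owner above PRI_MAX_TIMESHARE.
 */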
1509 * Unpropagate priority for a PI mutex when a thread blocked on
1510 * it is interrupted by a signal or resumed by another thread.
1513 umtx_repropagate_priority(struct umtx_pi *pi)
1515 struct umtx_q *uq, *uq_owner;
1516 struct umtx_pi *pi2;
1519 mtx_assert(&umtx_lock, MA_OWNED);
1521 while (pi != NULL && pi->pi_owner != NULL) {
1523 uq_owner = pi->pi_owner->td_umtxq;
1525 TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
1526 uq = TAILQ_FIRST(&pi2->pi_blocked);
1528 if (pri > UPRI(uq->uq_thread))
1529 pri = UPRI(uq->uq_thread);
1533 if (pri > uq_owner->uq_inherited_pri)
1534 pri = uq_owner->uq_inherited_pri;
1535 thread_lock(pi->pi_owner);
1536 sched_lend_user_prio(pi->pi_owner, pri);
1537 thread_unlock(pi->pi_owner);
1538 if ((pi = uq_owner->uq_pi_blocked) != NULL)
1539 umtx_pi_adjust_thread(pi, uq_owner->uq_thread);
1544 * Insert a PI mutex into the owning thread's list.
1547 umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
1549 struct umtx_q *uq_owner;
1551 uq_owner = owner->td_umtxq;
1552 mtx_assert(&umtx_lock, MA_OWNED);
1553 if (pi->pi_owner != NULL)
1554 panic("pi_ower != NULL");
1555 pi->pi_owner = owner;
1556 TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);
1560 * Claim ownership of a PI mutex.
1563 umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
1565 struct umtx_q *uq, *uq_owner;
1567 uq_owner = owner->td_umtxq;
1568 mtx_lock_spin(&umtx_lock);
1569 if (pi->pi_owner == owner) {
1570 mtx_unlock_spin(&umtx_lock);
1574 if (pi->pi_owner != NULL) {
1576 * Userland may have already messed up the mutex; sigh.
1578 mtx_unlock_spin(&umtx_lock);
1581 umtx_pi_setowner(pi, owner);
1582 uq = TAILQ_FIRST(&pi->pi_blocked);
1586 pri = UPRI(uq->uq_thread);
1588 if (pri < UPRI(owner))
1589 sched_lend_user_prio(owner, pri);
1590 thread_unlock(owner);
1592 mtx_unlock_spin(&umtx_lock);
1597 * Adjust a thread's position in the blocked list of its PI mutex;
1598 * this may trigger a new round of priority propagation.
1601 umtx_pi_adjust(struct thread *td, u_char oldpri)
1607 mtx_lock_spin(&umtx_lock);
1609 * Pick up the lock that td is blocked on.
1611 pi = uq->uq_pi_blocked;
1613 umtx_pi_adjust_thread(pi, td);
1614 umtx_repropagate_priority(pi);
1616 mtx_unlock_spin(&umtx_lock);
1620 * Sleep on a PI mutex.
1623 umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi,
1624 uint32_t owner, const char *wmesg, struct abs_timeout *timo)
1626 struct umtxq_chain *uc;
1627 struct thread *td, *td1;
1633 KASSERT(td == curthread, ("inconsistent uq_thread"));
1634 uc = umtxq_getchain(&uq->uq_key);
1635 UMTXQ_LOCKED_ASSERT(uc);
1636 UMTXQ_BUSY_ASSERT(uc);
1638 mtx_lock_spin(&umtx_lock);
1639 if (pi->pi_owner == NULL) {
1640 mtx_unlock_spin(&umtx_lock);
1641 /* XXX Only look up thread in current process. */
1642 td1 = tdfind(owner, curproc->p_pid);
1643 mtx_lock_spin(&umtx_lock);
1645 if (pi->pi_owner == NULL)
1646 umtx_pi_setowner(pi, td1);
1647 PROC_UNLOCK(td1->td_proc);
1651 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1652 pri = UPRI(uq1->uq_thread);
1658 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1660 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1662 uq->uq_pi_blocked = pi;
1664 td->td_flags |= TDF_UPIBLOCKED;
1666 umtx_propagate_priority(td);
1667 mtx_unlock_spin(&umtx_lock);
1668 umtxq_unbusy(&uq->uq_key);
1670 error = umtxq_sleep(uq, wmesg, timo);
1673 mtx_lock_spin(&umtx_lock);
1674 uq->uq_pi_blocked = NULL;
1676 td->td_flags &= ~TDF_UPIBLOCKED;
1678 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1679 umtx_repropagate_priority(pi);
1680 mtx_unlock_spin(&umtx_lock);
1681 umtxq_unlock(&uq->uq_key);
1687 * Add reference count for a PI mutex.
1690 umtx_pi_ref(struct umtx_pi *pi)
1692 struct umtxq_chain *uc;
1694 uc = umtxq_getchain(&pi->pi_key);
1695 UMTXQ_LOCKED_ASSERT(uc);
1700 * Decrement the reference count of a PI mutex; if the count
1701 * drops to zero, its memory is freed.
1704 umtx_pi_unref(struct umtx_pi *pi)
1706 struct umtxq_chain *uc;
1708 uc = umtxq_getchain(&pi->pi_key);
1709 UMTXQ_LOCKED_ASSERT(uc);
1710 KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
1711 if (--pi->pi_refcount == 0) {
1712 mtx_lock_spin(&umtx_lock);
1713 if (pi->pi_owner != NULL) {
1714 TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested,
1716 pi->pi_owner = NULL;
1718 KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
1719 ("blocked queue not empty"));
1720 mtx_unlock_spin(&umtx_lock);
1721 TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
1727 * Find a PI mutex in hash table.
1729 static struct umtx_pi *
1730 umtx_pi_lookup(struct umtx_key *key)
1732 struct umtxq_chain *uc;
1735 uc = umtxq_getchain(key);
1736 UMTXQ_LOCKED_ASSERT(uc);
1738 TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
1739 if (umtx_key_match(&pi->pi_key, key)) {
1747 * Insert a PI mutex into hash table.
1750 umtx_pi_insert(struct umtx_pi *pi)
1752 struct umtxq_chain *uc;
1754 uc = umtxq_getchain(&pi->pi_key);
1755 UMTXQ_LOCKED_ASSERT(uc);
1756 TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
1763 do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags,
1764 struct _umtx_time *timeout, int try)
1766 struct abs_timeout timo;
1768 struct umtx_pi *pi, *new_pi;
1769 uint32_t id, owner, old;
1775 if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags),
1779 if (timeout != NULL)
1780 abs_timeout_init2(&timo, timeout);
1782 umtxq_lock(&uq->uq_key);
1783 pi = umtx_pi_lookup(&uq->uq_key);
1785 new_pi = umtx_pi_alloc(M_NOWAIT);
1786 if (new_pi == NULL) {
1787 umtxq_unlock(&uq->uq_key);
1788 new_pi = umtx_pi_alloc(M_WAITOK);
1789 umtxq_lock(&uq->uq_key);
1790 pi = umtx_pi_lookup(&uq->uq_key);
1792 umtx_pi_free(new_pi);
1796 if (new_pi != NULL) {
1797 new_pi->pi_key = uq->uq_key;
1798 umtx_pi_insert(new_pi);
1803 umtxq_unlock(&uq->uq_key);
1806 * Care must be exercised when dealing with the umtx structure.  It
1807 * can fault on any access.
1811 * Try the uncontested case. This should be done in userland.
1813 owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id);
1815 /* The acquire succeeded. */
1816 if (owner == UMUTEX_UNOWNED) {
1821 /* The address was invalid. */
1827 /* If no one owns it but it is contested try to acquire it. */
1828 if (owner == UMUTEX_CONTESTED) {
1829 owner = casuword32(&m->m_owner,
1830 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
1832 if (owner == UMUTEX_CONTESTED) {
1833 umtxq_lock(&uq->uq_key);
1834 umtxq_busy(&uq->uq_key);
1835 error = umtx_pi_claim(pi, td);
1836 umtxq_unbusy(&uq->uq_key);
1837 umtxq_unlock(&uq->uq_key);
1841 /* The address was invalid. */
1847 /* If this failed the lock has changed, restart. */
1851 if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
1852 (owner & ~UMUTEX_CONTESTED) == id) {
1863 * If we caught a signal, we have retried and now exit immediately.
1869 umtxq_lock(&uq->uq_key);
1870 umtxq_busy(&uq->uq_key);
1871 umtxq_unlock(&uq->uq_key);
1874 * Set the contested bit so that a release in user space
1875 * knows to use the system call for unlock.  If this fails,
1876 * either someone else has acquired the lock or it has been released.
1879 old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED);
1881 /* The address was invalid. */
1883 umtxq_lock(&uq->uq_key);
1884 umtxq_unbusy(&uq->uq_key);
1885 umtxq_unlock(&uq->uq_key);
1890 umtxq_lock(&uq->uq_key);
1892 * We set the contested bit, so sleep.  Otherwise the lock changed
1893 * and we need to retry, or we lost a race to the thread
1894 * unlocking the umtx.
1897 error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED,
1898 "umtxpi", timeout == NULL ? NULL : &timo);
1900 umtxq_unbusy(&uq->uq_key);
1901 umtxq_unlock(&uq->uq_key);
1905 umtxq_lock(&uq->uq_key);
1907 umtxq_unlock(&uq->uq_key);
1909 umtx_key_release(&uq->uq_key);
1914 * Unlock a PI mutex.
1917 do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
1919 struct umtx_key key;
1920 struct umtx_q *uq_first, *uq_first2, *uq_me;
1921 struct umtx_pi *pi, *pi2;
1922 uint32_t owner, old, id;
1929 * Make sure we own this mtx.
1931 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1935 if ((owner & ~UMUTEX_CONTESTED) != id)
1938 /* This should be done in userland */
1939 if ((owner & UMUTEX_CONTESTED) == 0) {
1940 old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED);
1948 /* We should only ever be in here for contested locks */
1949 if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags),
1955 count = umtxq_count_pi(&key, &uq_first);
1956 if (uq_first != NULL) {
1957 mtx_lock_spin(&umtx_lock);
1958 pi = uq_first->uq_pi_blocked;
1959 KASSERT(pi != NULL, ("pi == NULL?"));
1960 if (pi->pi_owner != curthread) {
1961 mtx_unlock_spin(&umtx_lock);
1964 umtx_key_release(&key);
1965 /* Userland messed up the mutex. */
1968 uq_me = curthread->td_umtxq;
1969 pi->pi_owner = NULL;
1970 TAILQ_REMOVE(&uq_me->uq_pi_contested, pi, pi_link);
1971 /* Get the highest-priority thread that is still sleeping. */
1972 uq_first = TAILQ_FIRST(&pi->pi_blocked);
1973 while (uq_first != NULL &&
1974 (uq_first->uq_flags & UQF_UMTXQ) == 0) {
1975 uq_first = TAILQ_NEXT(uq_first, uq_lockq);
1978 TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
1979 uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
1980 if (uq_first2 != NULL) {
1981 if (pri > UPRI(uq_first2->uq_thread))
1982 pri = UPRI(uq_first2->uq_thread);
1985 thread_lock(curthread);
1986 sched_lend_user_prio(curthread, pri);
1987 thread_unlock(curthread);
1988 mtx_unlock_spin(&umtx_lock);
1990 umtxq_signal_thread(uq_first);
1995 * When unlocking the umtx, it must be marked as unowned if
1996 * there is at most one thread waiting for it; otherwise, it
1997 * must be marked as contested.
1999 old = casuword32(&m->m_owner, owner,
2000 count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
2005 umtx_key_release(&key);
2017 do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
2018 struct _umtx_time *timeout, int try)
2020 struct abs_timeout timo;
2021 struct umtx_q *uq, *uq2;
2025 int error, pri, old_inherited_pri, su;
2029 if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
2033 if (timeout != NULL)
2034 abs_timeout_init2(&timo, timeout);
2036 su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2038 old_inherited_pri = uq->uq_inherited_pri;
2039 umtxq_lock(&uq->uq_key);
2040 umtxq_busy(&uq->uq_key);
2041 umtxq_unlock(&uq->uq_key);
2043 ceiling = RTP_PRIO_MAX - fuword32(&m->m_ceilings[0]);
2044 if (ceiling > RTP_PRIO_MAX) {
2049 mtx_lock_spin(&umtx_lock);
2050 if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
2051 mtx_unlock_spin(&umtx_lock);
2055 if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
2056 uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
2058 if (uq->uq_inherited_pri < UPRI(td))
2059 sched_lend_user_prio(td, uq->uq_inherited_pri);
2062 mtx_unlock_spin(&umtx_lock);
2064 owner = casuword32(&m->m_owner,
2065 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
2067 if (owner == UMUTEX_CONTESTED) {
2072 /* The address was invalid. */
2078 if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
2079 (owner & ~UMUTEX_CONTESTED) == id) {
2090 * If we caught a signal, we have retried and now exit immediately.
2096 umtxq_lock(&uq->uq_key);
2098 umtxq_unbusy(&uq->uq_key);
2099 error = umtxq_sleep(uq, "umtxpp", timeout == NULL ?
2102 umtxq_unlock(&uq->uq_key);
2104 mtx_lock_spin(&umtx_lock);
2105 uq->uq_inherited_pri = old_inherited_pri;
2107 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2108 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2110 if (pri > UPRI(uq2->uq_thread))
2111 pri = UPRI(uq2->uq_thread);
2114 if (pri > uq->uq_inherited_pri)
2115 pri = uq->uq_inherited_pri;
2117 sched_lend_user_prio(td, pri);
2119 mtx_unlock_spin(&umtx_lock);
2123 mtx_lock_spin(&umtx_lock);
2124 uq->uq_inherited_pri = old_inherited_pri;
2126 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2127 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2129 if (pri > UPRI(uq2->uq_thread))
2130 pri = UPRI(uq2->uq_thread);
2133 if (pri > uq->uq_inherited_pri)
2134 pri = uq->uq_inherited_pri;
2136 sched_lend_user_prio(td, pri);
2138 mtx_unlock_spin(&umtx_lock);
2142 umtxq_lock(&uq->uq_key);
2143 umtxq_unbusy(&uq->uq_key);
2144 umtxq_unlock(&uq->uq_key);
2145 umtx_key_release(&uq->uq_key);
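/*
 * Worked example (illustrative, using RTP_PRIO_MAX from <sys/rtprio.h>):
 * a userland ceiling c is translated above into the kernel priority
 * PRI_MIN_REALTIME + (RTP_PRIO_MAX - c), so the highest ceiling maps to the
 * numerically smallest (strongest) real-time priority, and only threads
 * holding PRIV_SCHED_RTPRIO actually have it lent to them.
 */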
2150 * Unlock a PP mutex.
2153 do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags)
2155 struct umtx_key key;
2156 struct umtx_q *uq, *uq2;
2160 int error, pri, new_inherited_pri, su;
2164 su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2167 * Make sure we own this mtx.
2169 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
2173 if ((owner & ~UMUTEX_CONTESTED) != id)
2176 error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
2181 new_inherited_pri = PRI_MAX;
2183 rceiling = RTP_PRIO_MAX - rceiling;
2184 if (rceiling > RTP_PRIO_MAX)
2186 new_inherited_pri = PRI_MIN_REALTIME + rceiling;
2189 if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
2196 * For a priority-protected mutex, always set the unlocked state
2197 * to UMUTEX_CONTESTED, so that userland always enters the kernel
2198 * to lock the mutex.  This is necessary because the thread priority
2199 * has to be adjusted for such a mutex.
2201 error = suword32(__DEVOLATILE(uint32_t *, &m->m_owner),
2206 umtxq_signal(&key, 1);
2213 mtx_lock_spin(&umtx_lock);
2215 uq->uq_inherited_pri = new_inherited_pri;
2217 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2218 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2220 if (pri > UPRI(uq2->uq_thread))
2221 pri = UPRI(uq2->uq_thread);
2224 if (pri > uq->uq_inherited_pri)
2225 pri = uq->uq_inherited_pri;
2227 sched_lend_user_prio(td, pri);
2229 mtx_unlock_spin(&umtx_lock);
2231 umtx_key_release(&key);
2236 do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
2237 uint32_t *old_ceiling)
2240 uint32_t save_ceiling;
2245 flags = fuword32(&m->m_flags);
2246 if ((flags & UMUTEX_PRIO_PROTECT) == 0)
2248 if (ceiling > RTP_PRIO_MAX)
2252 if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
2256 umtxq_lock(&uq->uq_key);
2257 umtxq_busy(&uq->uq_key);
2258 umtxq_unlock(&uq->uq_key);
2260 save_ceiling = fuword32(&m->m_ceilings[0]);
2262 owner = casuword32(&m->m_owner,
2263 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
2265 if (owner == UMUTEX_CONTESTED) {
2266 suword32(&m->m_ceilings[0], ceiling);
2267 suword32(__DEVOLATILE(uint32_t *, &m->m_owner),
2273 /* The address was invalid. */
2279 if ((owner & ~UMUTEX_CONTESTED) == id) {
2280 suword32(&m->m_ceilings[0], ceiling);
2286 * If we caught a signal, we have retried and now exit immediately.
2293 * We set the contested bit, so sleep.  Otherwise the lock changed
2294 * and we need to retry, or we lost a race to the thread
2295 * unlocking the umtx.
2297 umtxq_lock(&uq->uq_key);
2299 umtxq_unbusy(&uq->uq_key);
2300 error = umtxq_sleep(uq, "umtxpp", NULL);
2302 umtxq_unlock(&uq->uq_key);
2304 umtxq_lock(&uq->uq_key);
2306 umtxq_signal(&uq->uq_key, INT_MAX);
2307 umtxq_unbusy(&uq->uq_key);
2308 umtxq_unlock(&uq->uq_key);
2309 umtx_key_release(&uq->uq_key);
2310 if (error == 0 && old_ceiling != NULL)
2311 suword32(old_ceiling, save_ceiling);
2316 * Lock a userland POSIX mutex.
2319 do_lock_umutex(struct thread *td, struct umutex *m,
2320 struct _umtx_time *timeout, int mode)
2325 flags = fuword32(&m->m_flags);
2329 switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2331 error = do_lock_normal(td, m, flags, timeout, mode);
2333 case UMUTEX_PRIO_INHERIT:
2334 error = do_lock_pi(td, m, flags, timeout, mode);
2336 case UMUTEX_PRIO_PROTECT:
2337 error = do_lock_pp(td, m, flags, timeout, mode);
2342 if (timeout == NULL) {
2343 if (error == EINTR && mode != _UMUTEX_WAIT)
2346 /* Timed-locking is not restarted. */
2347 if (error == ERESTART)
2354 * Unlock a userland POSIX mutex.
2357 do_unlock_umutex(struct thread *td, struct umutex *m)
2361 flags = fuword32(&m->m_flags);
2365 switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2367 return (do_unlock_normal(td, m, flags));
2368 case UMUTEX_PRIO_INHERIT:
2369 return (do_unlock_pi(td, m, flags));
2370 case UMUTEX_PRIO_PROTECT:
2371 return (do_unlock_pp(td, m, flags));
2378 do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
2379 struct timespec *timeout, u_long wflags)
2381 struct abs_timeout timo;
2388 flags = fuword32(&cv->c_flags);
2389 error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);
2393 if ((wflags & CVWAIT_CLOCKID) != 0) {
2394 clockid = fuword32(&cv->c_clockid);
2395 if (clockid < CLOCK_REALTIME ||
2396 clockid >= CLOCK_THREAD_CPUTIME_ID) {
2397 /* hmm, only HW clock id will work. */
2401 clockid = CLOCK_REALTIME;
2404 umtxq_lock(&uq->uq_key);
2405 umtxq_busy(&uq->uq_key);
2407 umtxq_unlock(&uq->uq_key);
2410 * Set c_has_waiters to 1 before releasing the user mutex; also
2411 * don't modify the cache line when unnecessary.
2413 if (fuword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters)) == 0)
2414 suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 1);
2416 umtxq_lock(&uq->uq_key);
2417 umtxq_unbusy(&uq->uq_key);
2418 umtxq_unlock(&uq->uq_key);
2420 error = do_unlock_umutex(td, m);
2422 if (timeout != NULL)
2423 abs_timeout_init(&timo, clockid, ((wflags & CVWAIT_ABSTIME) != 0),
2426 umtxq_lock(&uq->uq_key);
2428 error = umtxq_sleep(uq, "ucond", timeout == NULL ?
2432 if ((uq->uq_flags & UQF_UMTXQ) == 0)
2436 * This must be a timeout, an interruption by a signal, or a
2437 * spurious wakeup; clear the c_has_waiters flag when necessary.
2440 umtxq_busy(&uq->uq_key);
2441 if ((uq->uq_flags & UQF_UMTXQ) != 0) {
2442 int oldlen = uq->uq_cur_queue->length;
2445 umtxq_unlock(&uq->uq_key);
2447 __DEVOLATILE(uint32_t *,
2448 &cv->c_has_waiters), 0);
2449 umtxq_lock(&uq->uq_key);
2452 umtxq_unbusy(&uq->uq_key);
2453 if (error == ERESTART)
2457 umtxq_unlock(&uq->uq_key);
2458 umtx_key_release(&uq->uq_key);
2463 * Signal a userland condition variable.
2466 do_cv_signal(struct thread *td, struct ucond *cv)
2468 struct umtx_key key;
2469 int error, cnt, nwake;
2472 flags = fuword32(&cv->c_flags);
2473 if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
2477 cnt = umtxq_count(&key);
2478 nwake = umtxq_signal(&key, 1);
2482 __DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
2487 umtx_key_release(&key);
2492 do_cv_broadcast(struct thread *td, struct ucond *cv)
2494 struct umtx_key key;
2498 flags = fuword32(&cv->c_flags);
2499 if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
2504 umtxq_signal(&key, INT_MAX);
2507 error = suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
2513 umtx_key_release(&key);
2518 do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag, struct _umtx_time *timeout)
2520 struct abs_timeout timo;
2522 uint32_t flags, wrflags;
2523 int32_t state, oldstate;
2524 int32_t blocked_readers;
2528 flags = fuword32(&rwlock->rw_flags);
2529 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2533 if (timeout != NULL)
2534 abs_timeout_init2(&timo, timeout);
2536 wrflags = URWLOCK_WRITE_OWNER;
2537 if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
2538 wrflags |= URWLOCK_WRITE_WAITERS;
2541 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2542 /* try to lock it */
2543 while (!(state & wrflags)) {
2544 if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) {
2545 umtx_key_release(&uq->uq_key);
2548 oldstate = casuword32(&rwlock->rw_state, state, state + 1);
2549 if (oldstate == state) {
2550 umtx_key_release(&uq->uq_key);
2559 /* grab monitor lock */
2560 umtxq_lock(&uq->uq_key);
2561 umtxq_busy(&uq->uq_key);
2562 umtxq_unlock(&uq->uq_key);
2565 * re-read the state, in case it changed between the try-lock above
2566 * and the check below
2568 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2570 /* set read contention bit */
2571 while ((state & wrflags) && !(state & URWLOCK_READ_WAITERS)) {
2572 oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_READ_WAITERS);
2573 if (oldstate == state)
2578 /* state is changed while setting flags, restart */
2579 if (!(state & wrflags)) {
2580 umtxq_lock(&uq->uq_key);
2581 umtxq_unbusy(&uq->uq_key);
2582 umtxq_unlock(&uq->uq_key);
2587 /* The contention bit is set; before sleeping, increase the read-waiter count. */
2588 blocked_readers = fuword32(&rwlock->rw_blocked_readers);
2589 suword32(&rwlock->rw_blocked_readers, blocked_readers+1);
2591 while (state & wrflags) {
2592 umtxq_lock(&uq->uq_key);
2594 umtxq_unbusy(&uq->uq_key);
2596 error = umtxq_sleep(uq, "urdlck", timeout == NULL ?
2599 umtxq_busy(&uq->uq_key);
2601 umtxq_unlock(&uq->uq_key);
2604 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2607 /* Decrease the read-waiter count, and possibly clear the read contention bit. */
2608 blocked_readers = fuword32(&rwlock->rw_blocked_readers);
2609 suword32(&rwlock->rw_blocked_readers, blocked_readers-1);
2610 if (blocked_readers == 1) {
2611 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2613 oldstate = casuword32(&rwlock->rw_state, state,
2614 state & ~URWLOCK_READ_WAITERS);
2615 if (oldstate == state)
2621 umtxq_lock(&uq->uq_key);
2622 umtxq_unbusy(&uq->uq_key);
2623 umtxq_unlock(&uq->uq_key);
2625 umtx_key_release(&uq->uq_key);
2626 if (error == ERESTART)
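/*
 * Note (hedged, based on the URWLOCK_* definitions in <sys/umtx.h>):
 * rw_state packs the reader count (URWLOCK_READER_COUNT()) into the low
 * bits together with the URWLOCK_WRITE_OWNER, URWLOCK_WRITE_WAITERS and
 * URWLOCK_READ_WAITERS flag bits, which is why every state transition in
 * these functions is a single casuword32() on the whole word.
 */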
2632 do_rw_wrlock(struct thread *td, struct urwlock *rwlock, struct _umtx_time *timeout)
2634 struct abs_timeout timo;
2637 int32_t state, oldstate;
2638 int32_t blocked_writers;
2639 int32_t blocked_readers;
2643 flags = fuword32(&rwlock->rw_flags);
2644 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2648 if (timeout != NULL)
2649 abs_timeout_init2(&timo, timeout);
2651 blocked_readers = 0;
2653 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2654 while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
2655 oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_WRITE_OWNER);
2656 if (oldstate == state) {
2657 umtx_key_release(&uq->uq_key);
2664 if (!(state & (URWLOCK_WRITE_OWNER|URWLOCK_WRITE_WAITERS)) &&
2665 blocked_readers != 0) {
2666 umtxq_lock(&uq->uq_key);
2667 umtxq_busy(&uq->uq_key);
2668 umtxq_signal_queue(&uq->uq_key, INT_MAX, UMTX_SHARED_QUEUE);
2669 umtxq_unbusy(&uq->uq_key);
2670 umtxq_unlock(&uq->uq_key);
2676 /* grab monitor lock */
2677 umtxq_lock(&uq->uq_key);
2678 umtxq_busy(&uq->uq_key);
2679 umtxq_unlock(&uq->uq_key);
2682 * re-read the state, in case it changed between the try-lock above
2683 * and the check below
2685 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2687 while (((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) &&
2688 (state & URWLOCK_WRITE_WAITERS) == 0) {
2689 oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_WRITE_WAITERS);
2690 if (oldstate == state)
2695 if (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
2696 umtxq_lock(&uq->uq_key);
2697 umtxq_unbusy(&uq->uq_key);
2698 umtxq_unlock(&uq->uq_key);
2702 blocked_writers = fuword32(&rwlock->rw_blocked_writers);
2703 suword32(&rwlock->rw_blocked_writers, blocked_writers+1);
2705 while ((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) {
2706 umtxq_lock(&uq->uq_key);
2707 umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
2708 umtxq_unbusy(&uq->uq_key);
2710 error = umtxq_sleep(uq, "uwrlck", timeout == NULL ?
2713 umtxq_busy(&uq->uq_key);
2714 umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
2715 umtxq_unlock(&uq->uq_key);
2718 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2721 blocked_writers = fuword32(&rwlock->rw_blocked_writers);
2722 suword32(&rwlock->rw_blocked_writers, blocked_writers-1);
2723 if (blocked_writers == 1) {
2724 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2726 oldstate = casuword32(&rwlock->rw_state, state,
2727 state & ~URWLOCK_WRITE_WAITERS);
2728 if (oldstate == state)
2732 blocked_readers = fuword32(&rwlock->rw_blocked_readers);
2734 blocked_readers = 0;
2736 umtxq_lock(&uq->uq_key);
2737 umtxq_unbusy(&uq->uq_key);
2738 umtxq_unlock(&uq->uq_key);
2741 umtx_key_release(&uq->uq_key);
2742 if (error == ERESTART)
2748 do_rw_unlock(struct thread *td, struct urwlock *rwlock)
2752 int32_t state, oldstate;
2753 int error, q, count;
2756 flags = fuword32(&rwlock->rw_flags);
2757 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2761 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2762 if (state & URWLOCK_WRITE_OWNER) {
2764 oldstate = casuword32(&rwlock->rw_state, state,
2765 state & ~URWLOCK_WRITE_OWNER);
2766 if (oldstate != state) {
2768 if (!(oldstate & URWLOCK_WRITE_OWNER)) {
2775 } else if (URWLOCK_READER_COUNT(state) != 0) {
2777 oldstate = casuword32(&rwlock->rw_state, state,
2779 if (oldstate != state) {
2781 if (URWLOCK_READER_COUNT(oldstate) == 0) {
2796 if (!(flags & URWLOCK_PREFER_READER)) {
2797 if (state & URWLOCK_WRITE_WAITERS) {
2799 q = UMTX_EXCLUSIVE_QUEUE;
2800 } else if (state & URWLOCK_READ_WAITERS) {
2802 q = UMTX_SHARED_QUEUE;
2805 if (state & URWLOCK_READ_WAITERS) {
2807 q = UMTX_SHARED_QUEUE;
2808 } else if (state & URWLOCK_WRITE_WAITERS) {
2810 q = UMTX_EXCLUSIVE_QUEUE;
2815 umtxq_lock(&uq->uq_key);
2816 umtxq_busy(&uq->uq_key);
2817 umtxq_signal_queue(&uq->uq_key, count, q);
2818 umtxq_unbusy(&uq->uq_key);
2819 umtxq_unlock(&uq->uq_key);
2822 umtx_key_release(&uq->uq_key);
2827 do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
2829 struct abs_timeout timo;
2831 uint32_t flags, count;
2835 flags = fuword32(&sem->_flags);
2836 error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
2840 if (timeout != NULL)
2841 abs_timeout_init2(&timo, timeout);
2843 umtxq_lock(&uq->uq_key);
2844 umtxq_busy(&uq->uq_key);
2846 umtxq_unlock(&uq->uq_key);
2847 casuword32(__DEVOLATILE(uint32_t *, &sem->_has_waiters), 0, 1);
2848 count = fuword32(__DEVOLATILE(uint32_t *, &sem->_count));
2850 umtxq_lock(&uq->uq_key);
2851 umtxq_unbusy(&uq->uq_key);
2853 umtxq_unlock(&uq->uq_key);
2854 umtx_key_release(&uq->uq_key);
2857 umtxq_lock(&uq->uq_key);
2858 umtxq_unbusy(&uq->uq_key);
2860 error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);
2862 if ((uq->uq_flags & UQF_UMTXQ) == 0)
2866 if (error == ERESTART)
2869 umtxq_unlock(&uq->uq_key);
2870 umtx_key_release(&uq->uq_key);
2875 * Wake up a waiter on a userland semaphore.
2878 do_sem_wake(struct thread *td, struct _usem *sem)
2880 struct umtx_key key;
2884 flags = fuword32(&sem->_flags);
2885 if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
2889 cnt = umtxq_count(&key);
2891 umtxq_signal(&key, 1);
2893 * Check if the count is greater than 0; this means the memory is
2894 * still being referenced by user code, so we can safely
2895 * update the _has_waiters flag.
2900 __DEVOLATILE(uint32_t *, &sem->_has_waiters), 0);
2906 umtx_key_release(&key);
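/*
 * The _has_waiters handshake (an illustrative summary, not a new protocol):
 * do_sem_wait() sets _has_waiters before re-reading _count with the chain
 * busy, so a userland post that sees _has_waiters == 0 may skip the wake
 * syscall; do_sem_wake() clears the flag again only when it is waking the
 * last queued sleeper.
 */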
2911 sys__umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
2912 /* struct umtx *umtx */
2914 return do_lock_umtx(td, uap->umtx, td->td_tid, 0);
2918 sys__umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
2919 /* struct umtx *umtx */
2921 return do_unlock_umtx(td, uap->umtx, td->td_tid);
2925 umtx_copyin_timeout(const void *addr, struct timespec *tsp)
2929 error = copyin(addr, tsp, sizeof(struct timespec));
2931 if (tsp->tv_sec < 0 ||
2932 tsp->tv_nsec >= 1000000000 ||
2940 umtx_copyin_umtx_time(const void *addr, size_t size, struct _umtx_time *tp)
2944 if (size <= sizeof(struct timespec)) {
2945 tp->_clockid = CLOCK_REALTIME;
2947 error = copyin(addr, &tp->_timeout, sizeof(struct timespec));
2949 error = copyin(addr, tp, sizeof(struct _umtx_time));
2952 if (tp->_timeout.tv_sec < 0 ||
2953 tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0)
2959 __umtx_op_lock_umtx(struct thread *td, struct _umtx_op_args *uap)
2961 struct timespec *ts, timeout;
2964 /* Allow a null timespec (wait forever). */
2965 if (uap->uaddr2 == NULL)
2968 error = umtx_copyin_timeout(uap->uaddr2, &timeout);
2973 return (do_lock_umtx(td, uap->obj, uap->val, ts));
2977 __umtx_op_unlock_umtx(struct thread *td, struct _umtx_op_args *uap)
2979 return (do_unlock_umtx(td, uap->obj, uap->val));
2983 __umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
2985 struct _umtx_time timeout, *tm_p;
2988 if (uap->uaddr2 == NULL)
2991 error = umtx_copyin_umtx_time(
2992 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
2997 return do_wait(td, uap->obj, uap->val, tm_p, 0, 0);
3001 __umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
3003 struct _umtx_time timeout, *tm_p;
3006 if (uap->uaddr2 == NULL)
3009 error = umtx_copyin_umtx_time(
3010 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3015 return do_wait(td, uap->obj, uap->val, tm_p, 1, 0);
3019 __umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
3021 struct _umtx_time *tm_p, timeout;
3024 if (uap->uaddr2 == NULL)
3027 error = umtx_copyin_umtx_time(
3028 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3033 return do_wait(td, uap->obj, uap->val, tm_p, 1, 1);
3037 __umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
3039 return (kern_umtx_wake(td, uap->obj, uap->val, 0));
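/*
 * UMTX_OP_NWAKE_PRIVATE: wake every waiter on each address in a
 * userland-supplied array of pointers, copied in and processed
 * BATCH_SIZE entries at a time.
 */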
3042 #define BATCH_SIZE 128
3044 __umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap)
3046 int count = uap->val;
3047 void *uaddrs[BATCH_SIZE];
3048 char **upp = (char **)uap->obj;
3055 if (tocopy > BATCH_SIZE)
3056 tocopy = BATCH_SIZE;
3057 error = copyin(upp+pos, uaddrs, tocopy * sizeof(char *));
3060 for (i = 0; i < tocopy; ++i)
3061 kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
3069 __umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
3071 return (kern_umtx_wake(td, uap->obj, uap->val, 1));
3075 __umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
3077 struct _umtx_time *tm_p, timeout;
3080 /* Allow a null timespec (wait forever). */
3081 if (uap->uaddr2 == NULL)
3084 error = umtx_copyin_umtx_time(
3085 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3090 return do_lock_umutex(td, uap->obj, tm_p, 0);
3094 __umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
3096 return do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY);
3100 __umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
3102 struct _umtx_time *tm_p, timeout;
3105 /* Allow a null timespec (wait forever). */
3106 if (uap->uaddr2 == NULL)
3109 error = umtx_copyin_umtx_time(
3110 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3115 return do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT);
3119 __umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap)
3121 return do_wake_umutex(td, uap->obj);
3125 __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
3127 return do_unlock_umutex(td, uap->obj);
3131 __umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap)
3133 return do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1);
3137 __umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
3139 struct timespec *ts, timeout;
3142 /* Allow a null timespec (wait forever). */
3143 if (uap->uaddr2 == NULL)
3146 error = umtx_copyin_timeout(uap->uaddr2, &timeout);
3151 return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
3155 __umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap)
3157 return do_cv_signal(td, uap->obj);
3161 __umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap)
3163 return do_cv_broadcast(td, uap->obj);
3167 __umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
3169 struct _umtx_time timeout;
3172 /* Allow a null timespec (wait forever). */
3173 if (uap->uaddr2 == NULL) {
3174 error = do_rw_rdlock(td, uap->obj, uap->val, 0);
3176 error = umtx_copyin_umtx_time(uap->uaddr2,
3177 (size_t)uap->uaddr1, &timeout);
3180 error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
3186 __umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
3188 struct _umtx_time timeout;
3191 /* Allow a null timespec (wait forever). */
3192 if (uap->uaddr2 == NULL) {
3193 error = do_rw_wrlock(td, uap->obj, 0);
3195 error = umtx_copyin_umtx_time(uap->uaddr2,
3196 (size_t)uap->uaddr1, &timeout);
3200 error = do_rw_wrlock(td, uap->obj, &timeout);
3206 __umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
3208 return do_rw_unlock(td, uap->obj);
3212 __umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap)
3214 struct _umtx_time *tm_p, timeout;
3217 /* Allow a null timespec (wait forever). */
3218 if (uap->uaddr2 == NULL)
3221 error = umtx_copyin_umtx_time(
3222 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3227 return (do_sem_wait(td, uap->obj, tm_p));
3231 __umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap)
3233 return do_sem_wake(td, uap->obj);
3237 __umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap)
3239 return do_wake2_umutex(td, uap->obj, uap->val);
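/*
 * Dispatch table for the _umtx_op(2) system call, indexed by the
 * UMTX_OP_* code passed from userland.
 *
 * For illustration only (hypothetical userland snippet, not part of this
 * file): given a uint32_t word, an expected value and a struct timespec ts,
 * a timed futex-style wait would reach __umtx_op_wait_uint_private roughly
 * as follows, with the size of the timeout structure in the fourth
 * argument and its address in the fifth:
 *
 *	struct _umtx_time to;
 *	to._timeout = ts;
 *	to._flags = 0;
 *	to._clockid = CLOCK_MONOTONIC;
 *	_umtx_op(&word, UMTX_OP_WAIT_UINT_PRIVATE, expected,
 *	    (void *)sizeof(to), &to);
 */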
3242 typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap);
3244 static _umtx_op_func op_table[] = {
3245 __umtx_op_lock_umtx, /* UMTX_OP_LOCK */
3246 __umtx_op_unlock_umtx, /* UMTX_OP_UNLOCK */
3247 __umtx_op_wait, /* UMTX_OP_WAIT */
3248 __umtx_op_wake, /* UMTX_OP_WAKE */
3249 __umtx_op_trylock_umutex, /* UMTX_OP_MUTEX_TRYLOCK */
3250 __umtx_op_lock_umutex, /* UMTX_OP_MUTEX_LOCK */
3251 __umtx_op_unlock_umutex, /* UMTX_OP_MUTEX_UNLOCK */
3252 __umtx_op_set_ceiling, /* UMTX_OP_SET_CEILING */
3253 __umtx_op_cv_wait, /* UMTX_OP_CV_WAIT */
3254 __umtx_op_cv_signal, /* UMTX_OP_CV_SIGNAL */
3255 __umtx_op_cv_broadcast, /* UMTX_OP_CV_BROADCAST */
3256 __umtx_op_wait_uint, /* UMTX_OP_WAIT_UINT */
3257 __umtx_op_rw_rdlock, /* UMTX_OP_RW_RDLOCK */
3258 __umtx_op_rw_wrlock, /* UMTX_OP_RW_WRLOCK */
3259 __umtx_op_rw_unlock, /* UMTX_OP_RW_UNLOCK */
3260 __umtx_op_wait_uint_private, /* UMTX_OP_WAIT_UINT_PRIVATE */
3261 __umtx_op_wake_private, /* UMTX_OP_WAKE_PRIVATE */
3262 __umtx_op_wait_umutex, /* UMTX_OP_UMUTEX_WAIT */
3263 __umtx_op_wake_umutex, /* UMTX_OP_UMUTEX_WAKE */
3264 __umtx_op_sem_wait, /* UMTX_OP_SEM_WAIT */
3265 __umtx_op_sem_wake, /* UMTX_OP_SEM_WAKE */
3266 __umtx_op_nwake_private, /* UMTX_OP_NWAKE_PRIVATE */
3267 __umtx_op_wake2_umutex /* UMTX_OP_UMUTEX_WAKE2 */
3271 sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
3273 if ((unsigned)uap->op < UMTX_OP_MAX)
3274 return (*op_table[uap->op])(td, uap);
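/*
 * COMPAT_FREEBSD32 support: 32-bit processes use 32-bit timespec and
 * _umtx_time layouts, so each timed operation gets a *_compat32 wrapper
 * that converts the userland arguments before calling the common code.
 */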
3278 #ifdef COMPAT_FREEBSD32
3280 freebsd32_umtx_lock(struct thread *td, struct freebsd32_umtx_lock_args *uap)
3281 /* struct umtx *umtx */
3283 return (do_lock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid, NULL));
3287 freebsd32_umtx_unlock(struct thread *td, struct freebsd32_umtx_unlock_args *uap)
3288 /* struct umtx *umtx */
3290 return (do_unlock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid));
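/*
 * 32-bit counterpart of struct _umtx_time: a timespec32 followed by the
 * flags and clock id fields.
 */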
3298 struct umtx_time32 {
3299 struct timespec32 timeout;
3305 umtx_copyin_timeout32(void *addr, struct timespec *tsp)
3307 struct timespec32 ts32;
3310 error = copyin(addr, &ts32, sizeof(struct timespec32));
3312 if (ts32.tv_sec < 0 ||
3313 ts32.tv_nsec >= 1000000000 ||
3317 tsp->tv_sec = ts32.tv_sec;
3318 tsp->tv_nsec = ts32.tv_nsec;
3325 umtx_copyin_umtx_time32(const void *addr, size_t size, struct _umtx_time *tp)
3327 struct umtx_time32 t32;
3330 t32.clockid = CLOCK_REALTIME;
3331 t32.flags = 0;
3332 if (size <= sizeof(struct timespec32))
3333 error = copyin(addr, &t32.timeout, sizeof(struct timespec32));
3335 error = copyin(addr, &t32, sizeof(struct umtx_time32));
3338 if (t32.timeout.tv_sec < 0 ||
3339 t32.timeout.tv_nsec >= 1000000000 || t32.timeout.tv_nsec < 0)
3341 tp->_timeout.tv_sec = t32.timeout.tv_sec;
3342 tp->_timeout.tv_nsec = t32.timeout.tv_nsec;
3343 tp->_flags = t32.flags;
3344 tp->_clockid = t32.clockid;
3349 __umtx_op_lock_umtx_compat32(struct thread *td, struct _umtx_op_args *uap)
3351 struct timespec *ts, timeout;
3354 /* Allow a null timespec (wait forever). */
3355 if (uap->uaddr2 == NULL)
3358 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
3363 return (do_lock_umtx32(td, uap->obj, uap->val, ts));
3367 __umtx_op_unlock_umtx_compat32(struct thread *td, struct _umtx_op_args *uap)
3369 return (do_unlock_umtx32(td, uap->obj, (uint32_t)uap->val));
3373 __umtx_op_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
3375 struct _umtx_time *tm_p, timeout;
3378 if (uap->uaddr2 == NULL)
3381 error = umtx_copyin_umtx_time32(uap->uaddr2,
3382 (size_t)uap->uaddr1, &timeout);
3387 return do_wait(td, uap->obj, uap->val, tm_p, 1, 0);
3391 __umtx_op_lock_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
3393 struct _umtx_time *tm_p, timeout;
3396 /* Allow a null timespec (wait forever). */
3397 if (uap->uaddr2 == NULL)
3400 error = umtx_copyin_umtx_time32(uap->uaddr2,
3401 (size_t)uap->uaddr1, &timeout);
3406 return do_lock_umutex(td, uap->obj, tm_p, 0);
3410 __umtx_op_wait_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
3412 struct _umtx_time *tm_p, timeout;
3415 /* Allow a null timespec (wait forever). */
3416 if (uap->uaddr2 == NULL)
3419 error = umtx_copyin_umtx_time32(uap->uaddr2,
3420 (size_t)uap->uaddr1, &timeout);
3425 return do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT);
3429 __umtx_op_cv_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
3431 struct timespec *ts, timeout;
3434 /* Allow a null timespec (wait forever). */
3435 if (uap->uaddr2 == NULL)
3438 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
3443 return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
3447 __umtx_op_rw_rdlock_compat32(struct thread *td, struct _umtx_op_args *uap)
3449 struct _umtx_time timeout;
3452 /* Allow a null timespec (wait forever). */
3453 if (uap->uaddr2 == NULL) {
3454 error = do_rw_rdlock(td, uap->obj, uap->val, 0);
3456 error = umtx_copyin_umtx_time32(uap->uaddr2,
3457 (size_t)uap->uaddr1, &timeout);
3460 error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
3466 __umtx_op_rw_wrlock_compat32(struct thread *td, struct _umtx_op_args *uap)
3468 struct _umtx_time timeout;
3471 /* Allow a null timespec (wait forever). */
3472 if (uap->uaddr2 == NULL) {
3473 error = do_rw_wrlock(td, uap->obj, 0);
3475 error = umtx_copyin_umtx_time32(uap->uaddr2,
3476 (size_t)uap->uaddr1, &timeout);
3479 error = do_rw_wrlock(td, uap->obj, &timeout);
3485 __umtx_op_wait_uint_private_compat32(struct thread *td, struct _umtx_op_args *uap)
3487 struct _umtx_time *tm_p, timeout;
3490 if (uap->uaddr2 == NULL)
3493 error = umtx_copyin_umtx_time32(
3494 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3499 return do_wait(td, uap->obj, uap->val, tm_p, 1, 1);
3503 __umtx_op_sem_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
3505 struct _umtx_time *tm_p, timeout;
3508 /* Allow a null timespec (wait forever). */
3509 if (uap->uaddr2 == NULL)
3512 error = umtx_copyin_umtx_time32(uap->uaddr2,
3513 (size_t)uap->uaddr1, &timeout);
3518 return (do_sem_wait(td, uap->obj, tm_p));
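/*
 * 32-bit variant of UMTX_OP_NWAKE_PRIVATE: the userland array holds
 * 32-bit pointers, widened before being passed to kern_umtx_wake().
 */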
3522 __umtx_op_nwake_private32(struct thread *td, struct _umtx_op_args *uap)
3524 int count = uap->val;
3525 uint32_t uaddrs[BATCH_SIZE];
3526 uint32_t **upp = (uint32_t **)uap->obj;
3533 if (tocopy > BATCH_SIZE)
3534 tocopy = BATCH_SIZE;
3535 error = copyin(upp+pos, uaddrs, tocopy * sizeof(uint32_t));
3538 for (i = 0; i < tocopy; ++i)
3539 kern_umtx_wake(td, (void *)(intptr_t)uaddrs[i],
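/*
 * Dispatch table used for 32-bit callers; entries whose argument layout
 * does not differ between ABIs reuse the native handlers.
 */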
3547 static _umtx_op_func op_table_compat32[] = {
3548 __umtx_op_lock_umtx_compat32, /* UMTX_OP_LOCK */
3549 __umtx_op_unlock_umtx_compat32, /* UMTX_OP_UNLOCK */
3550 __umtx_op_wait_compat32, /* UMTX_OP_WAIT */
3551 __umtx_op_wake, /* UMTX_OP_WAKE */
3552 __umtx_op_trylock_umutex, /* UMTX_OP_MUTEX_TRYLOCK */
3553 __umtx_op_lock_umutex_compat32, /* UMTX_OP_MUTEX_LOCK */
3554 __umtx_op_unlock_umutex, /* UMTX_OP_MUTEX_UNLOCK */
3555 __umtx_op_set_ceiling, /* UMTX_OP_SET_CEILING */
3556 __umtx_op_cv_wait_compat32, /* UMTX_OP_CV_WAIT */
3557 __umtx_op_cv_signal, /* UMTX_OP_CV_SIGNAL */
3558 __umtx_op_cv_broadcast, /* UMTX_OP_CV_BROADCAST */
3559 __umtx_op_wait_compat32, /* UMTX_OP_WAIT_UINT */
3560 __umtx_op_rw_rdlock_compat32, /* UMTX_OP_RW_RDLOCK */
3561 __umtx_op_rw_wrlock_compat32, /* UMTX_OP_RW_WRLOCK */
3562 __umtx_op_rw_unlock, /* UMTX_OP_RW_UNLOCK */
3563 __umtx_op_wait_uint_private_compat32, /* UMTX_OP_WAIT_UINT_PRIVATE */
3564 __umtx_op_wake_private, /* UMTX_OP_WAKE_PRIVATE */
3565 __umtx_op_wait_umutex_compat32, /* UMTX_OP_UMUTEX_WAIT */
3566 __umtx_op_wake_umutex, /* UMTX_OP_UMUTEX_WAKE */
3567 __umtx_op_sem_wait_compat32, /* UMTX_OP_SEM_WAIT */
3568 __umtx_op_sem_wake, /* UMTX_OP_SEM_WAKE */
3569 __umtx_op_nwake_private32, /* UMTX_OP_NWAKE_PRIVATE */
3570 __umtx_op_wake2_umutex /* UMTX_OP_UMUTEX_WAKE2 */
3574 freebsd32_umtx_op(struct thread *td, struct freebsd32_umtx_op_args *uap)
3576 if ((unsigned)uap->op < UMTX_OP_MAX)
3577 return (*op_table_compat32[uap->op])(td,
3578 (struct _umtx_op_args *)uap);
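/*
 * Per-thread umtx state: umtx_thread_init() attaches a umtx queue entry
 * to each new struct thread and umtx_thread_fini() releases it.
 */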
3584 umtx_thread_init(struct thread *td)
3586 td->td_umtxq = umtxq_alloc();
3587 td->td_umtxq->uq_thread = td;
3591 umtx_thread_fini(struct thread *td)
3593 umtxq_free(td->td_umtxq);
3597 * Called when a new thread is created, e.g. by fork().
3600 umtx_thread_alloc(struct thread *td)
3605 uq->uq_inherited_pri = PRI_MAX;
3607 KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
3608 KASSERT(uq->uq_thread == td, ("uq_thread != td"));
3609 KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
3610 KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
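/*
 * exec() hook.
 */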
3617 umtx_exec_hook(void *arg __unused, struct proc *p __unused,
3618 struct image_params *imgp __unused)
3620 umtx_thread_cleanup(curthread);
3624 * thread_exit() hook.
3627 umtx_thread_exit(struct thread *td)
3629 umtx_thread_cleanup(td);
3633 * Clean up the thread's umtx state: disown contested PI mutexes and drop the lent priority back to PRI_MAX.
3636 umtx_thread_cleanup(struct thread *td)
3641 if ((uq = td->td_umtxq) == NULL)
3644 mtx_lock_spin(&umtx_lock);
3645 uq->uq_inherited_pri = PRI_MAX;
3646 while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
3647 pi->pi_owner = NULL;
3648 TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
3650 mtx_unlock_spin(&umtx_lock);
3652 sched_lend_user_prio(td, PRI_MAX);