2 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
3 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include "opt_compat.h"
32 #include "opt_umtx_profiling.h"
34 #include <sys/param.h>
35 #include <sys/kernel.h>
36 #include <sys/limits.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
42 #include <sys/sched.h>
44 #include <sys/sysctl.h>
45 #include <sys/sysent.h>
46 #include <sys/systm.h>
47 #include <sys/sysproto.h>
48 #include <sys/eventhandler.h>
52 #include <vm/vm_param.h>
54 #include <vm/vm_map.h>
55 #include <vm/vm_object.h>
57 #include <machine/cpu.h>
59 #ifdef COMPAT_FREEBSD32
60 #include <compat/freebsd32/freebsd32_proto.h>
64 #define _UMUTEX_WAIT 2
66 /* Priority inheritance mutex info. */
69 struct thread *pi_owner;
74 * List entry to link umtx_pi structures held by a thread */
75 TAILQ_ENTRY(umtx_pi) pi_link;
77 /* List entry in hash */
78 TAILQ_ENTRY(umtx_pi) pi_hashlink;
80 /* List for waiters */
81 TAILQ_HEAD(,umtx_q) pi_blocked;
83 /* Identify a userland lock object */
84 struct umtx_key pi_key;
87 /* A waiter on a userland synchronization object. */
89 /* Linked list for the hash. */
90 TAILQ_ENTRY(umtx_q) uq_link;
93 struct umtx_key uq_key;
97 #define UQF_UMTXQ 0x0001
99 /* The thread this queue entry belongs to. */
100 struct thread *uq_thread;
103 * Blocked on a PI mutex.  Reads can use either the chain lock
104 * or umtx_lock; writes must hold both the chain lock and
105 * umtx_lock.
107 struct umtx_pi *uq_pi_blocked;
109 /* On blocked list */
110 TAILQ_ENTRY(umtx_q) uq_lockq;
112 /* PI mutexes owned by us that other threads contend for */
113 TAILQ_HEAD(,umtx_pi) uq_pi_contested;
115 /* Inherited priority from PP mutex */
116 u_char uq_inherited_pri;
119 TAILQ_HEAD(umtxq_head, umtx_q);
121 /* Userland lock object's wait-queue chain */
123 /* Lock for this chain. */
126 /* List of sleep queues. */
127 struct umtxq_head uc_queue[2];
128 #define UMTX_SHARED_QUEUE 0
129 #define UMTX_EXCLUSIVE_QUEUE 1
134 /* Chain lock waiters */
137 /* All PI mutexes hashed to this chain */
138 TAILQ_HEAD(,umtx_pi) uc_pi_list;
139 #ifdef UMTX_PROFILING
145 #define UMTXQ_LOCKED_ASSERT(uc) mtx_assert(&(uc)->uc_lock, MA_OWNED)
146 #define UMTXQ_BUSY_ASSERT(uc) KASSERT((uc)->uc_busy != 0, ("umtx chain is not busy"))
149 * Don't propagate time-sharing priority; there is a security reason.
150 * A user could simply create a PI mutex, let thread A lock it, and
151 * let another thread B block on it.  Because B is sleeping, its
152 * priority would be boosted, which would boost A's priority via
153 * propagation as well; A's priority would then never be lowered even
154 * if it were using 100% CPU.  This is unfair to other processes.
157 #define UPRI(td) (((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\
158 (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\
159 PRI_MAX_TIMESHARE : (td)->td_user_pri)
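/*
 * Illustrative sketch (not part of the build): how UPRI() clamps
 * time-sharing priorities.  The PRI_* values below are assumptions
 * picked for illustration only; the real ones live in <sys/priority.h>.
 */
#if 0
#include <stdio.h>

#define PRI_MIN_TIMESHARE	120	/* assumed value, for illustration */
#define PRI_MAX_TIMESHARE	223	/* assumed value, for illustration */

static int
upri_sketch(int user_pri)
{
	/* Every time-sharing priority maps to the worst one, so a
	   busy-looping timeshare thread can never gain from lending. */
	if (user_pri >= PRI_MIN_TIMESHARE && user_pri <= PRI_MAX_TIMESHARE)
		return (PRI_MAX_TIMESHARE);
	return (user_pri);
}

int
main(void)
{
	/* A real-time priority (80) passes through unchanged; a
	   timeshare priority (150) is clamped to 223. */
	printf("%d %d\n", upri_sketch(80), upri_sketch(150));
	return (0);
}
#endif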
161 #define GOLDEN_RATIO_PRIME 2654404609U
162 #define UMTX_CHAINS 128
163 #define UMTX_SHIFTS (__WORD_BIT - 7)
165 #define GET_SHARE(flags) \
166 (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)
168 #define BUSY_SPINS 200
170 static uma_zone_t umtx_pi_zone;
171 static struct umtxq_chain umtxq_chains[2][UMTX_CHAINS];
172 static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
173 static int umtx_pi_allocated;
175 SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx debug");
176 SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
177 &umtx_pi_allocated, 0, "Allocated umtx_pi");
179 #ifdef UMTX_PROFILING
180 static long max_length;
181 SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length");
182 static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD, 0, "umtx chain stats");
185 static void umtxq_sysinit(void *);
186 static void umtxq_hash(struct umtx_key *key);
187 static struct umtxq_chain *umtxq_getchain(struct umtx_key *key);
188 static void umtxq_lock(struct umtx_key *key);
189 static void umtxq_unlock(struct umtx_key *key);
190 static void umtxq_busy(struct umtx_key *key);
191 static void umtxq_unbusy(struct umtx_key *key);
192 static void umtxq_insert_queue(struct umtx_q *uq, int q);
193 static void umtxq_remove_queue(struct umtx_q *uq, int q);
194 static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, int timo);
195 static int umtxq_count(struct umtx_key *key);
196 static struct umtx_pi *umtx_pi_alloc(int);
197 static void umtx_pi_free(struct umtx_pi *pi);
198 static void umtx_pi_adjust_locked(struct thread *td, u_char oldpri);
199 static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags);
200 static void umtx_thread_cleanup(struct thread *td);
201 static void umtx_exec_hook(void *arg __unused, struct proc *p __unused,
202 struct image_params *imgp __unused);
203 SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);
205 #define umtxq_signal(key, nwake) umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
206 #define umtxq_insert(uq) umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
207 #define umtxq_remove(uq) umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)
209 static struct mtx umtx_lock;
211 #ifdef UMTX_PROFILING
213 umtx_init_profiling(void)
215 struct sysctl_oid *chain_oid;
219 for (i = 0; i < UMTX_CHAINS; ++i) {
220 snprintf(chain_name, sizeof(chain_name), "%d", i);
221 chain_oid = SYSCTL_ADD_NODE(NULL,
222 SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
223 chain_name, CTLFLAG_RD, NULL, "umtx hash stats");
224 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
225 "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL);
226 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
227 "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL);
233 umtxq_sysinit(void *arg __unused)
237 umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
238 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
239 for (i = 0; i < 2; ++i) {
240 for (j = 0; j < UMTX_CHAINS; ++j) {
241 mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
242 MTX_DEF | MTX_DUPOK);
243 TAILQ_INIT(&umtxq_chains[i][j].uc_queue[0]);
244 TAILQ_INIT(&umtxq_chains[i][j].uc_queue[1]);
245 TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
246 umtxq_chains[i][j].uc_busy = 0;
247 umtxq_chains[i][j].uc_waiters = 0;
248 #ifdef UMTX_PROFILING
249 umtxq_chains[i][j].length = 0;
250 umtxq_chains[i][j].max_length = 0;
254 #ifdef UMTX_PROFILING
255 umtx_init_profiling();
257 mtx_init(&umtx_lock, "umtx lock", NULL, MTX_SPIN);
258 EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
259 EVENTHANDLER_PRI_ANY);
267 uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
268 TAILQ_INIT(&uq->uq_pi_contested);
269 uq->uq_inherited_pri = PRI_MAX;
274 umtxq_free(struct umtx_q *uq)
280 umtxq_hash(struct umtx_key *key)
282 unsigned n = (uintptr_t)key->info.both.a + key->info.both.b;
283 key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
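/*
 * Illustrative sketch (not part of the build): the multiplicative
 * "golden ratio" hash above as a standalone function.  With
 * __WORD_BIT == 32, UMTX_SHIFTS is 25, so the shift keeps the top
 * seven bits of the 32-bit product and the modulo is just a guard.
 */
#if 0
#include <stdint.h>

#define GOLDEN_RATIO_PRIME	2654404609U
#define UMTX_CHAINS		128
#define UMTX_SHIFTS		(32 - 7)

static unsigned
umtxq_hash_sketch(uintptr_t a, uintptr_t b)
{
	unsigned n = (unsigned)(a + b);

	/* The multiplication wraps mod 2^32 and scrambles the bits;
	   the high bits of the product index one of 128 chains. */
	return (((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS);
}
#endif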
286 static inline struct umtxq_chain *
287 umtxq_getchain(struct umtx_key *key)
289 if (key->type <= TYPE_CV)
290 return (&umtxq_chains[1][key->hash]);
291 return (&umtxq_chains[0][key->hash]);
298 umtxq_lock(struct umtx_key *key)
300 struct umtxq_chain *uc;
302 uc = umtxq_getchain(key);
303 mtx_lock(&uc->uc_lock);
310 umtxq_unlock(struct umtx_key *key)
312 struct umtxq_chain *uc;
314 uc = umtxq_getchain(key);
315 mtx_unlock(&uc->uc_lock);
319 * Set the chain to the busy state when the following operation
320 * may block (a kernel mutex cannot be used).
323 umtxq_busy(struct umtx_key *key)
325 struct umtxq_chain *uc;
327 uc = umtxq_getchain(key);
328 mtx_assert(&uc->uc_lock, MA_OWNED);
332 int count = BUSY_SPINS;
335 while (uc->uc_busy && --count > 0)
341 while (uc->uc_busy) {
343 msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
354 umtxq_unbusy(struct umtx_key *key)
356 struct umtxq_chain *uc;
358 uc = umtxq_getchain(key);
359 mtx_assert(&uc->uc_lock, MA_OWNED);
360 KASSERT(uc->uc_busy != 0, ("not busy"));
367 umtxq_insert_queue(struct umtx_q *uq, int q)
369 struct umtxq_chain *uc;
371 uc = umtxq_getchain(&uq->uq_key);
372 UMTXQ_LOCKED_ASSERT(uc);
373 TAILQ_INSERT_TAIL(&uc->uc_queue[q], uq, uq_link);
374 #ifdef UMTX_PROFILING
376 if (uc->length > uc->max_length) {
377 uc->max_length = uc->length;
378 if (uc->max_length > max_length)
379 max_length = uc->max_length;
382 uq->uq_flags |= UQF_UMTXQ;
386 umtxq_remove_queue(struct umtx_q *uq, int q)
388 struct umtxq_chain *uc;
390 uc = umtxq_getchain(&uq->uq_key);
391 UMTXQ_LOCKED_ASSERT(uc);
392 if (uq->uq_flags & UQF_UMTXQ) {
393 TAILQ_REMOVE(&uc->uc_queue[q], uq, uq_link);
394 #ifdef UMTX_PROFILING
397 uq->uq_flags &= ~UQF_UMTXQ;
402 * Check if there are multiple waiters
405 umtxq_count(struct umtx_key *key)
407 struct umtxq_chain *uc;
411 uc = umtxq_getchain(key);
412 UMTXQ_LOCKED_ASSERT(uc);
413 TAILQ_FOREACH(uq, &uc->uc_queue[UMTX_SHARED_QUEUE], uq_link) {
414 if (umtx_key_match(&uq->uq_key, key)) {
423 * Check if there are multiple PI waiters and return the first
427 umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
429 struct umtxq_chain *uc;
434 uc = umtxq_getchain(key);
435 UMTXQ_LOCKED_ASSERT(uc);
436 TAILQ_FOREACH(uq, &uc->uc_queue[UMTX_SHARED_QUEUE], uq_link) {
437 if (umtx_key_match(&uq->uq_key, key)) {
447 * Wake up threads waiting on a userland object.
451 umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
453 struct umtxq_chain *uc;
454 struct umtx_q *uq, *next;
458 uc = umtxq_getchain(key);
459 UMTXQ_LOCKED_ASSERT(uc);
460 TAILQ_FOREACH_SAFE(uq, &uc->uc_queue[q], uq_link, next) {
461 if (umtx_key_match(&uq->uq_key, key)) {
462 umtxq_remove_queue(uq, q);
473 * Wake up specified thread.
476 umtxq_signal_thread(struct umtx_q *uq)
478 struct umtxq_chain *uc;
480 uc = umtxq_getchain(&uq->uq_key);
481 UMTXQ_LOCKED_ASSERT(uc);
487 * Put the thread into a sleep state; before sleeping, check whether
488 * the thread was removed from the umtx queue.
491 umtxq_sleep(struct umtx_q *uq, const char *wmesg, int timo)
493 struct umtxq_chain *uc;
496 uc = umtxq_getchain(&uq->uq_key);
497 UMTXQ_LOCKED_ASSERT(uc);
498 if (!(uq->uq_flags & UQF_UMTXQ))
500 error = msleep(uq, &uc->uc_lock, PCATCH, wmesg, timo);
501 if (error == EWOULDBLOCK)
507 * Convert a userspace address into a unique logical address.
510 umtx_key_get(void *addr, int type, int share, struct umtx_key *key)
512 struct thread *td = curthread;
514 vm_map_entry_t entry;
520 if (share == THREAD_SHARE) {
522 key->info.private.vs = td->td_proc->p_vmspace;
523 key->info.private.addr = (uintptr_t)addr;
525 MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
526 map = &td->td_proc->p_vmspace->vm_map;
527 if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
528 &entry, &key->info.shared.object, &pindex, &prot,
529 &wired) != KERN_SUCCESS) {
533 if ((share == PROCESS_SHARE) ||
534 (share == AUTO_SHARE &&
535 VM_INHERIT_SHARE == entry->inheritance)) {
537 key->info.shared.offset = entry->offset + entry->start -
539 vm_object_reference(key->info.shared.object);
542 key->info.private.vs = td->td_proc->p_vmspace;
543 key->info.private.addr = (uintptr_t)addr;
545 vm_map_lookup_done(map, entry);
556 umtx_key_release(struct umtx_key *key)
559 vm_object_deallocate(key->info.shared.object);
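/*
 * Illustrative sketch (not part of the build): the key lifecycle that
 * every operation in this file follows.  "my_umtx_op" is a hypothetical
 * caller, not a function defined here.
 */
#if 0
static int
my_umtx_op(struct thread *td, void *uaddr, uint32_t flags)
{
	struct umtx_key key;
	int error;

	/* Translate the user address into a stable logical key; for
	   shared objects this takes a VM object reference. */
	error = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT, GET_SHARE(flags),
	    &key);
	if (error != 0)
		return (error);

	umtxq_lock(&key);
	/* ... wait-queue manipulation under the chain lock ... */
	umtxq_unlock(&key);

	/* Drop the reference taken by umtx_key_get(). */
	umtx_key_release(&key);
	return (0);
}
#endif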
563 * Lock a umtx object.
566 _do_lock_umtx(struct thread *td, struct umtx *umtx, u_long id, int timo)
576 * Care must be exercised when dealing with the umtx structure.  It
577 * can fault on any access.
581 * Try the uncontested case. This should be done in userland.
583 owner = casuword(&umtx->u_owner, UMTX_UNOWNED, id);
585 /* The acquire succeeded. */
586 if (owner == UMTX_UNOWNED)
589 /* The address was invalid. */
593 /* If no one owns it but it is contested try to acquire it. */
594 if (owner == UMTX_CONTESTED) {
595 owner = casuword(&umtx->u_owner,
596 UMTX_CONTESTED, id | UMTX_CONTESTED);
598 if (owner == UMTX_CONTESTED)
601 /* The address was invalid. */
605 /* If this failed the lock has changed, restart. */
610 * If we caught a signal, we have retried and now exit immediately.
616 if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK,
617 AUTO_SHARE, &uq->uq_key)) != 0)
620 umtxq_lock(&uq->uq_key);
621 umtxq_busy(&uq->uq_key);
623 umtxq_unbusy(&uq->uq_key);
624 umtxq_unlock(&uq->uq_key);
627 * Set the contested bit so that a release in user space
628 * knows to use the system call for unlock.  If this fails,
629 * either someone else has acquired the lock or it has been released.
632 old = casuword(&umtx->u_owner, owner, owner | UMTX_CONTESTED);
634 /* The address was invalid. */
636 umtxq_lock(&uq->uq_key);
638 umtxq_unlock(&uq->uq_key);
639 umtx_key_release(&uq->uq_key);
644 * We set the contested bit, so sleep.  Otherwise the lock changed
645 * and we need to retry, or we lost a race to the thread
646 * unlocking the umtx.
648 umtxq_lock(&uq->uq_key);
650 error = umtxq_sleep(uq, "umtx", timo);
652 umtxq_unlock(&uq->uq_key);
653 umtx_key_release(&uq->uq_key);
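/*
 * Illustrative sketch (not part of the build): the userland fast path
 * that the slow path above assumes.  The helper name is hypothetical;
 * the real fast path lives in libthr.
 */
#if 0
#include <sys/types.h>
#include <sys/umtx.h>
#include <errno.h>
#include <machine/atomic.h>

static int
umtx_trylock_user(volatile u_long *owner, u_long id)
{
	/* Uncontested acquire: CAS UMTX_UNOWNED -> our thread id. */
	if (atomic_cmpset_acq_long(owner, UMTX_UNOWNED, id))
		return (0);
	/* Contended: the caller falls back to the _umtx_lock()
	   system call, which lands in _do_lock_umtx() above. */
	return (EBUSY);
}
#endif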
660 * Lock a umtx object.
663 do_lock_umtx(struct thread *td, struct umtx *umtx, u_long id,
664 struct timespec *timeout)
666 struct timespec ts, ts2, ts3;
670 if (timeout == NULL) {
671 error = _do_lock_umtx(td, umtx, id, 0);
672 /* Mutex locking is restarted if it is interrupted. */
677 timespecadd(&ts, timeout);
678 TIMESPEC_TO_TIMEVAL(&tv, timeout);
680 error = _do_lock_umtx(td, umtx, id, tvtohz(&tv));
681 if (error != ETIMEDOUT)
684 if (timespeccmp(&ts2, &ts, >=)) {
689 timespecsub(&ts3, &ts2);
690 TIMESPEC_TO_TIMEVAL(&tv, &ts3);
692 /* Timed-locking is not restarted. */
693 if (error == ERESTART)
700 * Unlock a umtx object.
703 do_unlock_umtx(struct thread *td, struct umtx *umtx, u_long id)
712 * Make sure we own this mtx.
714 owner = fuword(__DEVOLATILE(u_long *, &umtx->u_owner));
718 if ((owner & ~UMTX_CONTESTED) != id)
721 /* This should be done in userland */
722 if ((owner & UMTX_CONTESTED) == 0) {
723 old = casuword(&umtx->u_owner, owner, UMTX_UNOWNED);
731 /* We should only ever be in here for contested locks */
732 if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK, AUTO_SHARE,
738 count = umtxq_count(&key);
742 * When unlocking the umtx, it must be marked as unowned if
743 * zero or one thread is waiting for it.
744 * Otherwise, it must be marked as contested.
746 old = casuword(&umtx->u_owner, owner,
747 count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
749 umtxq_signal(&key, 1);
752 umtx_key_release(&key);
760 #ifdef COMPAT_FREEBSD32
763 * Lock a umtx object.
766 _do_lock_umtx32(struct thread *td, uint32_t *m, uint32_t id, int timo)
776 * Care must be exercised when dealing with the umtx structure.  It
777 * can fault on any access.
781 * Try the uncontested case. This should be done in userland.
783 owner = casuword32(m, UMUTEX_UNOWNED, id);
785 /* The acquire succeeded. */
786 if (owner == UMUTEX_UNOWNED)
789 /* The address was invalid. */
793 /* If no one owns it but it is contested try to acquire it. */
794 if (owner == UMUTEX_CONTESTED) {
795 owner = casuword32(m,
796 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
797 if (owner == UMUTEX_CONTESTED)
800 /* The address was invalid. */
804 /* If this failed the lock has changed, restart. */
809 * If we caught a signal, we have retried and now exit immediately.
815 if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK,
816 AUTO_SHARE, &uq->uq_key)) != 0)
819 umtxq_lock(&uq->uq_key);
820 umtxq_busy(&uq->uq_key);
822 umtxq_unbusy(&uq->uq_key);
823 umtxq_unlock(&uq->uq_key);
826 * Set the contested bit so that a release in user space
827 * knows to use the system call for unlock.  If this fails,
828 * either someone else has acquired the lock or it has been released.
831 old = casuword32(m, owner, owner | UMUTEX_CONTESTED);
833 /* The address was invalid. */
835 umtxq_lock(&uq->uq_key);
837 umtxq_unlock(&uq->uq_key);
838 umtx_key_release(&uq->uq_key);
843 * We set the contested bit, so sleep.  Otherwise the lock changed
844 * and we need to retry, or we lost a race to the thread
845 * unlocking the umtx.
847 umtxq_lock(&uq->uq_key);
849 error = umtxq_sleep(uq, "umtx", timo);
851 umtxq_unlock(&uq->uq_key);
852 umtx_key_release(&uq->uq_key);
859 * Lock a umtx object.
862 do_lock_umtx32(struct thread *td, void *m, uint32_t id,
863 struct timespec *timeout)
865 struct timespec ts, ts2, ts3;
869 if (timeout == NULL) {
870 error = _do_lock_umtx32(td, m, id, 0);
871 /* Mutex locking is restarted if it is interrupted. */
876 timespecadd(&ts, timeout);
877 TIMESPEC_TO_TIMEVAL(&tv, timeout);
879 error = _do_lock_umtx32(td, m, id, tvtohz(&tv));
880 if (error != ETIMEDOUT)
883 if (timespeccmp(&ts2, &ts, >=)) {
888 timespecsub(&ts3, &ts2);
889 TIMESPEC_TO_TIMEVAL(&tv, &ts3);
891 /* Timed-locking is not restarted. */
892 if (error == ERESTART)
899 * Unlock a umtx object.
902 do_unlock_umtx32(struct thread *td, uint32_t *m, uint32_t id)
911 * Make sure we own this mtx.
917 if ((owner & ~UMUTEX_CONTESTED) != id)
920 /* This should be done in userland */
921 if ((owner & UMUTEX_CONTESTED) == 0) {
922 old = casuword32(m, owner, UMUTEX_UNOWNED);
930 /* We should only ever be in here for contested locks */
931 if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK, AUTO_SHARE,
937 count = umtxq_count(&key);
941 * When unlocking the umtx, it must be marked as unowned if
942 * zero or one thread is waiting for it.
943 * Otherwise, it must be marked as contested.
945 old = casuword32(m, owner,
946 count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
948 umtxq_signal(&key, 1);
951 umtx_key_release(&key);
961 * Fetch and compare the value; sleep on the address if the value has not changed.
964 do_wait(struct thread *td, void *addr, u_long id,
965 struct timespec *timeout, int compat32, int is_private)
968 struct timespec ts, ts2, ts3;
974 if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
975 is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
978 umtxq_lock(&uq->uq_key);
980 umtxq_unlock(&uq->uq_key);
984 tmp = (unsigned int)fuword32(addr);
986 umtxq_lock(&uq->uq_key);
988 umtxq_unlock(&uq->uq_key);
989 } else if (timeout == NULL) {
990 umtxq_lock(&uq->uq_key);
991 error = umtxq_sleep(uq, "uwait", 0);
993 umtxq_unlock(&uq->uq_key);
996 timespecadd(&ts, timeout);
997 TIMESPEC_TO_TIMEVAL(&tv, timeout);
998 umtxq_lock(&uq->uq_key);
1000 error = umtxq_sleep(uq, "uwait", tvtohz(&tv));
1001 if (!(uq->uq_flags & UQF_UMTXQ))
1003 if (error != ETIMEDOUT)
1005 umtxq_unlock(&uq->uq_key);
1006 getnanouptime(&ts2);
1007 if (timespeccmp(&ts2, &ts, >=)) {
1009 umtxq_lock(&uq->uq_key);
1013 timespecsub(&ts3, &ts2);
1014 TIMESPEC_TO_TIMEVAL(&tv, &ts3);
1015 umtxq_lock(&uq->uq_key);
1018 umtxq_unlock(&uq->uq_key);
1020 umtx_key_release(&uq->uq_key);
1021 if (error == ERESTART)
1027 * Wake up threads sleeping on the specified address.
1030 kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
1032 struct umtx_key key;
1035 if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
1036 is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
1039 ret = umtxq_signal(&key, n_wake);
1041 umtx_key_release(&key);
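/*
 * Illustrative sketch (not part of the build): a minimal futex-style
 * wait/wake pair built on do_wait()/kern_umtx_wake() via the
 * _umtx_op(2) system call, using the process-private operations.
 */
#if 0
#include <sys/types.h>
#include <sys/umtx.h>
#include <limits.h>

static void
wait_while_equal(u_int *addr, u_int expected)
{
	/* The kernel re-reads *addr under the queue lock before
	   sleeping, so a wake between the test and the sleep is
	   not lost. */
	while (*addr == expected)
		_umtx_op(addr, UMTX_OP_WAIT_UINT_PRIVATE, expected,
		    NULL, NULL);
}

static void
wake_all(u_int *addr)
{
	_umtx_op(addr, UMTX_OP_WAKE_PRIVATE, INT_MAX, NULL, NULL);
}
#endif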
1046 * Lock PTHREAD_PRIO_NONE protocol POSIX mutex.
1049 _do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags, int timo,
1053 uint32_t owner, old, id;
1060 * Care must be exercised when dealing with the umtx structure.  It
1061 * can fault on any access.
1064 owner = fuword32(__DEVOLATILE(void *, &m->m_owner));
1065 if (mode == _UMUTEX_WAIT) {
1066 if (owner == UMUTEX_UNOWNED || owner == UMUTEX_CONTESTED)
1070 * Try the uncontested case. This should be done in userland.
1072 owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id);
1074 /* The acquire succeeded. */
1075 if (owner == UMUTEX_UNOWNED)
1078 /* The address was invalid. */
1082 /* If no one owns it but it is contested try to acquire it. */
1083 if (owner == UMUTEX_CONTESTED) {
1084 owner = casuword32(&m->m_owner,
1085 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
1087 if (owner == UMUTEX_CONTESTED)
1090 /* The address was invalid. */
1094 /* If this failed the lock has changed, restart. */
1099 if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
1100 (owner & ~UMUTEX_CONTESTED) == id)
1103 if (mode == _UMUTEX_TRY)
1107 * If we caught a signal, we have retried and now exit immediately.
1113 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
1114 GET_SHARE(flags), &uq->uq_key)) != 0)
1117 umtxq_lock(&uq->uq_key);
1118 umtxq_busy(&uq->uq_key);
1120 umtxq_unlock(&uq->uq_key);
1123 * Set the contested bit so that a release in user space
1124 * knows to use the system call for unlock.  If this fails,
1125 * either someone else has acquired the lock or it has been released.
1128 old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED);
1130 /* The address was invalid. */
1132 umtxq_lock(&uq->uq_key);
1134 umtxq_unbusy(&uq->uq_key);
1135 umtxq_unlock(&uq->uq_key);
1136 umtx_key_release(&uq->uq_key);
1141 * We set the contested bit, so sleep.  Otherwise the lock changed
1142 * and we need to retry, or we lost a race to the thread
1143 * unlocking the umtx.
1145 umtxq_lock(&uq->uq_key);
1146 umtxq_unbusy(&uq->uq_key);
1148 error = umtxq_sleep(uq, "umtxn", timo);
1150 umtxq_unlock(&uq->uq_key);
1151 umtx_key_release(&uq->uq_key);
1161 * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex.
1164 do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags)
1166 struct umtx_key key;
1167 uint32_t owner, old, id;
1173 * Make sure we own this mtx.
1175 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1179 if ((owner & ~UMUTEX_CONTESTED) != id)
1182 if ((owner & UMUTEX_CONTESTED) == 0) {
1183 old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED);
1191 /* We should only ever be in here for contested locks */
1192 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
1198 count = umtxq_count(&key);
1202 * When unlocking the umtx, it must be marked as unowned if
1203 * zero or one thread is waiting for it.
1204 * Otherwise, it must be marked as contested.
1206 old = casuword32(&m->m_owner, owner,
1207 count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
1209 umtxq_signal(&key, 1);
1212 umtx_key_release(&key);
1221 * Check if the mutex is available and wake up a waiter;
1222 * this is only for simple mutexes.
1225 do_wake_umutex(struct thread *td, struct umutex *m)
1227 struct umtx_key key;
1233 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1237 if ((owner & ~UMUTEX_CONTESTED) != 0)
1240 flags = fuword32(&m->m_flags);
1242 /* We should only ever be in here for contested locks */
1243 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
1249 count = umtxq_count(&key);
1253 owner = casuword32(&m->m_owner, UMUTEX_CONTESTED, UMUTEX_UNOWNED);
1256 if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0)
1257 umtxq_signal(&key, 1);
1260 umtx_key_release(&key);
1265 * Check if the mutex has waiters and try to fix the contention bit.
1268 do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
1270 struct umtx_key key;
1271 uint32_t owner, old;
1276 switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
1278 type = TYPE_NORMAL_UMUTEX;
1280 case UMUTEX_PRIO_INHERIT:
1281 type = TYPE_PI_UMUTEX;
1283 case UMUTEX_PRIO_PROTECT:
1284 type = TYPE_PP_UMUTEX;
1289 if ((error = umtx_key_get(m, type, GET_SHARE(flags),
1296 count = umtxq_count(&key);
1299 * Only repair the contention bit if there is a waiter; this means the
1300 * mutex is still being referenced by userland code.  Otherwise, don't update any memory.
1304 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1305 while ((owner & UMUTEX_CONTESTED) == 0) {
1306 old = casuword32(&m->m_owner, owner,
1307 owner | UMUTEX_CONTESTED);
1312 } else if (count == 1) {
1313 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1314 while ((owner & ~UMUTEX_CONTESTED) != 0 &&
1315 (owner & UMUTEX_CONTESTED) == 0) {
1316 old = casuword32(&m->m_owner, owner,
1317 owner | UMUTEX_CONTESTED);
1326 umtxq_signal(&key, INT_MAX);
1328 else if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0)
1329 umtxq_signal(&key, 1);
1332 umtx_key_release(&key);
1336 static inline struct umtx_pi *
1337 umtx_pi_alloc(int flags)
1341 pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
1342 TAILQ_INIT(&pi->pi_blocked);
1343 atomic_add_int(&umtx_pi_allocated, 1);
1348 umtx_pi_free(struct umtx_pi *pi)
1350 uma_zfree(umtx_pi_zone, pi);
1351 atomic_add_int(&umtx_pi_allocated, -1);
1355 * Adjust the thread's position on a pi_state after its priority has been changed.
1359 umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
1361 struct umtx_q *uq, *uq1, *uq2;
1364 mtx_assert(&umtx_lock, MA_OWNED);
1371 * Check if the thread needs to be moved on the blocked chain.
1372 * It needs to be moved if its priority is either lower than that of
1373 * the previous thread or higher than that of the next thread.
1375 uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
1376 uq2 = TAILQ_NEXT(uq, uq_lockq);
1377 if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
1378 (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
1380 * Remove thread from blocked chain and determine where
1381 * it should be moved to.
1383 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1384 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1385 td1 = uq1->uq_thread;
1386 MPASS(td1->td_proc->p_magic == P_MAGIC);
1387 if (UPRI(td1) > UPRI(td))
1392 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1394 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1400 * Propagate priority when a thread is blocked on a POSIX PI mutex.
1404 umtx_propagate_priority(struct thread *td)
1410 mtx_assert(&umtx_lock, MA_OWNED);
1413 pi = uq->uq_pi_blocked;
1422 MPASS(td->td_proc != NULL);
1423 MPASS(td->td_proc->p_magic == P_MAGIC);
1425 if (UPRI(td) <= pri)
1429 sched_lend_user_prio(td, pri);
1433 * Pick up the lock that td is blocked on.
1436 pi = uq->uq_pi_blocked;
1437 /* Resort td on the list if needed. */
1438 if (!umtx_pi_adjust_thread(pi, td))
1444 * Unpropagate priority for a PI mutex when a thread blocked on
1445 * it is interrupted by a signal or resumed by others.
1448 umtx_unpropagate_priority(struct umtx_pi *pi)
1450 struct umtx_q *uq, *uq_owner;
1451 struct umtx_pi *pi2;
1454 mtx_assert(&umtx_lock, MA_OWNED);
1456 while (pi != NULL && pi->pi_owner != NULL) {
1458 uq_owner = pi->pi_owner->td_umtxq;
1460 TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
1461 uq = TAILQ_FIRST(&pi2->pi_blocked);
1463 if (pri > UPRI(uq->uq_thread))
1464 pri = UPRI(uq->uq_thread);
1468 if (pri > uq_owner->uq_inherited_pri)
1469 pri = uq_owner->uq_inherited_pri;
1470 thread_lock(pi->pi_owner);
1471 oldpri = pi->pi_owner->td_user_pri;
1472 sched_unlend_user_prio(pi->pi_owner, pri);
1473 thread_unlock(pi->pi_owner);
1474 if (uq_owner->uq_pi_blocked != NULL)
1475 umtx_pi_adjust_locked(pi->pi_owner, oldpri);
1476 pi = uq_owner->uq_pi_blocked;
1481 * Insert a PI mutex into owned list.
1484 umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
1486 struct umtx_q *uq_owner;
1488 uq_owner = owner->td_umtxq;
1489 mtx_assert(&umtx_lock, MA_OWNED);
1490 if (pi->pi_owner != NULL)
1491 panic("pi_owner != NULL");
1492 pi->pi_owner = owner;
1493 TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);
1497 * Claim ownership of a PI mutex.
1500 umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
1502 struct umtx_q *uq, *uq_owner;
1504 uq_owner = owner->td_umtxq;
1505 mtx_lock_spin(&umtx_lock);
1506 if (pi->pi_owner == owner) {
1507 mtx_unlock_spin(&umtx_lock);
1511 if (pi->pi_owner != NULL) {
1513 * Userland may have already messed up the mutex, sigh.
1515 mtx_unlock_spin(&umtx_lock);
1518 umtx_pi_setowner(pi, owner);
1519 uq = TAILQ_FIRST(&pi->pi_blocked);
1523 pri = UPRI(uq->uq_thread);
1525 if (pri < UPRI(owner))
1526 sched_lend_user_prio(owner, pri);
1527 thread_unlock(owner);
1529 mtx_unlock_spin(&umtx_lock);
1534 umtx_pi_adjust_locked(struct thread *td, u_char oldpri)
1541 * Pick up the lock that td is blocked on.
1543 pi = uq->uq_pi_blocked;
1546 /* Resort the turnstile on the list. */
1547 if (!umtx_pi_adjust_thread(pi, td))
1551 * If our priority was lowered and we are at the head of the
1552 * turnstile, then propagate our new priority up the chain.
1554 if (uq == TAILQ_FIRST(&pi->pi_blocked) && UPRI(td) < oldpri)
1555 umtx_propagate_priority(td);
1559 * Adjust a thread's position in the blocked list of its PI mutex;
1560 * this may trigger a new round of priority propagation.
1563 umtx_pi_adjust(struct thread *td, u_char oldpri)
1569 mtx_lock_spin(&umtx_lock);
1571 * Pick up the lock that td is blocked on.
1573 pi = uq->uq_pi_blocked;
1575 umtx_pi_adjust_locked(td, oldpri);
1576 mtx_unlock_spin(&umtx_lock);
1580 * Sleep on a PI mutex.
1583 umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi,
1584 uint32_t owner, const char *wmesg, int timo)
1586 struct umtxq_chain *uc;
1587 struct thread *td, *td1;
1593 KASSERT(td == curthread, ("inconsistent uq_thread"));
1594 uc = umtxq_getchain(&uq->uq_key);
1595 UMTXQ_LOCKED_ASSERT(uc);
1596 UMTXQ_BUSY_ASSERT(uc);
1598 mtx_lock_spin(&umtx_lock);
1599 if (pi->pi_owner == NULL) {
1601 * Currently, we only support process-private PI mutexes;
1602 * non-contended PI mutexes are locked in userland.
1603 * Process-shared PI mutexes should always be initialized
1604 * and registered by the kernel, and locking should
1605 * always be done by the kernel to avoid security problems.
1606 * For a process-private PI mutex, we can find the owner
1607 * thread and boost its priority safely.
1609 mtx_unlock_spin(&umtx_lock);
1611 td1 = thread_find(curproc, owner);
1612 mtx_lock_spin(&umtx_lock);
1613 if (td1 != NULL && pi->pi_owner == NULL) {
1614 uq1 = td1->td_umtxq;
1615 umtx_pi_setowner(pi, td1);
1617 PROC_UNLOCK(curproc);
1620 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1621 pri = UPRI(uq1->uq_thread);
1627 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1629 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1631 uq->uq_pi_blocked = pi;
1633 td->td_flags |= TDF_UPIBLOCKED;
1635 umtx_propagate_priority(td);
1636 mtx_unlock_spin(&umtx_lock);
1637 umtxq_unbusy(&uq->uq_key);
1639 if (uq->uq_flags & UQF_UMTXQ) {
1640 error = msleep(uq, &uc->uc_lock, PCATCH, wmesg, timo);
1641 if (error == EWOULDBLOCK)
1643 if (uq->uq_flags & UQF_UMTXQ) {
1647 mtx_lock_spin(&umtx_lock);
1648 uq->uq_pi_blocked = NULL;
1650 td->td_flags &= ~TDF_UPIBLOCKED;
1652 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1653 umtx_unpropagate_priority(pi);
1654 mtx_unlock_spin(&umtx_lock);
1655 umtxq_unlock(&uq->uq_key);
1661 * Increment the reference count of a PI mutex.
1664 umtx_pi_ref(struct umtx_pi *pi)
1666 struct umtxq_chain *uc;
1668 uc = umtxq_getchain(&pi->pi_key);
1669 UMTXQ_LOCKED_ASSERT(uc);
1674 * Decrease the reference count of a PI mutex; if the count
1675 * drops to zero, its memory is freed.
1678 umtx_pi_unref(struct umtx_pi *pi)
1680 struct umtxq_chain *uc;
1682 uc = umtxq_getchain(&pi->pi_key);
1683 UMTXQ_LOCKED_ASSERT(uc);
1684 KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
1685 if (--pi->pi_refcount == 0) {
1686 mtx_lock_spin(&umtx_lock);
1687 if (pi->pi_owner != NULL) {
1688 TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested,
1690 pi->pi_owner = NULL;
1692 KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
1693 ("blocked queue not empty"));
1694 mtx_unlock_spin(&umtx_lock);
1695 TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
1701 * Find a PI mutex in hash table.
1703 static struct umtx_pi *
1704 umtx_pi_lookup(struct umtx_key *key)
1706 struct umtxq_chain *uc;
1709 uc = umtxq_getchain(key);
1710 UMTXQ_LOCKED_ASSERT(uc);
1712 TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
1713 if (umtx_key_match(&pi->pi_key, key)) {
1721 * Insert a PI mutex into hash table.
1724 umtx_pi_insert(struct umtx_pi *pi)
1726 struct umtxq_chain *uc;
1728 uc = umtxq_getchain(&pi->pi_key);
1729 UMTXQ_LOCKED_ASSERT(uc);
1730 TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
1737 _do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags, int timo,
1741 struct umtx_pi *pi, *new_pi;
1742 uint32_t id, owner, old;
1748 if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags),
1751 umtxq_lock(&uq->uq_key);
1752 pi = umtx_pi_lookup(&uq->uq_key);
1754 new_pi = umtx_pi_alloc(M_NOWAIT);
1755 if (new_pi == NULL) {
1756 umtxq_unlock(&uq->uq_key);
1757 new_pi = umtx_pi_alloc(M_WAITOK);
1758 umtxq_lock(&uq->uq_key);
1759 pi = umtx_pi_lookup(&uq->uq_key);
1761 umtx_pi_free(new_pi);
1765 if (new_pi != NULL) {
1766 new_pi->pi_key = uq->uq_key;
1767 umtx_pi_insert(new_pi);
1772 umtxq_unlock(&uq->uq_key);
1775 * Care must be exercised when dealing with the umtx structure.  It
1776 * can fault on any access.
1780 * Try the uncontested case. This should be done in userland.
1782 owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id);
1784 /* The acquire succeeded. */
1785 if (owner == UMUTEX_UNOWNED) {
1790 /* The address was invalid. */
1796 /* If no one owns it but it is contested try to acquire it. */
1797 if (owner == UMUTEX_CONTESTED) {
1798 owner = casuword32(&m->m_owner,
1799 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
1801 if (owner == UMUTEX_CONTESTED) {
1802 umtxq_lock(&uq->uq_key);
1803 umtxq_busy(&uq->uq_key);
1804 error = umtx_pi_claim(pi, td);
1805 umtxq_unbusy(&uq->uq_key);
1806 umtxq_unlock(&uq->uq_key);
1810 /* The address was invalid. */
1816 /* If this failed the lock has changed, restart. */
1820 if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
1821 (owner & ~UMUTEX_CONTESTED) == id) {
1832 * If we caught a signal, we have retried and now exit immediately.
1838 umtxq_lock(&uq->uq_key);
1839 umtxq_busy(&uq->uq_key);
1840 umtxq_unlock(&uq->uq_key);
1843 * Set the contested bit so that a release in user space
1844 * knows to use the system call for unlock.  If this fails,
1845 * either someone else has acquired the lock or it has been released.
1848 old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED);
1850 /* The address was invalid. */
1852 umtxq_lock(&uq->uq_key);
1853 umtxq_unbusy(&uq->uq_key);
1854 umtxq_unlock(&uq->uq_key);
1859 umtxq_lock(&uq->uq_key);
1861 * We set the contested bit, so sleep.  Otherwise the lock changed
1862 * and we need to retry, or we lost a race to the thread
1863 * unlocking the umtx.
1866 error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED,
1869 umtxq_unbusy(&uq->uq_key);
1870 umtxq_unlock(&uq->uq_key);
1874 umtxq_lock(&uq->uq_key);
1876 umtxq_unlock(&uq->uq_key);
1878 umtx_key_release(&uq->uq_key);
1883 * Unlock a PI mutex.
1886 do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
1888 struct umtx_key key;
1889 struct umtx_q *uq_first, *uq_first2, *uq_me;
1890 struct umtx_pi *pi, *pi2;
1891 uint32_t owner, old, id;
1898 * Make sure we own this mtx.
1900 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
1904 if ((owner & ~UMUTEX_CONTESTED) != id)
1907 /* This should be done in userland */
1908 if ((owner & UMUTEX_CONTESTED) == 0) {
1909 old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED);
1917 /* We should only ever be in here for contested locks */
1918 if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags),
1924 count = umtxq_count_pi(&key, &uq_first);
1925 if (uq_first != NULL) {
1926 mtx_lock_spin(&umtx_lock);
1927 pi = uq_first->uq_pi_blocked;
1928 KASSERT(pi != NULL, ("pi == NULL?"));
1929 if (pi->pi_owner != curthread) {
1930 mtx_unlock_spin(&umtx_lock);
1933 umtx_key_release(&key);
1934 /* Userland messed up the mutex. */
1937 uq_me = curthread->td_umtxq;
1938 pi->pi_owner = NULL;
1939 TAILQ_REMOVE(&uq_me->uq_pi_contested, pi, pi_link);
1940 /* Get the highest-priority thread that is still sleeping. */
1941 uq_first = TAILQ_FIRST(&pi->pi_blocked);
1942 while (uq_first != NULL &&
1943 (uq_first->uq_flags & UQF_UMTXQ) == 0) {
1944 uq_first = TAILQ_NEXT(uq_first, uq_lockq);
1947 TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
1948 uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
1949 if (uq_first2 != NULL) {
1950 if (pri > UPRI(uq_first2->uq_thread))
1951 pri = UPRI(uq_first2->uq_thread);
1954 thread_lock(curthread);
1955 sched_unlend_user_prio(curthread, pri);
1956 thread_unlock(curthread);
1957 mtx_unlock_spin(&umtx_lock);
1959 umtxq_signal_thread(uq_first);
1964 * When unlocking the umtx, it must be marked as unowned if
1965 * zero or one thread is waiting for it.
1966 * Otherwise, it must be marked as contested.
1968 old = casuword32(&m->m_owner, owner,
1969 count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
1974 umtx_key_release(&key);
1986 _do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags, int timo,
1989 struct umtx_q *uq, *uq2;
1993 int error, pri, old_inherited_pri, su;
1997 if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
2000 su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2002 old_inherited_pri = uq->uq_inherited_pri;
2003 umtxq_lock(&uq->uq_key);
2004 umtxq_busy(&uq->uq_key);
2005 umtxq_unlock(&uq->uq_key);
2007 ceiling = RTP_PRIO_MAX - fuword32(&m->m_ceilings[0]);
2008 if (ceiling > RTP_PRIO_MAX) {
2013 mtx_lock_spin(&umtx_lock);
2014 if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
2015 mtx_unlock_spin(&umtx_lock);
2019 if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
2020 uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
2022 if (uq->uq_inherited_pri < UPRI(td))
2023 sched_lend_user_prio(td, uq->uq_inherited_pri);
2026 mtx_unlock_spin(&umtx_lock);
2028 owner = casuword32(&m->m_owner,
2029 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
2031 if (owner == UMUTEX_CONTESTED) {
2036 /* The address was invalid. */
2042 if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
2043 (owner & ~UMUTEX_CONTESTED) == id) {
2054 * If we caught a signal, we have retried and now exit immediately.
2060 umtxq_lock(&uq->uq_key);
2062 umtxq_unbusy(&uq->uq_key);
2063 error = umtxq_sleep(uq, "umtxpp", timo);
2065 umtxq_unlock(&uq->uq_key);
2067 mtx_lock_spin(&umtx_lock);
2068 uq->uq_inherited_pri = old_inherited_pri;
2070 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2071 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2073 if (pri > UPRI(uq2->uq_thread))
2074 pri = UPRI(uq2->uq_thread);
2077 if (pri > uq->uq_inherited_pri)
2078 pri = uq->uq_inherited_pri;
2080 sched_unlend_user_prio(td, pri);
2082 mtx_unlock_spin(&umtx_lock);
2086 mtx_lock_spin(&umtx_lock);
2087 uq->uq_inherited_pri = old_inherited_pri;
2089 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2090 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2092 if (pri > UPRI(uq2->uq_thread))
2093 pri = UPRI(uq2->uq_thread);
2096 if (pri > uq->uq_inherited_pri)
2097 pri = uq->uq_inherited_pri;
2099 sched_unlend_user_prio(td, pri);
2101 mtx_unlock_spin(&umtx_lock);
2105 umtxq_lock(&uq->uq_key);
2106 umtxq_unbusy(&uq->uq_key);
2107 umtxq_unlock(&uq->uq_key);
2108 umtx_key_release(&uq->uq_key);
2113 * Unlock a PP mutex.
2116 do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags)
2118 struct umtx_key key;
2119 struct umtx_q *uq, *uq2;
2123 int error, pri, new_inherited_pri, su;
2127 su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2130 * Make sure we own this mtx.
2132 owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
2136 if ((owner & ~UMUTEX_CONTESTED) != id)
2139 error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
2144 new_inherited_pri = PRI_MAX;
2146 rceiling = RTP_PRIO_MAX - rceiling;
2147 if (rceiling > RTP_PRIO_MAX)
2149 new_inherited_pri = PRI_MIN_REALTIME + rceiling;
2152 if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
2159 * For a priority-protected mutex, always set the unlocked state
2160 * to UMUTEX_CONTESTED so that userland always enters the kernel
2161 * to lock the mutex.  This is necessary because thread priority
2162 * has to be adjusted for such a mutex.
2164 error = suword32(__DEVOLATILE(uint32_t *, &m->m_owner),
2169 umtxq_signal(&key, 1);
2176 mtx_lock_spin(&umtx_lock);
2178 uq->uq_inherited_pri = new_inherited_pri;
2180 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2181 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2183 if (pri > UPRI(uq2->uq_thread))
2184 pri = UPRI(uq2->uq_thread);
2187 if (pri > uq->uq_inherited_pri)
2188 pri = uq->uq_inherited_pri;
2190 sched_unlend_user_prio(td, pri);
2192 mtx_unlock_spin(&umtx_lock);
2194 umtx_key_release(&key);
2199 do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
2200 uint32_t *old_ceiling)
2203 uint32_t save_ceiling;
2208 flags = fuword32(&m->m_flags);
2209 if ((flags & UMUTEX_PRIO_PROTECT) == 0)
2211 if (ceiling > RTP_PRIO_MAX)
2215 if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
2219 umtxq_lock(&uq->uq_key);
2220 umtxq_busy(&uq->uq_key);
2221 umtxq_unlock(&uq->uq_key);
2223 save_ceiling = fuword32(&m->m_ceilings[0]);
2225 owner = casuword32(&m->m_owner,
2226 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
2228 if (owner == UMUTEX_CONTESTED) {
2229 suword32(&m->m_ceilings[0], ceiling);
2230 suword32(__DEVOLATILE(uint32_t *, &m->m_owner),
2236 /* The address was invalid. */
2242 if ((owner & ~UMUTEX_CONTESTED) == id) {
2243 suword32(&m->m_ceilings[0], ceiling);
2249 * If we caught a signal, we have retried and now exit immediately.
2256 * We set the contested bit, so sleep.  Otherwise the lock changed
2257 * and we need to retry, or we lost a race to the thread
2258 * unlocking the umtx.
2260 umtxq_lock(&uq->uq_key);
2262 umtxq_unbusy(&uq->uq_key);
2263 error = umtxq_sleep(uq, "umtxpp", 0);
2265 umtxq_unlock(&uq->uq_key);
2267 umtxq_lock(&uq->uq_key);
2269 umtxq_signal(&uq->uq_key, INT_MAX);
2270 umtxq_unbusy(&uq->uq_key);
2271 umtxq_unlock(&uq->uq_key);
2272 umtx_key_release(&uq->uq_key);
2273 if (error == 0 && old_ceiling != NULL)
2274 suword32(old_ceiling, save_ceiling);
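/*
 * Illustrative sketch (not part of the build): the ceiling-to-priority
 * mapping used by the PP code above.  A larger POSIX ceiling maps to a
 * better (numerically lower) kernel real-time priority; the helper
 * name is hypothetical.
 */
#if 0
static u_char
pp_ceiling_to_pri(uint32_t ceiling)
{
	/* Callers reject ceilings outside the RTP_PRIO range first. */
	return (PRI_MIN_REALTIME + (RTP_PRIO_MAX - ceiling));
}
#endif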
2279 _do_lock_umutex(struct thread *td, struct umutex *m, int flags, int timo,
2282 switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2284 return (_do_lock_normal(td, m, flags, timo, mode));
2285 case UMUTEX_PRIO_INHERIT:
2286 return (_do_lock_pi(td, m, flags, timo, mode));
2287 case UMUTEX_PRIO_PROTECT:
2288 return (_do_lock_pp(td, m, flags, timo, mode));
2294 * Lock a userland POSIX mutex.
2297 do_lock_umutex(struct thread *td, struct umutex *m,
2298 struct timespec *timeout, int mode)
2300 struct timespec ts, ts2, ts3;
2305 flags = fuword32(&m->m_flags);
2309 if (timeout == NULL) {
2310 error = _do_lock_umutex(td, m, flags, 0, mode);
2311 /* Mutex locking is restarted if it is interrupted. */
2312 if (error == EINTR && mode != _UMUTEX_WAIT)
2316 timespecadd(&ts, timeout);
2317 TIMESPEC_TO_TIMEVAL(&tv, timeout);
2319 error = _do_lock_umutex(td, m, flags, tvtohz(&tv), mode);
2320 if (error != ETIMEDOUT)
2322 getnanouptime(&ts2);
2323 if (timespeccmp(&ts2, &ts, >=)) {
2328 timespecsub(&ts3, &ts2);
2329 TIMESPEC_TO_TIMEVAL(&tv, &ts3);
2331 /* Timed-locking is not restarted. */
2332 if (error == ERESTART)
2339 * Unlock a userland POSIX mutex.
2342 do_unlock_umutex(struct thread *td, struct umutex *m)
2346 flags = fuword32(&m->m_flags);
2350 switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2352 return (do_unlock_normal(td, m, flags));
2353 case UMUTEX_PRIO_INHERIT:
2354 return (do_unlock_pi(td, m, flags));
2355 case UMUTEX_PRIO_PROTECT:
2356 return (do_unlock_pp(td, m, flags));
2363 do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
2364 struct timespec *timeout, u_long wflags)
2368 struct timespec cts, ets, tts;
2373 flags = fuword32(&cv->c_flags);
2374 error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);
2377 umtxq_lock(&uq->uq_key);
2378 umtxq_busy(&uq->uq_key);
2380 umtxq_unlock(&uq->uq_key);
2383 * The key point is that we must set c_has_waiters to 1 before
2384 * releasing the user mutex.
2386 suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 1);
2388 umtxq_lock(&uq->uq_key);
2389 umtxq_unbusy(&uq->uq_key);
2390 umtxq_unlock(&uq->uq_key);
2392 error = do_unlock_umutex(td, m);
2394 umtxq_lock(&uq->uq_key);
2396 if ((wflags & UMTX_CHECK_UNPARKING) &&
2397 (td->td_pflags & TDP_WAKEUP)) {
2398 td->td_pflags &= ~TDP_WAKEUP;
2400 } else if (timeout == NULL) {
2401 error = umtxq_sleep(uq, "ucond", 0);
2403 getnanouptime(&ets);
2404 timespecadd(&ets, timeout);
2405 TIMESPEC_TO_TIMEVAL(&tv, timeout);
2407 error = umtxq_sleep(uq, "ucond", tvtohz(&tv));
2408 if (error != ETIMEDOUT)
2410 getnanouptime(&cts);
2411 if (timespeccmp(&cts, &ets, >=)) {
2416 timespecsub(&tts, &cts);
2417 TIMESPEC_TO_TIMEVAL(&tv, &tts);
2423 if ((uq->uq_flags & UQF_UMTXQ) == 0) {
2425 * If we were concurrently signaled by do_cv_signal()
2426 * and we got an error, a UNIX signal, or a timeout,
2427 * then perform another umtxq_signal to avoid
2428 * consuming the wakeup.  This may cause a spurious
2429 * wakeup for another thread which was just queued,
2430 * but SUSv3 explicitly allows spurious wakeups to
2431 * occur, and indeed a kernel-based implementation cannot avoid it.
2434 if (!umtxq_signal(&uq->uq_key, 1))
2437 if (error == ERESTART)
2441 umtxq_unlock(&uq->uq_key);
2442 umtx_key_release(&uq->uq_key);
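/*
 * Illustrative sketch (not part of the build): the userland side of a
 * condition wait built on the UMTX_OP_CV_* operations.  The kernel
 * sets c_has_waiters and queues the thread before dropping the mutex,
 * so userland only has to re-lock and re-test.  "mutex_lock_user" is a
 * hypothetical re-lock helper.
 */
#if 0
#include <sys/types.h>
#include <sys/umtx.h>

static void
cond_wait_sketch(struct ucond *cv, struct umutex *m, int (*pred)(void))
{
	while (!pred()) {
		/* obj = cv, val = wflags, uaddr1 = m, uaddr2 = timeout. */
		_umtx_op(cv, UMTX_OP_CV_WAIT, 0, m, NULL);
		/* m is unlocked on wakeup; re-acquire before re-testing. */
		mutex_lock_user(m);
	}
}
#endif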
2447 * Signal a userland condition variable.
2450 do_cv_signal(struct thread *td, struct ucond *cv)
2452 struct umtx_key key;
2453 int error, cnt, nwake;
2456 flags = fuword32(&cv->c_flags);
2457 if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
2461 cnt = umtxq_count(&key);
2462 nwake = umtxq_signal(&key, 1);
2466 __DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
2471 umtx_key_release(&key);
2476 do_cv_broadcast(struct thread *td, struct ucond *cv)
2478 struct umtx_key key;
2482 flags = fuword32(&cv->c_flags);
2483 if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
2488 umtxq_signal(&key, INT_MAX);
2491 error = suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
2497 umtx_key_release(&key);
2502 do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag, int timo)
2505 uint32_t flags, wrflags;
2506 int32_t state, oldstate;
2507 int32_t blocked_readers;
2511 flags = fuword32(&rwlock->rw_flags);
2512 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2516 wrflags = URWLOCK_WRITE_OWNER;
2517 if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
2518 wrflags |= URWLOCK_WRITE_WAITERS;
2521 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2522 /* try to lock it */
2523 while (!(state & wrflags)) {
2524 if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) {
2525 umtx_key_release(&uq->uq_key);
2528 oldstate = casuword32(&rwlock->rw_state, state, state + 1);
2529 if (oldstate == state) {
2530 umtx_key_release(&uq->uq_key);
2539 /* grab monitor lock */
2540 umtxq_lock(&uq->uq_key);
2541 umtxq_busy(&uq->uq_key);
2542 umtxq_unlock(&uq->uq_key);
2545 * re-read the state, in case it changed between the try-lock above
2546 * and the check below
2548 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2550 /* set read contention bit */
2551 while ((state & wrflags) && !(state & URWLOCK_READ_WAITERS)) {
2552 oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_READ_WAITERS);
2553 if (oldstate == state)
2558 /* The state changed while setting the flags; restart. */
2559 if (!(state & wrflags)) {
2560 umtxq_lock(&uq->uq_key);
2561 umtxq_unbusy(&uq->uq_key);
2562 umtxq_unlock(&uq->uq_key);
2567 /* The contention bit is set; increase the read waiter count before sleeping. */
2568 blocked_readers = fuword32(&rwlock->rw_blocked_readers);
2569 suword32(&rwlock->rw_blocked_readers, blocked_readers + 1);
2571 while (state & wrflags) {
2572 umtxq_lock(&uq->uq_key);
2574 umtxq_unbusy(&uq->uq_key);
2576 error = umtxq_sleep(uq, "urdlck", timo);
2578 umtxq_busy(&uq->uq_key);
2580 umtxq_unlock(&uq->uq_key);
2583 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2586 /* Decrease the read waiter count and possibly clear the read contention bit. */
2587 blocked_readers = fuword32(&rwlock->rw_blocked_readers);
2588 suword32(&rwlock->rw_blocked_readers, blocked_readers - 1);
2589 if (blocked_readers == 1) {
2590 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2592 oldstate = casuword32(&rwlock->rw_state, state,
2593 state & ~URWLOCK_READ_WAITERS);
2594 if (oldstate == state)
2600 umtxq_lock(&uq->uq_key);
2601 umtxq_unbusy(&uq->uq_key);
2602 umtxq_unlock(&uq->uq_key);
2604 umtx_key_release(&uq->uq_key);
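/*
 * Illustrative sketch (not part of the build): the writer-preferring
 * read-lock fast path mirroring the loop above -- CAS the reader count
 * up while no writer bits are set.  The helper name is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/umtx.h>
#include <errno.h>
#include <machine/atomic.h>

static int
rw_tryrdlock_user(volatile int32_t *statep)
{
	int32_t state;

	state = *statep;
	while ((state & (URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS)) == 0 &&
	    URWLOCK_READER_COUNT(state) < URWLOCK_MAX_READERS) {
		/* The low bits of rw_state count the readers. */
		if (atomic_cmpset_acq_32((volatile uint32_t *)statep,
		    state, state + 1))
			return (0);
		state = *statep;
	}
	/* Contended: fall back to UMTX_OP_RW_RDLOCK (do_rw_rdlock()). */
	return (EBUSY);
}
#endif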
2609 do_rw_rdlock2(struct thread *td, void *obj, long val, struct timespec *timeout)
2611 struct timespec ts, ts2, ts3;
2616 timespecadd(&ts, timeout);
2617 TIMESPEC_TO_TIMEVAL(&tv, timeout);
2619 error = do_rw_rdlock(td, obj, val, tvtohz(&tv));
2620 if (error != ETIMEDOUT)
2622 getnanouptime(&ts2);
2623 if (timespeccmp(&ts2, &ts, >=)) {
2628 timespecsub(&ts3, &ts2);
2629 TIMESPEC_TO_TIMEVAL(&tv, &ts3);
2631 if (error == ERESTART)
2637 do_rw_wrlock(struct thread *td, struct urwlock *rwlock, int timo)
2641 int32_t state, oldstate;
2642 int32_t blocked_writers;
2643 int32_t blocked_readers;
2647 flags = fuword32(&rwlock->rw_flags);
2648 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2652 blocked_readers = 0;
2654 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2655 while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
2656 oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_WRITE_OWNER);
2657 if (oldstate == state) {
2658 umtx_key_release(&uq->uq_key);
2665 if (!(state & (URWLOCK_WRITE_OWNER|URWLOCK_WRITE_WAITERS)) &&
2666 blocked_readers != 0) {
2667 umtxq_lock(&uq->uq_key);
2668 umtxq_busy(&uq->uq_key);
2669 umtxq_signal_queue(&uq->uq_key, INT_MAX, UMTX_SHARED_QUEUE);
2670 umtxq_unbusy(&uq->uq_key);
2671 umtxq_unlock(&uq->uq_key);
2677 /* grab monitor lock */
2678 umtxq_lock(&uq->uq_key);
2679 umtxq_busy(&uq->uq_key);
2680 umtxq_unlock(&uq->uq_key);
2683 * re-read the state, in case it changed between the try-lock above
2684 * and the check below
2686 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2688 while (((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) &&
2689 (state & URWLOCK_WRITE_WAITERS) == 0) {
2690 oldstate = casuword32(&rwlock->rw_state, state, state | URWLOCK_WRITE_WAITERS);
2691 if (oldstate == state)
2696 if (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
2697 umtxq_lock(&uq->uq_key);
2698 umtxq_unbusy(&uq->uq_key);
2699 umtxq_unlock(&uq->uq_key);
2703 blocked_writers = fuword32(&rwlock->rw_blocked_writers);
2704 suword32(&rwlock->rw_blocked_writers, blocked_writers + 1);
2706 while ((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) {
2707 umtxq_lock(&uq->uq_key);
2708 umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
2709 umtxq_unbusy(&uq->uq_key);
2711 error = umtxq_sleep(uq, "uwrlck", timo);
2713 umtxq_busy(&uq->uq_key);
2714 umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
2715 umtxq_unlock(&uq->uq_key);
2718 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2721 blocked_writers = fuword32(&rwlock->rw_blocked_writers);
2722 suword32(&rwlock->rw_blocked_writers, blocked_writers - 1);
2723 if (blocked_writers == 1) {
2724 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2726 oldstate = casuword32(&rwlock->rw_state, state,
2727 state & ~URWLOCK_WRITE_WAITERS);
2728 if (oldstate == state)
2732 blocked_readers = fuword32(&rwlock->rw_blocked_readers);
2734 blocked_readers = 0;
2736 umtxq_lock(&uq->uq_key);
2737 umtxq_unbusy(&uq->uq_key);
2738 umtxq_unlock(&uq->uq_key);
2741 umtx_key_release(&uq->uq_key);
2746 do_rw_wrlock2(struct thread *td, void *obj, struct timespec *timeout)
2748 struct timespec ts, ts2, ts3;
2753 timespecadd(&ts, timeout);
2754 TIMESPEC_TO_TIMEVAL(&tv, timeout);
2756 error = do_rw_wrlock(td, obj, tvtohz(&tv));
2757 if (error != ETIMEDOUT)
2759 getnanouptime(&ts2);
2760 if (timespeccmp(&ts2, &ts, >=)) {
2765 timespecsub(&ts3, &ts2);
2766 TIMESPEC_TO_TIMEVAL(&tv, &ts3);
2768 if (error == ERESTART)
2774 do_rw_unlock(struct thread *td, struct urwlock *rwlock)
2778 int32_t state, oldstate;
2779 int error, q, count;
2782 flags = fuword32(&rwlock->rw_flags);
2783 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2787 state = fuword32(__DEVOLATILE(int32_t *, &rwlock->rw_state));
2788 if (state & URWLOCK_WRITE_OWNER) {
2790 oldstate = casuword32(&rwlock->rw_state, state,
2791 state & ~URWLOCK_WRITE_OWNER);
2792 if (oldstate != state) {
2794 if (!(oldstate & URWLOCK_WRITE_OWNER)) {
2801 } else if (URWLOCK_READER_COUNT(state) != 0) {
2803 oldstate = casuword32(&rwlock->rw_state, state,
2805 if (oldstate != state) {
2807 if (URWLOCK_READER_COUNT(oldstate) == 0) {
2822 if (!(flags & URWLOCK_PREFER_READER)) {
2823 if (state & URWLOCK_WRITE_WAITERS) {
2825 q = UMTX_EXCLUSIVE_QUEUE;
2826 } else if (state & URWLOCK_READ_WAITERS) {
2828 q = UMTX_SHARED_QUEUE;
2831 if (state & URWLOCK_READ_WAITERS) {
2833 q = UMTX_SHARED_QUEUE;
2834 } else if (state & URWLOCK_WRITE_WAITERS) {
2836 q = UMTX_EXCLUSIVE_QUEUE;
2841 umtxq_lock(&uq->uq_key);
2842 umtxq_busy(&uq->uq_key);
2843 umtxq_signal_queue(&uq->uq_key, count, q);
2844 umtxq_unbusy(&uq->uq_key);
2845 umtxq_unlock(&uq->uq_key);
2848 umtx_key_release(&uq->uq_key);
2853 _umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
2854 /* struct umtx *umtx */
2856 return _do_lock_umtx(td, uap->umtx, td->td_tid, 0);
2860 _umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
2861 /* struct umtx *umtx */
2863 return do_unlock_umtx(td, uap->umtx, td->td_tid);
2867 umtx_copyin_timeout(const void *addr, struct timespec *tsp)
2871 error = copyin(addr, tsp, sizeof(struct timespec));
2873 if (tsp->tv_sec < 0 ||
2874 tsp->tv_nsec >= 1000000000 ||
2882 __umtx_op_lock_umtx(struct thread *td, struct _umtx_op_args *uap)
2884 struct timespec *ts, timeout;
2887 /* Allow a null timespec (wait forever). */
2888 if (uap->uaddr2 == NULL)
2891 error = umtx_copyin_timeout(uap->uaddr2, &timeout);
2896 return (do_lock_umtx(td, uap->obj, uap->val, ts));
2900 __umtx_op_unlock_umtx(struct thread *td, struct _umtx_op_args *uap)
2902 return (do_unlock_umtx(td, uap->obj, uap->val));
2906 __umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
2908 struct timespec *ts, timeout;
2911 if (uap->uaddr2 == NULL)
2914 error = umtx_copyin_timeout(uap->uaddr2, &timeout);
2919 return do_wait(td, uap->obj, uap->val, ts, 0, 0);
2923 __umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
2925 struct timespec *ts, timeout;
2928 if (uap->uaddr2 == NULL)
2931 error = umtx_copyin_timeout(uap->uaddr2, &timeout);
2936 return do_wait(td, uap->obj, uap->val, ts, 1, 0);
2940 __umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
2942 struct timespec *ts, timeout;
2945 if (uap->uaddr2 == NULL)
2948 error = umtx_copyin_timeout(uap->uaddr2, &timeout);
2953 return do_wait(td, uap->obj, uap->val, ts, 1, 1);
2957 __umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
2959 return (kern_umtx_wake(td, uap->obj, uap->val, 0));
2963 __umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
2965 return (kern_umtx_wake(td, uap->obj, uap->val, 1));
2969 __umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
2971 struct timespec *ts, timeout;
2974 /* Allow a null timespec (wait forever). */
2975 if (uap->uaddr2 == NULL)
2978 error = umtx_copyin_timeout(uap->uaddr2, &timeout);
2983 return do_lock_umutex(td, uap->obj, ts, 0);
2987 __umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
2989 return do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY);
2993 __umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
2995 struct timespec *ts, timeout;
2998 /* Allow a null timespec (wait forever). */
2999 if (uap->uaddr2 == NULL)
3002 error = umtx_copyin_timeout(uap->uaddr2, &timeout);
3007 return do_lock_umutex(td, uap->obj, ts, _UMUTEX_WAIT);
3011 __umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap)
3013 return do_wake_umutex(td, uap->obj);
3017 __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
3019 return do_unlock_umutex(td, uap->obj);
3023 __umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap)
3025 return do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1);
3029 __umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
3031 struct timespec *ts, timeout;
3034 /* Allow a null timespec (wait forever). */
3035 if (uap->uaddr2 == NULL)
3038 error = umtx_copyin_timeout(uap->uaddr2, &timeout);
3043 return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
static int
__umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap)
{
	return do_cv_signal(td, uap->obj);
}

static int
__umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap)
{
	return do_cv_broadcast(td, uap->obj);
}

static int
__umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
	} else {
		error = umtx_copyin_timeout(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_rdlock2(td, uap->obj, uap->val, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_wrlock(td, uap->obj, 0);
	} else {
		error = umtx_copyin_timeout(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_wrlock2(td, uap->obj, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
{
	return do_rw_unlock(td, uap->obj);
}

static int
__umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap)
{
	return do_wake2_umutex(td, uap->obj, uap->val);
}

static int
__umtx_op_not_sup(struct thread *td __unused, struct _umtx_op_args *uap __unused)
{
	return (EOPNOTSUPP);
}

typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap);

static _umtx_op_func op_table[] = {
	__umtx_op_lock_umtx,		/* UMTX_OP_LOCK */
	__umtx_op_unlock_umtx,		/* UMTX_OP_UNLOCK */
	__umtx_op_wait,			/* UMTX_OP_WAIT */
	__umtx_op_wake,			/* UMTX_OP_WAKE */
	__umtx_op_trylock_umutex,	/* UMTX_OP_MUTEX_TRYLOCK */
	__umtx_op_lock_umutex,		/* UMTX_OP_MUTEX_LOCK */
	__umtx_op_unlock_umutex,	/* UMTX_OP_MUTEX_UNLOCK */
	__umtx_op_set_ceiling,		/* UMTX_OP_SET_CEILING */
	__umtx_op_cv_wait,		/* UMTX_OP_CV_WAIT */
	__umtx_op_cv_signal,		/* UMTX_OP_CV_SIGNAL */
	__umtx_op_cv_broadcast,		/* UMTX_OP_CV_BROADCAST */
	__umtx_op_wait_uint,		/* UMTX_OP_WAIT_UINT */
	__umtx_op_rw_rdlock,		/* UMTX_OP_RW_RDLOCK */
	__umtx_op_rw_wrlock,		/* UMTX_OP_RW_WRLOCK */
	__umtx_op_rw_unlock,		/* UMTX_OP_RW_UNLOCK */
	__umtx_op_wait_uint_private,	/* UMTX_OP_WAIT_UINT_PRIVATE */
	__umtx_op_wake_private,		/* UMTX_OP_WAKE_PRIVATE */
	__umtx_op_wait_umutex,		/* UMTX_OP_UMUTEX_WAIT */
	__umtx_op_wake_umutex,		/* UMTX_OP_UMUTEX_WAKE */
	__umtx_op_not_sup,		/* UMTX_OP_SEM_WAIT */
	__umtx_op_not_sup,		/* UMTX_OP_SEM_WAKE */
	__umtx_op_not_sup,		/* UMTX_OP_NWAKE_PRIVATE */
	__umtx_op_wake2_umutex		/* UMTX_OP_UMUTEX_WAKE2 */
};

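/*
 * The handlers are dispatched by numeric op, so the table order must
 * match the UMTX_OP_* constant values from <sys/umtx.h>; reserved but
 * unimplemented ops are mapped to __umtx_op_not_sup.
 */
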
int
_umtx_op(struct thread *td, struct _umtx_op_args *uap)
{
	if ((unsigned)uap->op < UMTX_OP_MAX)
		return (*op_table[uap->op])(td, uap);
	return (EINVAL);
}

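/*
 * Illustrative userland usage (a sketch, not code from this file): with
 * the _umtx_op(2) prototype
 *
 *	int _umtx_op(void *obj, int op, u_long val, void *uaddr,
 *	    void *uaddr2);
 *
 * one thread can block until the word at "obj" no longer equals "val",
 * and a peer can wake it:
 *
 *	_umtx_op(&word, UMTX_OP_WAIT_UINT_PRIVATE, val, NULL, NULL);
 *	_umtx_op(&word, UMTX_OP_WAKE_PRIVATE, 1, NULL, NULL);
 */
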
#ifdef COMPAT_FREEBSD32
int
freebsd32_umtx_lock(struct thread *td, struct freebsd32_umtx_lock_args *uap)
    /* struct umtx *umtx */
{
	return (do_lock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid, NULL));
}

int
freebsd32_umtx_unlock(struct thread *td, struct freebsd32_umtx_unlock_args *uap)
    /* struct umtx *umtx */
{
	return (do_unlock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid));
}

struct timespec32 {
	uint32_t tv_sec;
	uint32_t tv_nsec;
};

static int
umtx_copyin_timeout32(void *addr, struct timespec *tsp)
{
	struct timespec32 ts32;
	int error;

	error = copyin(addr, &ts32, sizeof(struct timespec32));
	if (error == 0) {
		if (ts32.tv_sec < 0 ||
		    ts32.tv_nsec >= 1000000000 ||
		    ts32.tv_nsec < 0)
			error = EINVAL;
		else {
			tsp->tv_sec = ts32.tv_sec;
			tsp->tv_nsec = ts32.tv_nsec;
		}
	}
	return (error);
}

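/*
 * The 32-bit tv_sec/tv_nsec fields are widened into a native struct
 * timespec here, so the shared do_*() routines below never have to know
 * about the compat layout.
 */
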
static int
__umtx_op_lock_umtx_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return (do_lock_umtx32(td, uap->obj, uap->val, ts));
}

static int
__umtx_op_unlock_umtx_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	return (do_unlock_umtx32(td, uap->obj, (uint32_t)uap->val));
}

static int
__umtx_op_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return do_wait(td, uap->obj, uap->val, ts, 1, 0);
}

static int
__umtx_op_lock_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return do_lock_umutex(td, uap->obj, ts, 0);
}

static int
__umtx_op_wait_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return do_lock_umutex(td, uap->obj, ts, _UMUTEX_WAIT);
}

static int
__umtx_op_cv_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
}

static int
__umtx_op_rw_rdlock_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
	} else {
		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_rdlock2(td, uap->obj, uap->val, &timeout);
	}
	return (error);
}

static int
__umtx_op_rw_wrlock_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec timeout;
	int error;

	/* Allow a null timespec (wait forever). */
	if (uap->uaddr2 == NULL) {
		error = do_rw_wrlock(td, uap->obj, 0);
	} else {
		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		error = do_rw_wrlock2(td, uap->obj, &timeout);
	}
	return (error);
}

static int
__umtx_op_wait_uint_private_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	struct timespec *ts, timeout;
	int error;

	if (uap->uaddr2 == NULL)
		ts = NULL;
	else {
		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
		if (error != 0)
			return (error);
		ts = &timeout;
	}
	return do_wait(td, uap->obj, uap->val, ts, 1, 1);
}

static _umtx_op_func op_table_compat32[] = {
	__umtx_op_lock_umtx_compat32,	/* UMTX_OP_LOCK */
	__umtx_op_unlock_umtx_compat32,	/* UMTX_OP_UNLOCK */
	__umtx_op_wait_compat32,	/* UMTX_OP_WAIT */
	__umtx_op_wake,			/* UMTX_OP_WAKE */
	__umtx_op_trylock_umutex,	/* UMTX_OP_MUTEX_TRYLOCK */
	__umtx_op_lock_umutex_compat32,	/* UMTX_OP_MUTEX_LOCK */
	__umtx_op_unlock_umutex,	/* UMTX_OP_MUTEX_UNLOCK */
	__umtx_op_set_ceiling,		/* UMTX_OP_SET_CEILING */
	__umtx_op_cv_wait_compat32,	/* UMTX_OP_CV_WAIT */
	__umtx_op_cv_signal,		/* UMTX_OP_CV_SIGNAL */
	__umtx_op_cv_broadcast,		/* UMTX_OP_CV_BROADCAST */
	__umtx_op_wait_compat32,	/* UMTX_OP_WAIT_UINT */
	__umtx_op_rw_rdlock_compat32,	/* UMTX_OP_RW_RDLOCK */
	__umtx_op_rw_wrlock_compat32,	/* UMTX_OP_RW_WRLOCK */
	__umtx_op_rw_unlock,		/* UMTX_OP_RW_UNLOCK */
	__umtx_op_wait_uint_private_compat32,	/* UMTX_OP_WAIT_UINT_PRIVATE */
	__umtx_op_wake_private,		/* UMTX_OP_WAKE_PRIVATE */
	__umtx_op_wait_umutex_compat32,	/* UMTX_OP_UMUTEX_WAIT */
	__umtx_op_wake_umutex,		/* UMTX_OP_UMUTEX_WAKE */
	__umtx_op_not_sup,		/* UMTX_OP_SEM_WAIT */
	__umtx_op_not_sup,		/* UMTX_OP_SEM_WAKE */
	__umtx_op_not_sup,		/* UMTX_OP_NWAKE_PRIVATE */
	__umtx_op_wake2_umutex		/* UMTX_OP_UMUTEX_WAKE2 */
};

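/*
 * Entries that never dereference a userland timespec (the wake,
 * signal/broadcast, ceiling and unlock ops) reuse the native handlers
 * directly; only ops that copy in a timespec need *_compat32 variants.
 */
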
int
freebsd32_umtx_op(struct thread *td, struct freebsd32_umtx_op_args *uap)
{
	if ((unsigned)uap->op < UMTX_OP_MAX)
		return (*op_table_compat32[uap->op])(td,
			(struct _umtx_op_args *)uap);
	return (EINVAL);
}
#endif /* COMPAT_FREEBSD32 */

void
umtx_thread_init(struct thread *td)
{
	td->td_umtxq = umtxq_alloc();
	td->td_umtxq->uq_thread = td;
}

void
umtx_thread_fini(struct thread *td)
{
	umtxq_free(td->td_umtxq);
}

/*
 * Called when a new thread is created, e.g. during fork().
 */
void
umtx_thread_alloc(struct thread *td)
{
	struct umtx_q *uq;

	uq = td->td_umtxq;
	uq->uq_inherited_pri = PRI_MAX;

	KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
	KASSERT(uq->uq_thread == td, ("uq_thread != td"));
	KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
	KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
}

/*
 * exec() hook.
 */
static void
umtx_exec_hook(void *arg __unused, struct proc *p __unused,
	struct image_params *imgp __unused)
{
	umtx_thread_cleanup(curthread);
}

/*
 * thread_exit() hook.
 */
void
umtx_thread_exit(struct thread *td)
{
	umtx_thread_cleanup(td);
}

/*
 * Clean up umtx data: disown any priority-inheritance mutexes still
 * contested by this thread and drop its borrowed priority.
 */
static void
umtx_thread_cleanup(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;

	if ((uq = td->td_umtxq) == NULL)
		return;

	mtx_lock_spin(&umtx_lock);
	uq->uq_inherited_pri = PRI_MAX;
	while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
		pi->pi_owner = NULL;
		TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
	}
	thread_lock(td);
	td->td_flags &= ~TDF_UBORROWING;
	thread_unlock(td);
	mtx_unlock_spin(&umtx_lock);
}