1 /* $NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $ */
4 * Copyright (c) 2005 Emmanuel Dreyfus, all rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Emmanuel Dreyfus
17 * 4. The name of the author may not be used to endorse or promote
18 * products derived from this software without specific prior written
21 * THIS SOFTWARE IS PROVIDED BY THE THE AUTHOR AND CONTRIBUTORS ``AS IS''
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 __KERNEL_RCSID(1, "$NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $");
40 #include "opt_compat.h"
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/imgact.h>
45 #include <sys/kernel.h>
48 #include <sys/malloc.h>
49 #include <sys/mutex.h>
52 #include <sys/queue.h>
53 #include <sys/sched.h>
58 #include <machine/../linux32/linux.h>
59 #include <machine/../linux32/linux32_proto.h>
61 #include <machine/../linux/linux.h>
62 #include <machine/../linux/linux_proto.h>
64 #include <compat/linux/linux_emul.h>
65 #include <compat/linux/linux_futex.h>
66 #include <compat/linux/linux_util.h>
/* Allocation tags: futex objects and per-sleeper waiting_proc records. */
MALLOC_DEFINE(M_FUTEX, "futex", "Linux futexes");
MALLOC_DEFINE(M_FUTEX_WP, "futex wp", "Linux futexes wp");

	/*
	 * NOTE(review): the struct headers for waiting_proc and futex are
	 * missing from this excerpt; the members below belong to them.
	 */
	struct futex *wp_futex;		/* futex this waiter currently sleeps on */
	TAILQ_ENTRY(waiting_proc) wp_list;	/* linkage on f_waiting_proc */

	uint32_t *f_uaddr;	/* user-supplied value, for debug */
	struct umtx_key f_key;	/* canonical identity used for lookup/match */
	LIST_ENTRY(futex) f_list;	/* linkage on the global futex_list */
	TAILQ_HEAD(lf_waiting_proc, waiting_proc) f_waiting_proc;	/* sleepers */

/* Global list of all live futexes; protected by futex_mtx (below). */
struct futex_list futex_list;

/* Per-futex sx lock; SX_DUPOK because two futexes are locked in requeue. */
#define FUTEX_LOCK(f)		sx_xlock(&(f)->f_lck)
#define FUTEX_UNLOCK(f)		sx_xunlock(&(f)->f_lck)
#define FUTEX_INIT(f)		sx_init_flags(&(f)->f_lck, "ftlk", SX_DUPOK)
#define FUTEX_DESTROY(f)	sx_destroy(&(f)->f_lck)
#define FUTEX_ASSERT_LOCKED(f)	sx_assert(&(f)->f_lck, SA_XLOCKED)

struct mtx futex_mtx;	/* protects the futex list */
#define FUTEXES_LOCK		mtx_lock(&futex_mtx)
#define FUTEXES_UNLOCK		mtx_unlock(&futex_mtx)

/* flags for futex_get() */
#define FUTEX_CREATE_WP		0x1	/* create waiting_proc */
#define FUTEX_DONTCREATE	0x2	/* don't create futex if not exists */
#define FUTEX_DONTEXISTS	0x4	/* return EINVAL if futex exists */
#define FUTEX_SHARED		0x8	/* shared futex */

/* waiting_proc wp_flags */
#define FUTEX_WP_REQUEUED	0x1	/* wp requeued - wp moved from wp_list
					 * of futex where thread sleep to wp_list
					 */
#define FUTEX_WP_REMOVED	0x2	/* wp is woken up and removed from futex
					 * wp_list to prevent double wakeup.
					 */

/* MD atomic ops used by FUTEX_WAKE_OP; each stores the prior value. */
int futex_xchgl(int oparg, uint32_t *uaddr, int *oldval);
int futex_addl(int oparg, uint32_t *uaddr, int *oldval);
int futex_orl(int oparg, uint32_t *uaddr, int *oldval);
int futex_andl(int oparg, uint32_t *uaddr, int *oldval);
int futex_xorl(int oparg, uint32_t *uaddr, int *oldval);
/*
 * Drop one reference on futex "f" and free the waiting_proc "wp"
 * (allocated by futex_get(..., FUTEX_CREATE_WP)).  Called with the
 * per-futex sx lock held.
 *
 * NOTE(review): this excerpt is missing lines (return type, braces, the
 * wp == NULL path and the unlock/free of the futex itself) -- compare
 * against the complete linux_futex.c before relying on it.
 */
futex_put(struct futex *f, struct waiting_proc *wp)
	FUTEX_ASSERT_LOCKED(f);
	/*
	 * A waiter already woken by futex_wake()/futex_requeue() has
	 * FUTEX_WP_REMOVED set and was unlinked by the waker; only
	 * unlink here if it is still queued.
	 */
	if ((wp->wp_flags & FUTEX_WP_REMOVED) == 0)
		TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
	free(wp, M_FUTEX_WP);

	/* Last reference: unlink from the global list and tear down. */
	if (--f->f_refcount == 0) {
		LIST_REMOVE(f, f_list);
		LINUX_CTR3(sys_futex, "futex_put destroy uaddr %p ref %d "
		    "shared %d", f->f_uaddr, f->f_refcount, f->f_key.shared);
		umtx_key_release(&f->f_key);

	/* Non-final reference drop: trace only. */
	LINUX_CTR3(sys_futex, "futex_put uaddr %p ref %d shared %d",
	    f->f_uaddr, f->f_refcount, f->f_key.shared);
/*
 * Look up (or create) the futex for user address "uaddr" and return it
 * in *newf with one reference held.  FUTEX_SHARED selects process-shared
 * key resolution (AUTO_SHARE) vs. per-process (THREAD_SHARE).
 * FUTEX_DONTCREATE: return without creating if no futex exists.
 * FUTEX_DONTEXISTS: fail if a futex already exists (used by CMP_REQUEUE
 * to avoid lock-order deadlocks).
 *
 * NOTE(review): excerpt is truncated -- declarations of "key"/"error",
 * braces, refcount increment, FUTEX_INIT/FUTEX_LOCK and the
 * FUTEXES_LOCK/UNLOCK pairs around list access are missing here.
 */
futex_get0(uint32_t *uaddr, struct futex **newf, uint32_t flags)
	struct futex *f, *tmpf;

	/* Build the canonical key; this is what makes lookups alias-safe. */
	error = umtx_key_get(uaddr, TYPE_FUTEX, (flags & FUTEX_SHARED) ?
	    AUTO_SHARE : THREAD_SHARE, &key);

	/* Search the global list for an existing futex with this key. */
	LIST_FOREACH(f, &futex_list, f_list) {
		if (umtx_key_match(&f->f_key, &key)) {
			if (flags & FUTEX_DONTEXISTS) {
				umtx_key_release(&key);
			/*
			 * Increment refcount of the found futex to
			 * prevent it from deallocation before FUTEX_LOCK()
			 */
			umtx_key_release(&key);
			LINUX_CTR3(sys_futex, "futex_get uaddr %p ref %d shared %d",
			    uaddr, f->f_refcount, f->f_key.shared);

	/* Not found and creation suppressed: hand back nothing. */
	if (flags & FUTEX_DONTCREATE) {
		umtx_key_release(&key);
		LINUX_CTR1(sys_futex, "futex_get uaddr %p null", uaddr);

	/* Not found: allocate and initialize a fresh futex. */
	tmpf = malloc(sizeof(*tmpf), M_FUTEX, M_WAITOK | M_ZERO);
	tmpf->f_uaddr = uaddr;
	tmpf->f_refcount = 1;
	tmpf->f_bitset = FUTEX_BITSET_MATCH_ANY;
	TAILQ_INIT(&tmpf->f_waiting_proc);

	/*
	 * Lock the new futex before an insert into the futex_list
	 * to prevent futex usage by other.
	 */
	LIST_INSERT_HEAD(&futex_list, tmpf, f_list);

	LINUX_CTR3(sys_futex, "futex_get uaddr %p ref %d shared %d new",
	    uaddr, tmpf->f_refcount, tmpf->f_key.shared);
/*
 * Wrapper around futex_get0(): optionally (FUTEX_CREATE_WP) allocates a
 * waiting_proc, and on success queues it at the head of the futex's
 * waiter list with its back-pointer set.  The wp allocation happens
 * before futex_get0() so the M_WAITOK malloc cannot sleep while the
 * futex is locked; it is freed again if the lookup fails.
 *
 * NOTE(review): truncated excerpt -- the flags parameter declaration,
 * wp_flags initialization, error return and braces are missing.
 */
futex_get(uint32_t *uaddr, struct waiting_proc **wp, struct futex **f,
	if (flags & FUTEX_CREATE_WP) {
		*wp = malloc(sizeof(struct waiting_proc), M_FUTEX_WP, M_WAITOK);
	error = futex_get0(uaddr, f, flags);
		/* Lookup failed: undo the speculative wp allocation. */
		if (flags & FUTEX_CREATE_WP)
			free(*wp, M_FUTEX_WP);
	if (flags & FUTEX_CREATE_WP) {
		TAILQ_INSERT_HEAD(&(*f)->f_waiting_proc, *wp, wp_list);
		(*wp)->wp_futex = *f;
/*
 * Put the current thread to sleep on waiter record "wp" of futex "f".
 * sx_sleep() atomically drops f's sx lock while sleeping; timeout is in
 * ticks (0 = sleep forever), PCATCH makes the sleep signal-interruptible.
 * After wakeup, the waiter may have been moved to another futex by
 * FUTEX_CMP_REQUEUE (FUTEX_WP_REQUEUED), in which case cleanup must
 * target wp->wp_futex rather than "f".
 *
 * NOTE(review): truncated excerpt -- the error declaration, the
 * re-locking of the (possibly new) futex and futex_put() calls after the
 * sleep are missing here.
 */
futex_sleep(struct futex *f, struct waiting_proc *wp, unsigned long timeout)
	FUTEX_ASSERT_LOCKED(f);
	LINUX_CTR4(sys_futex, "futex_sleep enter uaddr %p wp %p timo %ld ref %d",
	    f->f_uaddr, wp, timeout, f->f_refcount);
	error = sx_sleep(wp, &f->f_lck, PCATCH, "futex", timeout);
	if (wp->wp_flags & FUTEX_WP_REQUEUED) {
		/* Requeued while asleep: "f" is no longer our futex. */
		KASSERT(f != wp->wp_futex, ("futex != wp_futex"));
		LINUX_CTR5(sys_futex, "futex_sleep out error %d uaddr %p w"
		    " %p requeued uaddr %p ref %d",
		    error, f->f_uaddr, wp, wp->wp_futex->f_uaddr,
		    wp->wp_futex->f_refcount);

	LINUX_CTR3(sys_futex, "futex_sleep out error %d uaddr %p wp %p",
	    error, f->f_uaddr, wp);
/*
 * Wake up to "n" waiters on futex "f" whose wait bitset intersects
 * "bitset" (FUTEX_WAKE passes FUTEX_BITSET_MATCH_ANY).  Each woken
 * waiter is marked FUTEX_WP_REMOVED and unlinked so it cannot be woken
 * twice.  Called with f locked.
 *
 * NOTE(review): truncated excerpt -- the count variable, the wakeup
 * call itself (presumably wakeup_one(wp)), the n-limit check and the
 * return of the woken count are missing here.
 */
futex_wake(struct futex *f, int n, uint32_t bitset)
	struct waiting_proc *wp, *wpt;

	FUTEX_ASSERT_LOCKED(f);
	/* _SAFE iteration: entries are removed while walking the list. */
	TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
		LINUX_CTR3(sys_futex, "futex_wake uaddr %p wp %p ref %d",
		    f->f_uaddr, wp, f->f_refcount);
		/*
		 * Unless we find a matching bit in
		 * the bitset, continue searching.
		 */
		if (!(wp->wp_futex->f_bitset & bitset))
		wp->wp_flags |= FUTEX_WP_REMOVED;
		TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
/*
 * FUTEX_CMP_REQUEUE backend: wake up to "n" waiters on "f", then move up
 * to "n2" of the remaining waiters onto futex "f2" instead of waking
 * them.  Both futexes are locked by the caller (SX_DUPOK permits holding
 * two futex sx locks at once).
 *
 * NOTE(review): truncated excerpt -- the count variable, the n/n2 limit
 * checks, the actual wakeup call, the f2 refcount increment mentioned in
 * the comment below, and the return value are missing here.
 */
futex_requeue(struct futex *f, int n, struct futex *f2, int n2)
	struct waiting_proc *wp, *wpt;

	FUTEX_ASSERT_LOCKED(f);
	FUTEX_ASSERT_LOCKED(f2);

	TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
		/* First "n" waiters are woken, not requeued. */
		LINUX_CTR2(sys_futex, "futex_req_wake uaddr %p wp %p",
		wp->wp_flags |= FUTEX_WP_REMOVED;
		TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		/* Remaining waiters (up to n2) migrate to f2. */
		LINUX_CTR3(sys_futex, "futex_requeue uaddr %p wp %p to %p",
		    f->f_uaddr, wp, f2->f_uaddr);
		wp->wp_flags |= FUTEX_WP_REQUEUED;
		/* Move wp to wp_list of f2 futex */
		TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		TAILQ_INSERT_HEAD(&f2->f_waiting_proc, wp, wp_list);
		/*
		 * Thread which sleeps on wp after waking should
		 * acquire f2 lock, so increment refcount of f2 to
		 * prevent it from premature deallocation.
		 */
/*
 * FUTEX_WAIT/FUTEX_WAIT_BITSET backend: record the wait bitset on the
 * futex, convert the optional user timespec to ticks and sleep via
 * futex_sleep().  Linux returns ETIMEDOUT where sx_sleep() reports
 * EWOULDBLOCK, hence the translation at the bottom.
 *
 * NOTE(review): truncated excerpt -- timeout_hz declaration, the ts ==
 * NULL branch, copyin error handling and braces are missing here.
 * Also note tv.tv_usec is assigned the FULL converted interval
 * (sec*1e6 + nsec/1000) with tv_sec left 0; tvtohz() appears to be
 * expected to normalize this -- confirm against the complete source.
 */
futex_wait(struct futex *f, struct waiting_proc *wp, struct l_timespec *ts,
	struct l_timespec timeout = {0, 0};
	struct timeval tv = {0, 0};

	/* Remember which wakeup bits this waiter responds to. */
	f->f_bitset = bitset;

		error = copyin(ts, &timeout, sizeof(timeout));
		tv.tv_usec = timeout.tv_sec * 1000000 + timeout.tv_nsec / 1000;
		timeout_hz = tvtohz(&tv);

		if (timeout.tv_sec == 0 && timeout.tv_nsec == 0)
		/*
		 * If the user process requests a non null timeout,
		 * make sure we do not turn it into an infinite
		 * timeout because timeout_hz gets null.
		 *
		 * We use a minimal timeout of 1/hz. Maybe it would
		 * make sense to just return ETIMEDOUT without sleeping.
		 */
		if (((timeout.tv_sec != 0) || (timeout.tv_nsec != 0)) &&

	error = futex_sleep(f, wp, timeout_hz);
	/* Map FreeBSD's sleep-timeout errno to Linux's ETIMEDOUT. */
	if (error == EWOULDBLOCK)
/*
 * FUTEX_WAKE_OP helper: decode the Linux-encoded operation word
 * (op:4 | cmp:4 | oparg:12 | cmparg:12), perform the atomic RMW on
 * *uaddr, then evaluate the comparison against the value that was there
 * before the operation.  Returns the (boolean) comparison result; the
 * futex_*l() primitives return nonzero on fault.
 *
 * NOTE(review): the oparg/cmparg extraction relies on arithmetic right
 * shift of a signed int to sign-extend the 12-bit fields -- that is
 * implementation-defined in C (works on the compilers FreeBSD supports).
 * NOTE(review): truncated excerpt -- oldval/ret declarations, the
 * switch(op)/switch(cmp) headers, default cases and fault-return paths
 * are missing here.
 */
futex_atomic_op(struct thread *td, int encoded_op, uint32_t *uaddr)
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;	/* sign-extended 12-bit field */
	int cmparg = (encoded_op << 20) >> 20;	/* sign-extended 12-bit field */

	/* Linux FUTEX_OP_OPARG_SHIFT: treat oparg as a shift count. */
	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))

	if (ldebug(sys_futex))
		printf("futex_atomic_op: op = %d, cmp = %d, oparg = %x, "
		    "cmparg = %x, uaddr = %p\n",
		    op, cmp, oparg, cmparg, uaddr);

	/* XXX: Linux verifies access here and returns EFAULT */

		ret = futex_xchgl(oparg, uaddr, &oldval);
		ret = futex_addl(oparg, uaddr, &oldval);
		ret = futex_orl(oparg, uaddr, &oldval);
		/* FUTEX_OP_ANDN: AND with the complement of oparg. */
		ret = futex_andl(~oparg, uaddr, &oldval);
		ret = futex_xorl(oparg, uaddr, &oldval);

	/* Evaluate the encoded comparison against the pre-op value. */
	case FUTEX_OP_CMP_EQ:
		return (oldval == cmparg);
	case FUTEX_OP_CMP_NE:
		return (oldval != cmparg);
	case FUTEX_OP_CMP_LT:
		return (oldval < cmparg);
	case FUTEX_OP_CMP_GE:
		return (oldval >= cmparg);
	case FUTEX_OP_CMP_LE:
		return (oldval <= cmparg);
	case FUTEX_OP_CMP_GT:
		return (oldval > cmparg);
/*
 * Entry point for the Linux futex(2) system call.  Dispatches on
 * args->op after stripping the PRIVATE and CLOCK_REALTIME modifier
 * flags.  PI (priority-inheritance) operations are not implemented.
 *
 * NOTE(review): this excerpt is heavily truncated -- the "flags"/"error"
 * declarations, switch header, many braces, futex lock/unlock and
 * futex_put() cleanup calls, and most return statements are missing.
 * The comments below describe only what is visible.
 */
linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
	int clockrt, nrwake, op_ret, ret, val;
	struct linux_emuldata *em;
	struct waiting_proc *wp;
	struct futex *f, *f2;

	/* PRIVATE futexes use per-process keys; otherwise shared keys. */
	if (args->op & LINUX_FUTEX_PRIVATE_FLAG) {
		args->op &= ~LINUX_FUTEX_PRIVATE_FLAG;
	flags = FUTEX_SHARED;
	/*
	 * Currently support for switching between CLOCK_MONOTONIC and
	 * CLOCK_REALTIME is not present. However Linux forbids the use of
	 * FUTEX_CLOCK_REALTIME with any op except FUTEX_WAIT_BITSET and
	 * FUTEX_WAIT_REQUEUE_PI.
	 */
	clockrt = args->op & LINUX_FUTEX_CLOCK_REALTIME;
	args->op = args->op & ~LINUX_FUTEX_CLOCK_REALTIME;
	if (clockrt && args->op != LINUX_FUTEX_WAIT_BITSET &&
	    args->op != LINUX_FUTEX_WAIT_REQUEUE_PI)

	case LINUX_FUTEX_WAIT:
		/* Plain WAIT == WAIT_BITSET with the match-any bitset. */
		args->val3 = FUTEX_BITSET_MATCH_ANY;

	case LINUX_FUTEX_WAIT_BITSET:
		LINUX_CTR3(sys_futex, "WAIT uaddr %p val %d val3 %d",
		    args->uaddr, args->val, args->val3);
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex,
			    "futex_wait uaddr %p val %d val3 %d"),
			    args->uaddr, args->val, args->val3);
		error = futex_get(args->uaddr, &wp, &f,
		    flags | FUTEX_CREATE_WP);
		/* Re-read *uaddr under the futex lock to close the race. */
		error = copyin(args->uaddr, &val, sizeof(val));
			LINUX_CTR1(sys_futex, "WAIT copyin failed %d",
		/* Value changed before we could sleep: don't block. */
		if (val != args->val) {
			LINUX_CTR4(sys_futex,
			    "WAIT uaddr %p val %d != uval %d val3 %d",
			    args->uaddr, args->val, val, args->val3);
			return (EWOULDBLOCK);
		error = futex_wait(f, wp, args->timeout, args->val3);

	case LINUX_FUTEX_WAKE:
		/* Plain WAKE == WAKE_BITSET with the match-any bitset. */
		args->val3 = FUTEX_BITSET_MATCH_ANY;

	case LINUX_FUTEX_WAKE_BITSET:
		LINUX_CTR3(sys_futex, "WAKE uaddr %p val % d val3 %d",
		    args->uaddr, args->val, args->val3);
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wake uaddr %p val %d val3 %d"),
			    args->uaddr, args->val, args->val3);
		/* DONTCREATE: no waiters possible if no futex exists. */
		error = futex_get(args->uaddr, NULL, &f,
		    flags | FUTEX_DONTCREATE);
			td->td_retval[0] = 0;
		td->td_retval[0] = futex_wake(f, args->val, args->val3);

	case LINUX_FUTEX_CMP_REQUEUE:
		LINUX_CTR5(sys_futex, "CMP_REQUEUE uaddr %p "
		    "val %d val3 %d uaddr2 %p val2 %d",
		    args->uaddr, args->val, args->val3, args->uaddr2,
		    (int)(unsigned long)args->timeout);
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_cmp_requeue uaddr %p "
			    "val %d val3 %d uaddr2 %p val2 %d"),
			    args->uaddr, args->val, args->val3, args->uaddr2,
			    (int)(unsigned long)args->timeout);

		/*
		 * Linux allows this, we would not, it is an incorrect
		 * usage of declared ABI, so return EINVAL.
		 */
		if (args->uaddr == args->uaddr2)
		error = futex_get(args->uaddr, NULL, &f, flags);
		/*
		 * To avoid deadlocks return EINVAL if second futex
		 * exists at this time.
		 *
		 * Glibc fall back to FUTEX_WAKE in case of any error
		 * returned by FUTEX_CMP_REQUEUE.
		 */
		error = futex_get(args->uaddr2, NULL, &f2,
		    flags | FUTEX_DONTEXISTS);
		error = copyin(args->uaddr, &val, sizeof(val));
			LINUX_CTR1(sys_futex, "CMP_REQUEUE copyin failed %d",
		/* CMP part: *uaddr must still equal val3 or EAGAIN. */
		if (val != args->val3) {
			LINUX_CTR2(sys_futex, "CMP_REQUEUE val %d != uval %d",
		/* For REQUEUE ops, "timeout" carries the val2 count. */
		nrwake = (int)(unsigned long)args->timeout;
		td->td_retval[0] = futex_requeue(f, args->val, f2, nrwake);

	case LINUX_FUTEX_WAKE_OP:
		LINUX_CTR5(sys_futex, "WAKE_OP "
		    "uaddr %p op %d val %x uaddr2 %p val3 %x",
		    args->uaddr, args->op, args->val,
		    args->uaddr2, args->val3);
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wake_op "
			    "uaddr %p op %d val %x uaddr2 %p val3 %x"),
			    args->uaddr, args->op, args->val,
			    args->uaddr2, args->val3);
		error = futex_get(args->uaddr, NULL, &f, flags);
		if (args->uaddr != args->uaddr2)
			error = futex_get(args->uaddr2, NULL, &f2, flags);

		/*
		 * This function returns positive number as results and
		 */
		op_ret = futex_atomic_op(td, args->val3, args->uaddr2);

		/* XXX: We don't handle the EFAULT yet. */
		if (op_ret != -EFAULT) {
		ret = futex_wake(f, args->val, args->val3);
			/* Second wake count also travels in "timeout". */
			nrwake = (int)(unsigned long)args->timeout;
				op_ret += futex_wake(f2, nrwake, args->val3);
				op_ret += futex_wake(f, nrwake, args->val3);
		td->td_retval[0] = ret;

	case LINUX_FUTEX_LOCK_PI:
		/* not yet implemented */
		    "op LINUX_FUTEX_LOCK_PI not implemented\n");

	case LINUX_FUTEX_UNLOCK_PI:
		/* not yet implemented */
		    "op LINUX_FUTEX_UNLOCK_PI not implemented\n");

	case LINUX_FUTEX_TRYLOCK_PI:
		/* not yet implemented */
		    "op LINUX_FUTEX_TRYLOCK_PI not implemented\n");

	case LINUX_FUTEX_REQUEUE:
		/*
		 * Glibc does not use this operation since version 2.3.3,
		 * as it is racy and replaced by FUTEX_CMP_REQUEUE operation.
		 * Glibc versions prior to 2.3.3 fall back to FUTEX_WAKE when
		 * FUTEX_REQUEUE returned EINVAL.
		 */
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		/* Warn only once per process about the deprecated op. */
		if ((em->flags & LINUX_XDEPR_REQUEUEOP) == 0) {
			    "unsupported futex_requeue op\n");
			em->flags |= LINUX_XDEPR_REQUEUEOP;

	case LINUX_FUTEX_WAIT_REQUEUE_PI:
		/* not yet implemented */
		    "op FUTEX_WAIT_REQUEUE_PI not implemented\n");

	case LINUX_FUTEX_CMP_REQUEUE_PI:
		/* not yet implemented */
		    "op LINUX_FUTEX_CMP_REQUEUE_PI not implemented\n");

		    "linux_sys_futex: unknown op %d\n", args->op);
/*
 * Linux set_robust_list(2): record the userland robust-futex list head
 * in the per-process emulation data.  The length must exactly match the
 * ABI structure size, as Linux requires.
 *
 * NOTE(review): truncated excerpt -- the EINVAL/0 returns and braces
 * are missing; em_find(..., EMUL_DOLOCK) acquires emul_lock, released
 * explicitly below after storing the head pointer.
 */
linux_set_robust_list(struct thread *td, struct linux_set_robust_list_args *args)
	struct linux_emuldata *em;

	if (ldebug(set_robust_list))
		printf(ARGS(set_robust_list, "head %p len %d"),
		    args->head, args->len);

	/* Reject any len other than the exact ABI struct size. */
	if (args->len != sizeof(struct linux_robust_list_head))

	em = em_find(td->td_proc, EMUL_DOLOCK);
	em->robust_futexes = args->head;
	EMUL_UNLOCK(&emul_lock);
/*
 * Linux get_robust_list(2): copy out the robust-list head pointer and
 * the ABI structure length for the calling process (pid == 0) or, with
 * sufficient privilege, for another process located via pfind().
 *
 * NOTE(review): truncated excerpt -- the args->pid == 0 branch header,
 * ESRCH/EPERM handling, PROC_UNLOCK and braces are missing.  The
 * priv_check() calls gate cross-process inspection (credential-change
 * privileges are used as the access test here).
 */
linux_get_robust_list(struct thread *td, struct linux_get_robust_list_args *args)
	struct linux_emuldata *em;
	struct linux_robust_list_head *head;
	l_size_t len = sizeof(struct linux_robust_list_head);

	if (ldebug(get_robust_list))
		printf(ARGS(get_robust_list, ""));

	/* Own process: read our emuldata directly. */
	em = em_find(td->td_proc, EMUL_DONTLOCK);
	head = em->robust_futexes;
	/* Other process: look it up and check privileges. */
	p = pfind(args->pid);
	em = em_find(p, EMUL_DONTLOCK);
	if (priv_check(td, PRIV_CRED_SETUID) ||
	    priv_check(td, PRIV_CRED_SETEUID) ||
	head = em->robust_futexes;

	/* Copy out the length first, then the head pointer itself. */
	error = copyout(&len, args->len, sizeof(l_size_t));
	error = copyout(head, args->head, sizeof(struct linux_robust_list_head));
/*
 * Handle one robust futex owned by an exiting process: if the futex
 * word's TID field names this process, mark it FUTEX_OWNER_DIED (via a
 * compare-and-swap so a racing owner change is not clobbered) and wake
 * one waiter so user code can observe the dead-owner state.
 *
 * NOTE(review): per the Linux robust-futex ABI the futex word holds a
 * *thread* id; this compares against p->p_pid -- correct only while the
 * emulation maps Linux threads onto processes.  Verify against the
 * complete source / linux_emuldata.
 * NOTE(review): truncated excerpt -- the error/f declarations, the CAS
 * retry loop around casuword32(), and return statements are missing.
 */
handle_futex_death(struct proc *p, uint32_t *uaddr, int pi)
	uint32_t uval, nval, mval;

	if (copyin(uaddr, &uval, 4))
	if ((uval & FUTEX_TID_MASK) == p->p_pid) {
		/* Preserve the WAITERS bit while flagging the dead owner. */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		nval = casuword32(uaddr, uval, mval);
		/* PI futexes are handled by the kernel; only wake non-PI. */
		if (!pi && (uval & FUTEX_WAITERS)) {
			error = futex_get(uaddr, NULL, &f,
			    FUTEX_DONTCREATE | FUTEX_SHARED);
			futex_wake(f, 1, FUTEX_BITSET_MATCH_ANY);
/*
 * Fetch one robust-list link from user space.  The stored value is a
 * tagged pointer: bit 0 flags a PI futex (returned via *pi -- that store
 * is missing from this excerpt) and the remaining bits are the address
 * of the next linux_robust_list entry, returned in *entry.
 *
 * NOTE(review): truncated excerpt -- the uentry declaration, the
 * *pi = uentry & 1 store and the return statements are missing.
 */
fetch_robust_entry(struct linux_robust_list **entry,
    struct linux_robust_list **head, int *pi)
	/* Read the raw l_ulong link word from the user address "head". */
	if (copyin((const void *)head, &uentry, sizeof(l_ulong)))
	/* Strip the PI tag bit to recover the actual pointer. */
	*entry = (void *)(uentry & ~1UL);
/* This walks the list of robust futexes releasing them. */
/*
 * Called at process exit: walk the process's registered robust-futex
 * list (a singly linked list in USER memory, every link fetched with
 * copyin), marking each owned futex dead via handle_futex_death().
 * "limit" caps the walk at 2048 entries so a corrupt or cyclic user
 * list cannot wedge process teardown; sched_relinquish() yields the
 * CPU periodically during a long walk.
 *
 * NOTE(review): "(uint32_t *)entry + futex_offset" scales futex_offset
 * by sizeof(uint32_t); the Linux ABI defines futex_offset as a BYTE
 * offset from the list entry to the futex word, which would require
 * (uint32_t *)((caddr_t)entry + futex_offset).  Flagging, not fixing:
 * this excerpt is missing too many lines to rewrite safely.
 * NOTE(review): truncated excerpt -- futex_offset/rc declarations, the
 * NULL-head early return, loop advance (entry = next_entry etc.), the
 * limit decrement and the pending != NULL guard are missing.
 */
release_futexes(struct proc *p)
	struct linux_robust_list_head *head = NULL;
	struct linux_robust_list *entry, *next_entry, *pending;
	unsigned int limit = 2048, pi, next_pi, pip;
	struct linux_emuldata *em;

	em = em_find(p, EMUL_DONTLOCK);
	head = em->robust_futexes;

	/* First link, the byte offset to the futex word, and any pending entry. */
	if (fetch_robust_entry(&entry, PTRIN(&head->list.next), &pi))
	if (copyin(&head->futex_offset, &futex_offset, sizeof(futex_offset)))
	if (fetch_robust_entry(&pending, PTRIN(&head->pending_list), &pip))

	/* The list is circular: &head->list marks the end. */
	while (entry != &head->list) {
		/* Fetch the next link before handling the current entry. */
		rc = fetch_robust_entry(&next_entry, PTRIN(&entry->next), &next_pi);
		/* "pending" is being acquired/released; handled separately below. */
		if (entry != pending)
			if (handle_futex_death(p, (uint32_t *)entry + futex_offset, pi))
		sched_relinquish(curthread);

	handle_futex_death(p, (uint32_t *)pending + futex_offset, pip);