2 * Copyright (c) 2017 Mellanox Technologies, Ltd.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/queue.h>
31 #include <linux/ww_mutex.h>
/*
 * Per-waiter record placed on the global waiter list while a thread
 * blocks on a ww_mutex; used by the deadlock scan in the lock slow path.
 */
33 struct ww_mutex_thread {
34 TAILQ_ENTRY(ww_mutex_thread) entry;	/* linkage on ww_mutex_head */
35 struct thread *thread;			/* the waiting thread */
36 struct ww_mutex *lock;			/* lock the thread is waiting for */
/*
 * Global list of threads currently waiting on some ww_mutex, and the
 * mutex that guards the list and all ww_mutex wait/wakeup bookkeeping.
 */
39 static TAILQ_HEAD(, ww_mutex_thread) ww_mutex_head;
40 static struct mtx ww_mutex_global;
/*
 * Initialize the global waiter list and its guard mutex.  Registered
 * below to run at SI_SUB_LOCK / SI_ORDER_SECOND during boot.
 */
43 linux_ww_init(void *arg)
45 TAILQ_INIT(&ww_mutex_head);
46 mtx_init(&ww_mutex_global, "lkpi-ww-mtx", NULL, MTX_DEF);
49 SYSINIT(ww_init, SI_SUB_LOCK, SI_ORDER_SECOND, linux_ww_init, NULL);
/*
 * Tear down the global guard mutex at shutdown (paired with
 * linux_ww_init via the SYSUNINIT below).
 */
52 linux_ww_uninit(void *arg)
54 mtx_destroy(&ww_mutex_global);
57 SYSUNINIT(ww_uninit, SI_SUB_LOCK, SI_ORDER_SECOND, linux_ww_uninit, NULL);
/*
 * NOTE(review): these two statements appear to be the bodies of small
 * helpers that take/drop the global ww_mutex guard mutex; the enclosing
 * function definitions are not visible in this chunk — confirm against
 * the full file before relying on this.
 */
62 mtx_lock(&ww_mutex_global);
68 mtx_unlock(&ww_mutex_global);
71 /* lock a mutex with deadlock avoidance */
/*
 * Acquire slow path for a ww_mutex.  If the underlying sx lock cannot be
 * taken immediately, the thread registers itself on the global waiter
 * list (so concurrent acquirers can detect a two-way ownership cycle)
 * and sleeps on the lock's condvar until an unlock wakes it, retrying
 * sx_try_xlock each time.  With catch_signal set, the sleep is
 * interruptible (cv_wait_sig).
 * NOTE(review): several interior lines of this function are not visible
 * in this chunk; the comments below describe only the code shown.
 */
73 linux_ww_mutex_lock_sub(struct ww_mutex *lock, int catch_signal)
75 struct ww_mutex_thread entry;
76 struct ww_mutex_thread *other;
/* Fast path failed — enqueue as a waiter and block. */
80 if (unlikely(sx_try_xlock(&lock->base.sx) == 0)) {
81 entry.thread = curthread;
/* Publish this waiter; list is guarded by ww_mutex_global. */
83 TAILQ_INSERT_TAIL(&ww_mutex_head, &entry, entry);
/* Current owner of the contended sx lock, if any. */
86 struct thread *owner = (struct thread *)
87 SX_OWNER(lock->base.sx.sx_lock);
89 /* scan for deadlock */
90 TAILQ_FOREACH(other, &ww_mutex_head, entry) {
95 * If another thread is owning our
96 * lock and is at the same time trying
97 * to acquire a lock this thread owns,
98 * that means deadlock.
/* Cycle: the owner of our lock is itself waiting on a lock we own. */
100 if (other->thread == owner &&
101 (struct thread *)SX_OWNER(
102 other->lock->base.sx.sx_lock) == curthread) {
/* Interruptible sleep; a delivered signal aborts the wait. */
108 if (cv_wait_sig(&lock->condvar, &ww_mutex_global) != 0) {
/* Uninterruptible sleep until an unlocker signals the condvar. */
113 cv_wait(&lock->condvar, &ww_mutex_global);
/* Loop until the sx lock is finally acquired. */
115 } while (sx_try_xlock(&lock->base.sx) == 0);
/* Done waiting (acquired or aborted) — deregister this waiter. */
117 TAILQ_REMOVE(&ww_mutex_head, &entry, entry);
119 /* if the lock is free, wakeup next lock waiter, if any */
120 if ((struct thread *)SX_OWNER(lock->base.sx.sx_lock) == NULL)
121 cv_signal(&lock->condvar);
/*
 * Release a ww_mutex: drop the underlying sx lock and wake one waiter
 * sleeping on the lock's condvar so it can retry sx_try_xlock.
 * NOTE(review): the function declarator/braces and any surrounding
 * guard-mutex calls are not visible in this chunk.
 */
128 linux_ww_mutex_unlock_sub(struct ww_mutex *lock)
130 /* protect ww_mutex ownership change */
132 sx_xunlock(&lock->base.sx);
133 /* wakeup a lock waiter, if any */
134 cv_signal(&lock->condvar);