/*-
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/queue.h>

#include <linux/sched.h>
#include <linux/ww_mutex.h>

/* Bookkeeping for one thread waiting on a ww_mutex. */
struct ww_mutex_thread {
        TAILQ_ENTRY(ww_mutex_thread) entry;
        struct thread *thread;
        struct ww_mutex *lock;
};

/* Global list of waiting threads and the mutex protecting it. */
static TAILQ_HEAD(, ww_mutex_thread) ww_mutex_head;
static struct mtx ww_mutex_global;

static void
linux_ww_init(void *arg)
{
        TAILQ_INIT(&ww_mutex_head);
        mtx_init(&ww_mutex_global, "lkpi-ww-mtx", NULL, MTX_DEF);
}

SYSINIT(ww_init, SI_SUB_LOCK, SI_ORDER_SECOND, linux_ww_init, NULL);

static void
linux_ww_uninit(void *arg)
{
        mtx_destroy(&ww_mutex_global);
}

SYSUNINIT(ww_uninit, SI_SUB_LOCK, SI_ORDER_SECOND, linux_ww_uninit, NULL);

static inline void
linux_ww_lock(void)
{
        mtx_lock(&ww_mutex_global);
}

static inline void
linux_ww_unlock(void)
{
        mtx_unlock(&ww_mutex_global);
}

/* lock a mutex with deadlock avoidance */
int
linux_ww_mutex_lock_sub(struct ww_mutex *lock, int catch_signal)
{
        struct task_struct *task;
        struct ww_mutex_thread entry;
        struct ww_mutex_thread *other;
        int retval = 0;

        task = current;

        linux_ww_lock();
        if (unlikely(sx_try_xlock(&lock->base.sx) == 0)) {
                /* record this thread as a waiter on the lock */
                entry.thread = curthread;
                entry.lock = lock;
                TAILQ_INSERT_TAIL(&ww_mutex_head, &entry, entry);

                do {
                        struct thread *owner = (struct thread *)
                            SX_OWNER(lock->base.sx.sx_lock);

                        /* scan for deadlock */
                        TAILQ_FOREACH(other, &ww_mutex_head, entry) {
                                /* skip own thread */
                                if (other == &entry)
                                        continue;
                                /*
                                 * If another thread owns the lock we are
                                 * waiting for and is at the same time
                                 * trying to acquire a lock this thread
                                 * owns, we have a deadlock.
                                 */
                                if (other->thread == owner &&
                                    (struct thread *)SX_OWNER(
                                    other->lock->base.sx.sx_lock) == curthread) {
                                        retval = -EDEADLK;
                                        goto done;
                                }
                        }
                        if (catch_signal) {
                                retval = -cv_wait_sig(&lock->condvar, &ww_mutex_global);
                                if (retval != 0) {
                                        linux_schedule_save_interrupt_value(task, retval);
                                        retval = -EINTR;
                                        goto done;
                                }
                        } else {
                                cv_wait(&lock->condvar, &ww_mutex_global);
                        }
                } while (sx_try_xlock(&lock->base.sx) == 0);
done:
                TAILQ_REMOVE(&ww_mutex_head, &entry, entry);

                /* if the lock is free, wake up the next lock waiter, if any */
                if ((struct thread *)SX_OWNER(lock->base.sx.sx_lock) == NULL)
                        cv_signal(&lock->condvar);
        }
        linux_ww_unlock();
        return (retval);
}

void
linux_ww_mutex_unlock_sub(struct ww_mutex *lock)
{
        /* protect ww_mutex ownership change */
        linux_ww_lock();
        sx_xunlock(&lock->base.sx);
        /* wake up a lock waiter, if any */
        cv_signal(&lock->condvar);
        linux_ww_unlock();
}
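
/*
 * Illustrative sketch only (compiled out): how a LinuxKPI consumer would
 * typically drive the two functions above through the Linux ww_mutex API
 * that linuxkpi's <linux/ww_mutex.h> mirrors (ww_mutex_lock(),
 * ww_mutex_unlock() and the -EDEADLK back-off pattern).  The helper below
 * and its arguments are hypothetical and not part of this file.
 */
#if 0
static int
example_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
    struct ww_acquire_ctx *ctx)
{
        int error;

retry:
        error = ww_mutex_lock(a, ctx);
        if (error != 0)
                return (error);
        error = ww_mutex_lock(b, ctx);
        if (error == -EDEADLK) {
                /* deadlock detected: drop the first lock and try again */
                ww_mutex_unlock(a);
                goto retry;
        } else if (error != 0) {
                ww_mutex_unlock(a);
                return (error);
        }
        /* ... both locks are held here ... */
        ww_mutex_unlock(b);
        ww_mutex_unlock(a);
        return (0);
}
#endif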

int
linux_mutex_lock_interruptible(mutex_t *m)
{
        int error;

        error = -sx_xlock_sig(&m->sx);
        if (error != 0) {
                linux_schedule_save_interrupt_value(current, error);
                error = -EINTR;
        }
        return (error);
}
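
/*
 * Illustrative sketch only (compiled out): the usual Linux-style calling
 * convention for mutex_lock_interruptible(), which maps onto the function
 * above.  The helper below and its argument are hypothetical.
 */
#if 0
static int
example_do_locked_work(mutex_t *m)
{
        int error;

        error = mutex_lock_interruptible(m);
        if (error != 0)
                return (error); /* typically -EINTR when a signal arrived */
        /* ... work while holding the mutex ... */
        mutex_unlock(m);
        return (0);
}
#endif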

int
linux_down_write_killable(struct rw_semaphore *rw)
{
        int error;

        error = -sx_xlock_sig(&rw->sx);
        if (error != 0) {
                linux_schedule_save_interrupt_value(current, error);
                error = -EINTR;
        }
        return (error);
}
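
/*
 * Illustrative sketch only (compiled out): the Linux-style pattern for
 * taking an rw_semaphore for writing via down_write_killable(), which maps
 * onto the function above.  The helper below and its argument are
 * hypothetical.
 */
#if 0
static int
example_update_under_sem(struct rw_semaphore *rw)
{
        int error;

        error = down_write_killable(rw);
        if (error != 0)
                return (error); /* typically -EINTR if interrupted */
        /* ... exclusive access here ... */
        up_write(rw);
        return (0);
}
#endif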