/*
 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation assures deterministic lock
 * granting behavior, so that slocks and xlocks are interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
46 struct lock_class lock_class_sx = {
48 LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE
/*
 * Without INVARIANT_SUPPORT the internal assertion hook compiles away
 * to nothing; the real _sx_assert() below is only built when
 * INVARIANT_SUPPORT is defined.
 */
#ifndef INVARIANT_SUPPORT
#define	_sx_assert(sx, what, file, line)
#endif
56 sx_init(struct sx *sx, const char *description)
58 struct lock_object *lock;
60 lock = &sx->sx_object;
61 KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
62 ("sx lock %s %p already initialized", description, sx));
63 bzero(sx, sizeof(*sx));
64 lock->lo_class = &lock_class_sx;
65 lock->lo_name = description;
66 lock->lo_flags = LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE |
68 sx->sx_lock = mtx_pool_find(sx);
70 cv_init(&sx->sx_shrd_cv, description);
72 cv_init(&sx->sx_excl_cv, description);
74 sx->sx_xholder = NULL;
76 LOCK_LOG_INIT(lock, 0);
82 sx_destroy(struct sx *sx)
85 LOCK_LOG_DESTROY(&sx->sx_object, 0);
87 KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
88 0), ("%s (%s): holders or waiters\n", __func__,
89 sx->sx_object.lo_name));
92 cv_destroy(&sx->sx_shrd_cv);
93 cv_destroy(&sx->sx_excl_cv);
95 WITNESS_DESTROY(&sx->sx_object);
99 _sx_slock(struct sx *sx, const char *file, int line)
102 mtx_lock(sx->sx_lock);
103 KASSERT(sx->sx_xholder != curthread,
104 ("%s (%s): slock while xlock is held @ %s:%d\n", __func__,
105 sx->sx_object.lo_name, file, line));
108 * Loop in case we lose the race for lock acquisition.
110 while (sx->sx_cnt < 0) {
112 cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
116 /* Acquire a shared lock. */
119 LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
120 WITNESS_LOCK(&sx->sx_object, 0, file, line);
122 mtx_unlock(sx->sx_lock);
126 _sx_try_slock(struct sx *sx, const char *file, int line)
129 mtx_lock(sx->sx_lock);
130 if (sx->sx_cnt >= 0) {
132 LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
133 WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
134 mtx_unlock(sx->sx_lock);
137 LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
138 mtx_unlock(sx->sx_lock);
144 _sx_xlock(struct sx *sx, const char *file, int line)
147 mtx_lock(sx->sx_lock);
150 * With sx locks, we're absolutely not permitted to recurse on
151 * xlocks, as it is fatal (deadlock). Normally, recursion is handled
152 * by WITNESS, but as it is not semantically correct to hold the
153 * xlock while in here, we consider it API abuse and put it under
156 KASSERT(sx->sx_xholder != curthread,
157 ("%s (%s): xlock already held @ %s:%d", __func__,
158 sx->sx_object.lo_name, file, line));
160 /* Loop in case we lose the race for lock acquisition. */
161 while (sx->sx_cnt != 0) {
163 cv_wait(&sx->sx_excl_cv, sx->sx_lock);
167 MPASS(sx->sx_cnt == 0);
169 /* Acquire an exclusive lock. */
171 sx->sx_xholder = curthread;
173 LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
174 WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
176 mtx_unlock(sx->sx_lock);
180 _sx_try_xlock(struct sx *sx, const char *file, int line)
183 mtx_lock(sx->sx_lock);
184 if (sx->sx_cnt == 0) {
186 sx->sx_xholder = curthread;
187 LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
188 WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
190 mtx_unlock(sx->sx_lock);
193 LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
194 mtx_unlock(sx->sx_lock);
200 _sx_sunlock(struct sx *sx, const char *file, int line)
203 _sx_assert(sx, SX_SLOCKED, file, line);
204 mtx_lock(sx->sx_lock);
206 WITNESS_UNLOCK(&sx->sx_object, 0, file, line);
212 * If we just released the last shared lock, wake any waiters up, giving
213 * exclusive lockers precedence. In order to make sure that exclusive
214 * lockers won't be blocked forever, don't wake shared lock waiters if
215 * there are exclusive lock waiters.
217 if (sx->sx_excl_wcnt > 0) {
219 cv_signal(&sx->sx_excl_cv);
220 } else if (sx->sx_shrd_wcnt > 0)
221 cv_broadcast(&sx->sx_shrd_cv);
223 LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);
225 mtx_unlock(sx->sx_lock);
229 _sx_xunlock(struct sx *sx, const char *file, int line)
232 _sx_assert(sx, SX_XLOCKED, file, line);
233 mtx_lock(sx->sx_lock);
234 MPASS(sx->sx_cnt == -1);
236 WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
240 sx->sx_xholder = NULL;
243 * Wake up waiters if there are any. Give precedence to slock waiters.
245 if (sx->sx_shrd_wcnt > 0)
246 cv_broadcast(&sx->sx_shrd_cv);
247 else if (sx->sx_excl_wcnt > 0)
248 cv_signal(&sx->sx_excl_cv);
250 LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);
252 mtx_unlock(sx->sx_lock);
256 _sx_try_upgrade(struct sx *sx, const char *file, int line)
259 _sx_assert(sx, SX_SLOCKED, file, line);
260 mtx_lock(sx->sx_lock);
262 if (sx->sx_cnt == 1) {
264 sx->sx_xholder = curthread;
266 LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 1, file, line);
267 WITNESS_UPGRADE(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
270 mtx_unlock(sx->sx_lock);
273 LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 0, file, line);
274 mtx_unlock(sx->sx_lock);
280 _sx_downgrade(struct sx *sx, const char *file, int line)
283 _sx_assert(sx, SX_XLOCKED, file, line);
284 mtx_lock(sx->sx_lock);
285 MPASS(sx->sx_cnt == -1);
287 WITNESS_DOWNGRADE(&sx->sx_object, 0, file, line);
290 sx->sx_xholder = NULL;
291 if (sx->sx_shrd_wcnt > 0)
292 cv_broadcast(&sx->sx_shrd_cv);
294 LOCK_LOG_LOCK("XDOWNGRADE", &sx->sx_object, 0, 0, file, line);
296 mtx_unlock(sx->sx_lock);
#ifdef INVARIANT_SUPPORT
#undef _sx_assert

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(struct sx *sx, int what, const char *file, int line)
{

	switch (what) {
	case SX_LOCKED:
	case SX_SLOCKED:
#ifdef WITNESS
		witness_assert(&sx->sx_object, what, file, line);
#else
		mtx_lock(sx->sx_lock);
		if (sx->sx_cnt <= 0 &&
		    (what == SX_SLOCKED || sx->sx_xholder != curthread))
			printf("Lock %s not %slocked @ %s:%d\n",
			    sx->sx_object.lo_name, (what == SX_SLOCKED) ?
			    "share " : "", file, line);
		mtx_unlock(sx->sx_lock);
#endif
		break;
	case SX_XLOCKED:
		mtx_lock(sx->sx_lock);
		if (sx->sx_xholder != curthread)
			printf("Lock %s not exclusively locked @ %s:%d\n",
			    sx->sx_object.lo_name, file, line);
		mtx_unlock(sx->sx_lock);
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */