2 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice(s), this list of conditions and the following disclaimer as
9 * the first lines of this file unmodified other than the possible
10 * addition of one or more copyright notices.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice(s), this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
19 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
29 * Shared/exclusive locks. This implementation assures deterministic lock
30 * granting behavior, so that slocks and xlocks are interleaved.
32 * Priority propagation will not generally raise the priority of lock holders,
33 * so should not be relied upon in combination with sx locks.
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
41 #include <sys/param.h>
42 #include <sys/systm.h>
44 #include <sys/linker_set.h>
45 #include <sys/condvar.h>
47 #include <sys/mutex.h>
50 #include <sys/lock_profile.h>
/* Forward declaration for the DDB "show lock" handler defined below. */
55 static void db_show_sx(struct lock_object *lock);
/*
 * Lock class descriptor for sx locks: a sleepable sleep lock that is
 * recursable and upgradable.
 * NOTE(review): the remaining initializer fields of this struct appear
 * to be elided from this view of the file — confirm against the full
 * source.
 */
58 struct lock_class lock_class_sx = {
60 LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
/*
 * Stub out _sx_assert() when assertion support is compiled out so the
 * unlock/upgrade/downgrade paths below can call it unconditionally.
 * NOTE(review): presumably guarded by #ifndef INVARIANTS — the guard
 * line is not visible here; verify.
 */
67 #define	_sx_assert(sx, what, file, line)
/*
 * SYSINIT glue: initialize an sx lock described by a struct sx_args
 * (lock pointer + description) at boot time.
 * NOTE(review): the enclosing function header (sx_sysinit) is elided
 * from this view; only the body statements are visible.
 */
73 struct sx_args *sargs = arg;
75 sx_init(sargs->sa_sx, sargs->sa_desc);
/*
 * Initialize an sx lock: set up the pool-mutex interlock, the two wait
 * channels (one for shared waiters, one for exclusive waiters), clear
 * the exclusive holder, and register the lock with the lock-profiling
 * and witness/lock_init frameworks.
 * NOTE(review): the return-type line and the initialization of the
 * count/waiter fields appear elided from this view.
 */
79 sx_init(struct sx *sx, const char *description)
/* Interlock protecting all sx state; borrowed from the lockbuilder pool. */
82 sx->sx_lock = mtx_pool_find(mtxpool_lockbuilder, sx);
/* Shared waiters sleep on sx_shrd_cv, exclusive waiters on sx_excl_cv. */
84 cv_init(&sx->sx_shrd_cv, description);
86 cv_init(&sx->sx_excl_cv, description);
/* No exclusive owner yet. */
88 sx->sx_xholder = NULL;
89 lock_profile_object_init(&sx->sx_object, &lock_class_sx, description);
90 lock_init(&sx->sx_object, &lock_class_sx, description, NULL,
91     LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE);
/*
 * Tear down an sx lock.  The lock must be completely idle: no holders
 * (sx_cnt == 0) and no threads waiting on either condition variable —
 * the KASSERT below enforces this.
 */
95 sx_destroy(struct sx *sx)
98 KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
99     0), ("%s (%s): holders or waiters\n", __func__,
100     sx->sx_object.lo_name));
/* Destroy both wait channels; the pool interlock is not ours to free. */
103 cv_destroy(&sx->sx_shrd_cv);
104 cv_destroy(&sx->sx_excl_cv);
106 lock_profile_object_destroy(&sx->sx_object);
107 lock_destroy(&sx->sx_object);
/*
 * Acquire a shared lock.  sx_cnt > 0 means that many shared holders;
 * sx_cnt < 0 means an exclusive holder, so we sleep on sx_shrd_cv until
 * the exclusive holder releases.  Recursing from an xlock to an slock
 * would deadlock (we would wait on ourselves), hence the KASSERT.
 * NOTE(review): several interior lines (waiter-count bookkeeping, the
 * sx_cnt increment, the loop's closing brace) are elided from this view.
 */
111 _sx_slock(struct sx *sx, const char *file, int line)
113 uint64_t waittime = 0;
/* All state transitions happen under the pool-mutex interlock. */
116 mtx_lock(sx->sx_lock);
117 KASSERT(sx->sx_xholder != curthread,
118     ("%s (%s): slock while xlock is held @ %s:%d\n", __func__,
119     sx->sx_object.lo_name, file, line));
120 WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER, file, line);
123  * Loop in case we lose the race for lock acquisition.
126 lock_profile_waitstart(&waittime);
/* Negative count == exclusively held; wait until it is released. */
127 while (sx->sx_cnt < 0) {
129 lock_profile_obtain_lock_failed(&sx->sx_object, &contested);
/* cv_wait drops and re-takes the interlock, so recheck in the loop. */
130 cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
134 /* Acquire a shared lock. */
138 lock_profile_obtain_lock_success(&sx->sx_object, waittime, file, line);
140 LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
141 WITNESS_LOCK(&sx->sx_object, 0, file, line);
/* Per-thread held-lock count, checked on thread exit. */
142 curthread->td_locks++;
144 mtx_unlock(sx->sx_lock);
/*
 * Try to acquire a shared lock without sleeping.  Succeeds whenever the
 * lock is not exclusively held (sx_cnt >= 0).
 * NOTE(review): the sx_cnt increment and the return statements in both
 * branches are elided from this view.
 */
148 _sx_try_slock(struct sx *sx, const char *file, int line)
151 mtx_lock(sx->sx_lock);
/* No exclusive holder: take a shared reference and succeed. */
152 if (sx->sx_cnt >= 0) {
154 LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
155 WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
156 curthread->td_locks++;
157 mtx_unlock(sx->sx_lock);
/* Exclusively held: log the failed try and bail out. */
160 LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
161 mtx_unlock(sx->sx_lock);
/*
 * Acquire an exclusive lock.  Sleeps on sx_excl_cv until the lock is
 * completely free (sx_cnt == 0), then marks it exclusively held by
 * curthread.  Recursion on the xlock is forbidden and trapped by the
 * KASSERT below even when WITNESS is not compiled in.
 * NOTE(review): waiter-count bookkeeping, the sx_cnt = -1 transition,
 * and several closing braces are elided from this view.
 */
167 _sx_xlock(struct sx *sx, const char *file, int line)
170 uint64_t waittime = 0;
172 mtx_lock(sx->sx_lock);
175  * With sx locks, we're absolutely not permitted to recurse on
176  * xlocks, as it is fatal (deadlock). Normally, recursion is handled
177  * by WITNESS, but as it is not semantically correct to hold the
178  * xlock while in here, we consider it API abuse and put it under
181 KASSERT(sx->sx_xholder != curthread,
182     ("%s (%s): xlock already held @ %s:%d", __func__,
183     sx->sx_object.lo_name, file, line));
184 WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
188 lock_profile_waitstart(&waittime);
189 /* Loop in case we lose the race for lock acquisition. */
/* Any holders at all (shared or exclusive) block an xlock. */
190 while (sx->sx_cnt != 0) {
192 lock_profile_obtain_lock_failed(&sx->sx_object, &contested);
/* cv_wait drops and re-takes the interlock, so recheck in the loop. */
193 cv_wait(&sx->sx_excl_cv, sx->sx_lock);
197 MPASS(sx->sx_cnt == 0);
199 /* Acquire an exclusive lock. */
/* Record ownership for recursion/assertion checks. */
201 sx->sx_xholder = curthread;
203 lock_profile_obtain_lock_success(&sx->sx_object, waittime, file, line);
204 LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
205 WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
206 curthread->td_locks++;
208 mtx_unlock(sx->sx_lock);
/*
 * Try to acquire an exclusive lock without sleeping.  Succeeds only
 * when the lock is completely free (sx_cnt == 0).
 * NOTE(review): the sx_cnt = -1 transition and the return statements
 * in both branches are elided from this view.
 */
212 _sx_try_xlock(struct sx *sx, const char *file, int line)
215 mtx_lock(sx->sx_lock);
/* Free: claim exclusive ownership. */
216 if (sx->sx_cnt == 0) {
218 sx->sx_xholder = curthread;
219 LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
220 WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
222 curthread->td_locks++;
223 mtx_unlock(sx->sx_lock);
/* Held (shared or exclusive): log the failed try and bail out. */
226 LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
227 mtx_unlock(sx->sx_lock);
/*
 * Release a shared lock.  On the last shared release, wake exclusive
 * waiters in preference to shared waiters so writers cannot be starved
 * indefinitely by a stream of readers.
 * NOTE(review): the sx_cnt decrement and the last-holder check that
 * presumably guards the wakeup appear elided from this view.
 */
233 _sx_sunlock(struct sx *sx, const char *file, int line)
236 _sx_assert(sx, SX_SLOCKED, file, line);
237 mtx_lock(sx->sx_lock);
239 curthread->td_locks--;
240 WITNESS_UNLOCK(&sx->sx_object, 0, file, line);
246 lock_profile_release_lock(&sx->sx_object);
248  * If we just released the last shared lock, wake any waiters up, giving
249  * exclusive lockers precedence. In order to make sure that exclusive
250  * lockers won't be blocked forever, don't wake shared lock waiters if
251  * there are exclusive lock waiters.
/* One exclusive waiter gets the lock; shared waiters keep sleeping. */
253 if (sx->sx_excl_wcnt > 0) {
255 cv_signal(&sx->sx_excl_cv);
/* No writers waiting: let all pending readers proceed at once. */
256 } else if (sx->sx_shrd_wcnt > 0)
257 cv_broadcast(&sx->sx_shrd_cv);
259 LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);
261 mtx_unlock(sx->sx_lock);
/*
 * Release an exclusive lock.  Unlike sunlock, wakeup precedence here
 * goes to shared waiters (cv_broadcast) over exclusive waiters
 * (cv_signal), interleaving readers and writers as described in the
 * file header.
 * NOTE(review): the sx_cnt reset to 0 appears elided from this view.
 */
265 _sx_xunlock(struct sx *sx, const char *file, int line)
268 _sx_assert(sx, SX_XLOCKED, file, line);
269 mtx_lock(sx->sx_lock);
/* Exactly one exclusive holder is encoded as sx_cnt == -1. */
270 MPASS(sx->sx_cnt == -1);
272 curthread->td_locks--;
273 WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
/* Drop ownership before waking anyone. */
277 sx->sx_xholder = NULL;
279 lock_profile_release_lock(&sx->sx_object);
281  * Wake up waiters if there are any. Give precedence to slock waiters.
283 if (sx->sx_shrd_wcnt > 0)
284 cv_broadcast(&sx->sx_shrd_cv);
285 else if (sx->sx_excl_wcnt > 0)
286 cv_signal(&sx->sx_excl_cv);
288 LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);
290 mtx_unlock(sx->sx_lock);
/*
 * Try to upgrade a shared lock to exclusive without sleeping.  Only
 * possible when the caller is the sole shared holder (sx_cnt == 1).
 * NOTE(review): the sx_cnt = -1 transition and the return statements
 * in both branches are elided from this view.
 */
294 _sx_try_upgrade(struct sx *sx, const char *file, int line)
297 _sx_assert(sx, SX_SLOCKED, file, line);
298 mtx_lock(sx->sx_lock);
/* We are the only reader: convert in place to an exclusive hold. */
300 if (sx->sx_cnt == 1) {
302 sx->sx_xholder = curthread;
304 LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 1, file, line);
305 WITNESS_UPGRADE(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
308 mtx_unlock(sx->sx_lock);
/* Other readers present: upgrade would block, so fail the try. */
311 LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 0, file, line);
312 mtx_unlock(sx->sx_lock);
/*
 * Downgrade an exclusive lock to a shared lock.  Always succeeds:
 * the caller keeps a (now shared) hold, and any shared waiters are
 * woken so they can share the lock with us.  Exclusive waiters keep
 * sleeping until the shared holders drain.
 * NOTE(review): the sx_cnt transition from -1 to 1 appears elided
 * from this view.
 */
318 _sx_downgrade(struct sx *sx, const char *file, int line)
321 _sx_assert(sx, SX_XLOCKED, file, line);
322 mtx_lock(sx->sx_lock);
323 MPASS(sx->sx_cnt == -1);
325 WITNESS_DOWNGRADE(&sx->sx_object, 0, file, line);
/* No longer exclusively owned. */
328 sx->sx_xholder = NULL;
/* Let pending readers in alongside us. */
329 if (sx->sx_shrd_wcnt > 0)
330 cv_broadcast(&sx->sx_shrd_cv);
332 LOCK_LOG_LOCK("XDOWNGRADE", &sx->sx_object, 0, 0, file, line);
334 mtx_unlock(sx->sx_lock);
337 #ifdef INVARIANT_SUPPORT
343  * In the non-WITNESS case, sx_assert() can only detect that at least
344  * *some* thread owns an slock, but it cannot guarantee that *this*
345  * thread owns an slock.
/*
 * Assert that an sx lock is held in the manner given by 'what'
 * (SX_SLOCKED / SX_XLOCKED / etc.), panicking if not.  Skipped entirely
 * once the system has panicked, since lock state is unreliable then.
 * NOTE(review): this function's switch statement — the case labels
 * between the branches below — appears elided from this view; the
 * branches are, in order: shared-or-better (WITNESS and non-WITNESS
 * variants), exclusive, not-exclusive, and unknown assertion.
 */
348 _sx_assert(struct sx *sx, int what, const char *file, int line)
/* After a panic, locks may be in any state; don't second-guess them. */
351 if (panicstr != NULL)
/* With WITNESS, per-thread ownership can be checked precisely. */
357 witness_assert(&sx->sx_object, what, file, line);
/* Without WITNESS: can only verify the lock is held by *someone*. */
359 mtx_lock(sx->sx_lock);
360 if (sx->sx_cnt <= 0 &&
361     (what == SX_SLOCKED || sx->sx_xholder != curthread))
362 panic("Lock %s not %slocked @ %s:%d\n",
363     sx->sx_object.lo_name, (what == SX_SLOCKED) ?
364     "share " : "", file, line);
365 mtx_unlock(sx->sx_lock);
/* Exclusive assertion: xholder identifies the owner exactly. */
369 mtx_lock(sx->sx_lock);
370 if (sx->sx_xholder != curthread)
371 panic("Lock %s not exclusively locked @ %s:%d\n",
372     sx->sx_object.lo_name, file, line);
373 mtx_unlock(sx->sx_lock);
377 witness_assert(&sx->sx_object, what, file, line);
380  * We are able to check only exclusive lock here,
381  * we cannot assert that *this* thread owns slock.
/* Unlocked assertion: we must at least not be the exclusive owner. */
383 mtx_lock(sx->sx_lock);
384 if (sx->sx_xholder == curthread)
385 panic("Lock %s exclusively locked @ %s:%d\n",
386     sx->sx_object.lo_name, file, line);
387 mtx_unlock(sx->sx_lock);
/* default: caller passed a 'what' value we do not recognize. */
391 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
395 #endif /* INVARIANT_SUPPORT */
/*
 * DDB helper: print the state of an sx lock — exclusive owner (with
 * tid/pid/comm), shared hold count, or unlocked — plus waiter counts.
 * NOTE(review): the assignment of 'td' (presumably sx_xholder) and the
 * second argument of the final db_printf appear elided from this view.
 */
399 db_show_sx(struct lock_object *lock)
404 sx = (struct sx *)lock;
406 db_printf(" state: ");
/* Negative count encodes an exclusive holder. */
407 if (sx->sx_cnt < 0) {
409 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
410     td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
411 } else if (sx->sx_cnt > 0)
412 db_printf("SLOCK: %d locks\n", sx->sx_cnt);
414 db_printf("UNLOCKED\n");
415 db_printf(" waiters: %d shared, %d exclusive\n", sx->sx_shrd_wcnt,
420  * Check to see if a thread that is blocked on a sleep queue is actually
421  * blocked on an sx lock. If so, output some details and return true.
422  * If the lock has an exclusive owner, return that in *ownerp.
/*
 * NOTE(review): the assignment of 'cv' from the thread's wait channel,
 * the goto/return statements in each branch, and the final closing
 * brace appear elided from this view.
 */
425 sx_chain(struct thread *td, struct thread **ownerp)
431  * First, see if it looks like td is blocked on a condition
/* Wait-message mismatch means td is not sleeping on this cv at all. */
435 if (cv->cv_description != td->td_wmesg)
439  * Ok, see if it looks like td is blocked on the exclusive
440  * condition variable.
/* Recover the enclosing sx from the cv's offset within struct sx. */
442 sx = (struct sx *)((char *)cv - offsetof(struct sx, sx_excl_cv));
443 if (LOCK_CLASS(&sx->sx_object) == &lock_class_sx &&
444     sx->sx_excl_wcnt > 0)
448  * Second, see if it looks like td is blocked on the shared
449  * condition variable.
451 sx = (struct sx *)((char *)cv - offsetof(struct sx, sx_shrd_cv));
452 if (LOCK_CLASS(&sx->sx_object) == &lock_class_sx &&
453     sx->sx_shrd_wcnt > 0)
456 /* Doesn't seem to be an sx lock. */
460 /* We think we have an sx lock, so output some details. */
461 db_printf("blocked on sx \"%s\" ", td->td_wmesg);
/* Non-negative count: shared mode — no single owner to report. */
462 if (sx->sx_cnt >= 0) {
463 db_printf("SLOCK (count %d)\n", sx->sx_cnt);
/* Exclusive mode: report the owning thread to the caller. */
466 db_printf("XLOCK\n");
467 *ownerp = sx->sx_xholder;