2 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice(s), this list of conditions and the following disclaimer as
9 * the first lines of this file unmodified other than the possible
10 * addition of one or more copyright notices.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice(s), this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
19 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
29 * Shared/exclusive locks. This implementation assures deterministic lock
30 * granting behavior, so that slocks and xlocks are interleaved.
32 * Priority propagation will not generally raise the priority of lock holders,
33 * so should not be relied upon in combination with sx locks.
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
41 #include <sys/param.h>
42 #include <sys/systm.h>
44 #include <sys/linker_set.h>
45 #include <sys/condvar.h>
47 #include <sys/mutex.h>
50 #include <sys/lock_profile.h>
/*
 * Forward declarations for the lock_class method hooks defined below.
 * NOTE(review): interior lines are elided in this extract; the original
 * file likely declares further members (e.g. .lc_name, .lc_lock) that
 * are not visible here — confirm against the full source.
 */
55 static void db_show_sx(struct lock_object *lock);
57 static void lock_sx(struct lock_object *lock, int how);
58 static int unlock_sx(struct lock_object *lock);
/*
 * Class descriptor for sx locks: a sleepable sleep lock that is
 * recursable and upgradable, with a ddb "show" hook and an unlock hook
 * wired to the static functions above.
 */
60 struct lock_class lock_class_sx = {
62 .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
64 .lc_ddb_show = db_show_sx,
67 .lc_unlock = unlock_sx,
71 #define _sx_assert(sx, what, file, line)
/*
 * lock_class acquire hook: recover the containing sx from the generic
 * lock_object and acquire it.  Only a fragment is visible here; the
 * declarations and the actual acquisition call are elided in this
 * extract.
 */
75 lock_sx(struct lock_object *lock, int how)
79 sx = (struct sx *)lock;
/*
 * lock_class release hook: recover the sx from the lock_object and
 * release it.  The assertion requires the lock to be held (shared or
 * exclusive) and not recursed before release.  The release call and
 * return value are elided in this extract.
 */
87 unlock_sx(struct lock_object *lock)
91 sx = (struct sx *)lock;
92 sx_assert(sx, SX_LOCKED | LA_NOTRECURSED);
/*
 * SYSINIT-driven constructor: unpack the sx_args cookie and initialize
 * the described lock at boot time.
 */
103 sx_sysinit(void *arg)
105 struct sx_args *sargs = arg;
107 sx_init(sargs->sa_sx, sargs->sa_desc);
/*
 * Initialize an sx lock: borrow an interlock mutex from the lockbuilder
 * mutex pool (keyed on the sx address), set up one condition variable
 * per waiter class (shared and exclusive) with zeroed waiter counts,
 * clear the exclusive holder, and register the lock with the profiling
 * and witness machinery as sleepable/recursable/upgradable.
 */
111 sx_init(struct sx *sx, const char *description)
114 sx->sx_lock = mtx_pool_find(mtxpool_lockbuilder, sx);
116 cv_init(&sx->sx_shrd_cv, description);
117 sx->sx_shrd_wcnt = 0;
118 cv_init(&sx->sx_excl_cv, description);
119 sx->sx_excl_wcnt = 0;
120 sx->sx_xholder = NULL;
121 lock_profile_object_init(&sx->lock_object, &lock_class_sx, description);
122 lock_init(&sx->lock_object, &lock_class_sx, description, NULL,
123 LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE);
/*
 * Tear down an sx lock.  It is a bug to destroy a lock that still has
 * holders (sx_cnt != 0) or sleepers on either condition variable, hence
 * the KASSERT.  The pool interlock is not destroyed here — it is owned
 * by the mutex pool, not by this lock.
 */
127 sx_destroy(struct sx *sx)
130 KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
131 0), ("%s (%s): holders or waiters\n", __func__,
132 sx->lock_object.lo_name));
135 cv_destroy(&sx->sx_shrd_cv);
136 cv_destroy(&sx->sx_excl_cv);
138 lock_profile_object_destroy(&sx->lock_object);
139 lock_destroy(&sx->lock_object);
/*
 * Acquire a shared lock.  All state transitions happen under the pool
 * interlock sx_lock.  Taking an slock while this thread already holds
 * the xlock would self-deadlock, so that is asserted fatal up front.
 * A negative sx_cnt means an exclusive holder; sleep on the shared CV
 * until it goes non-negative, re-checking after every wakeup since the
 * lock may be stolen between the broadcast and reacquiring sx_lock.
 * NOTE(review): the lines that bump sx_shrd_wcnt around cv_wait() and
 * increment sx_cnt are elided in this extract.
 */
143 _sx_slock(struct sx *sx, const char *file, int line)
145 uint64_t waittime = 0;
148 mtx_lock(sx->sx_lock);
149 KASSERT(sx->sx_xholder != curthread,
150 ("%s (%s): slock while xlock is held @ %s:%d\n", __func__,
151 sx->lock_object.lo_name, file, line));
152 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line);
155 * Loop in case we lose the race for lock acquisition.
157 while (sx->sx_cnt < 0) {
/* Record contention for lock profiling before sleeping. */
159 lock_profile_obtain_lock_failed(&sx->lock_object, &contested, &waittime);
160 cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
164 /* Acquire a shared lock. */
168 lock_profile_obtain_lock_success(&sx->lock_object, contested, waittime, file, line);
170 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
171 WITNESS_LOCK(&sx->lock_object, 0, file, line);
/* Per-thread held-lock count, used for sanity checks elsewhere. */
172 curthread->td_locks++;
174 mtx_unlock(sx->sx_lock);
/*
 * Try to acquire a shared lock without sleeping.  Succeeds when there
 * is no exclusive holder (sx_cnt >= 0); both paths log the attempt and
 * drop the interlock.  NOTE(review): the sx_cnt increment and the
 * return statements are elided in this extract.
 */
178 _sx_try_slock(struct sx *sx, const char *file, int line)
181 mtx_lock(sx->sx_lock);
182 if (sx->sx_cnt >= 0) {
184 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
185 WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
186 curthread->td_locks++;
187 mtx_unlock(sx->sx_lock);
/* Failure path: exclusive holder present. */
190 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
191 mtx_unlock(sx->sx_lock);
/*
 * Acquire an exclusive lock.  Recursing on an xlock would sleep on a CV
 * the thread itself must signal — a guaranteed deadlock — so it is
 * treated as API abuse and asserted fatal rather than left to WITNESS.
 * Any holder at all (sx_cnt != 0, shared or exclusive) forces a sleep
 * on the exclusive CV.  NOTE(review): the sx_excl_wcnt bookkeeping
 * around cv_wait() and the line setting sx_cnt to -1 are elided in
 * this extract.
 */
197 _sx_xlock(struct sx *sx, const char *file, int line)
200 uint64_t waittime = 0;
202 mtx_lock(sx->sx_lock);
205 * With sx locks, we're absolutely not permitted to recurse on
206 * xlocks, as it is fatal (deadlock). Normally, recursion is handled
207 * by WITNESS, but as it is not semantically correct to hold the
208 * xlock while in here, we consider it API abuse and put it under
211 KASSERT(sx->sx_xholder != curthread,
212 ("%s (%s): xlock already held @ %s:%d", __func__,
213 sx->lock_object.lo_name, file, line));
214 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
217 /* Loop in case we lose the race for lock acquisition. */
218 while (sx->sx_cnt != 0) {
220 lock_profile_obtain_lock_failed(&sx->lock_object, &contested, &waittime);
221 cv_wait(&sx->sx_excl_cv, sx->sx_lock);
/* Exiting the loop with the interlock held, the lock must be free. */
225 MPASS(sx->sx_cnt == 0);
227 /* Acquire an exclusive lock. */
229 sx->sx_xholder = curthread;
231 lock_profile_obtain_lock_success(&sx->lock_object, contested, waittime, file, line);
232 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, 0, file, line);
233 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
234 curthread->td_locks++;
236 mtx_unlock(sx->sx_lock);
/*
 * Try to acquire an exclusive lock without sleeping.  Succeeds only
 * when the lock is entirely free (sx_cnt == 0); records curthread as
 * the exclusive holder.  NOTE(review): the line setting sx_cnt to -1
 * and the return statements are elided in this extract.
 */
240 _sx_try_xlock(struct sx *sx, const char *file, int line)
243 mtx_lock(sx->sx_lock);
244 if (sx->sx_cnt == 0) {
246 sx->sx_xholder = curthread;
247 LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, 1, file, line);
248 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
250 curthread->td_locks++;
251 mtx_unlock(sx->sx_lock);
/* Failure path: lock held shared or exclusive. */
254 LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, 0, file, line);
255 mtx_unlock(sx->sx_lock);
/*
 * Release a shared lock.  When the count drops to zero, wake waiters
 * with exclusive lockers taking precedence: shared waiters are only
 * broadcast if no exclusive waiter exists, preventing writer
 * starvation.  NOTE(review): the sx_cnt decrement is elided in this
 * extract; the sx_cnt == 0 test below presumably follows it — confirm
 * against the full file.
 */
261 _sx_sunlock(struct sx *sx, const char *file, int line)
263 _sx_assert(sx, SX_SLOCKED, file, line);
264 mtx_lock(sx->sx_lock);
266 curthread->td_locks--;
267 WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
272 if (sx->sx_cnt == 0) {
273 lock_profile_release_lock(&sx->lock_object);
277 * If we just released the last shared lock, wake any waiters up, giving
278 * exclusive lockers precedence. In order to make sure that exclusive
279 * lockers won't be blocked forever, don't wake shared lock waiters if
280 * there are exclusive lock waiters.
282 if (sx->sx_excl_wcnt > 0) {
/* Only one exclusive waiter can win, so signal rather than broadcast. */
284 cv_signal(&sx->sx_excl_cv);
285 } else if (sx->sx_shrd_wcnt > 0)
286 cv_broadcast(&sx->sx_shrd_cv);
288 LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
290 mtx_unlock(sx->sx_lock);
/*
 * Release an exclusive lock (sx_cnt must be exactly -1).  Clears the
 * holder and wakes waiters — here shared waiters get precedence, the
 * mirror image of _sx_sunlock()'s policy, which keeps grant order
 * interleaved between reader and writer batches.  NOTE(review): the
 * line restoring sx_cnt to 0 is elided in this extract.
 */
294 _sx_xunlock(struct sx *sx, const char *file, int line)
296 _sx_assert(sx, SX_XLOCKED, file, line);
297 mtx_lock(sx->sx_lock);
298 MPASS(sx->sx_cnt == -1);
300 curthread->td_locks--;
301 WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
305 sx->sx_xholder = NULL;
308 * Wake up waiters if there are any. Give precedence to slock waiters.
310 if (sx->sx_shrd_wcnt > 0)
311 cv_broadcast(&sx->sx_shrd_cv);
312 else if (sx->sx_excl_wcnt > 0)
313 cv_signal(&sx->sx_excl_cv);
315 LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, 0, file, line);
317 lock_profile_release_lock(&sx->lock_object);
318 mtx_unlock(sx->sx_lock);
/*
 * Try to upgrade a shared lock to exclusive without sleeping.  Only
 * possible when the caller holds the sole shared reference
 * (sx_cnt == 1); concurrent readers force failure rather than a wait,
 * since two upgraders would deadlock each other.  NOTE(review): the
 * line flipping sx_cnt from 1 to -1 and the return statements are
 * elided in this extract.
 */
322 _sx_try_upgrade(struct sx *sx, const char *file, int line)
325 _sx_assert(sx, SX_SLOCKED, file, line);
326 mtx_lock(sx->sx_lock);
328 if (sx->sx_cnt == 1) {
330 sx->sx_xholder = curthread;
332 LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, 1, file, line);
333 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
/* Failure path: other shared holders present. */
336 mtx_unlock(sx->sx_lock);
339 LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, 0, file, line);
340 mtx_unlock(sx->sx_lock);
/*
 * Downgrade an exclusive lock to shared.  Always succeeds: the caller
 * keeps the lock (now as a reader), so only shared waiters are woken —
 * exclusive waiters could not obtain the lock anyway while this thread
 * still holds it shared.  NOTE(review): the line setting sx_cnt from
 * -1 to 1 is elided in this extract.
 */
346 _sx_downgrade(struct sx *sx, const char *file, int line)
349 _sx_assert(sx, SX_XLOCKED, file, line);
350 mtx_lock(sx->sx_lock);
351 MPASS(sx->sx_cnt == -1);
353 WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
356 sx->sx_xholder = NULL;
357 if (sx->sx_shrd_wcnt > 0)
358 cv_broadcast(&sx->sx_shrd_cv);
360 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
362 mtx_unlock(sx->sx_lock);
365 #ifdef INVARIANT_SUPPORT
/*
 * Assertion backend for sx_assert(), compiled only with invariant
 * support.  Dispatches on `what`; panics when the asserted state does
 * not hold.  NOTE(review): the switch statement, some case labels,
 * break/return lines and the #ifdef WITNESS alternation between the
 * witness_assert() and manual paths are elided in this extract —
 * the groupings below are inferred and should be confirmed.
 */
371 * In the non-WITNESS case, sx_assert() can only detect that at least
372 * *some* thread owns an slock, but it cannot guarantee that *this*
373 * thread owns an slock.
376 _sx_assert(struct sx *sx, int what, const char *file, int line)
/* Skip all checking once the system is already panicking. */
379 if (panicstr != NULL)
/* Case: lock held (shared or exclusive), optionally not recursed. */
383 case SX_LOCKED | LA_NOTRECURSED:
386 witness_assert(&sx->lock_object, what, file, line);
388 mtx_lock(sx->sx_lock);
389 if (sx->sx_cnt <= 0 &&
390 (what == SX_SLOCKED || sx->sx_xholder != curthread))
391 panic("Lock %s not %slocked @ %s:%d\n",
392 sx->lock_object.lo_name, (what == SX_SLOCKED) ?
393 "share " : "", file, line);
394 mtx_unlock(sx->sx_lock);
/* Case: exclusively locked by this thread. */
398 mtx_lock(sx->sx_lock);
399 if (sx->sx_xholder != curthread)
400 panic("Lock %s not exclusively locked @ %s:%d\n",
401 sx->lock_object.lo_name, file, line);
402 mtx_unlock(sx->sx_lock);
/* Case: unlocked by this thread (witness path, then manual path). */
406 witness_assert(&sx->lock_object, what, file, line);
409 * We are able to check only exclusive lock here,
410 * we cannot assert that *this* thread owns slock.
412 mtx_lock(sx->sx_lock);
413 if (sx->sx_xholder == curthread)
414 panic("Lock %s exclusively locked @ %s:%d\n",
415 sx->lock_object.lo_name, file, line);
416 mtx_unlock(sx->sx_lock);
/* Unrecognized assertion request is itself a bug. */
420 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
424 #endif /* INVARIANT_SUPPORT */
/*
 * ddb "show lock" callback: print the lock's state using the sx_cnt
 * encoding (negative = exclusively held, positive = that many shared
 * holders, zero = unlocked) plus the waiter counts.  NOTE(review): the
 * line assigning td (presumably sx_xholder) and the surrounding #ifdef
 * DDB guard are elided in this extract.
 */
428 db_show_sx(struct lock_object *lock)
433 sx = (struct sx *)lock;
435 db_printf(" state: ");
436 if (sx->sx_cnt < 0) {
438 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
439 td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
440 } else if (sx->sx_cnt > 0)
441 db_printf("SLOCK: %d locks\n", sx->sx_cnt);
443 db_printf("UNLOCKED\n");
444 db_printf(" waiters: %d shared, %d exclusive\n", sx->sx_shrd_wcnt,
449 * Check to see if a thread that is blocked on a sleep queue is actually
450 * blocked on an sx lock. If so, output some details and return true.
451 * If the lock has an exclusive owner, return that in *ownerp.
454 sx_chain(struct thread *td, struct thread **ownerp)
460 * First, see if it looks like td is blocked on a condition
464 if (cv->cv_description != td->td_wmesg)
468 * Ok, see if it looks like td is blocked on the exclusive
469 * condition variable.
471 sx = (struct sx *)((char *)cv - offsetof(struct sx, sx_excl_cv));
472 if (LOCK_CLASS(&sx->lock_object) == &lock_class_sx &&
473 sx->sx_excl_wcnt > 0)
477 * Second, see if it looks like td is blocked on the shared
478 * condition variable.
480 sx = (struct sx *)((char *)cv - offsetof(struct sx, sx_shrd_cv));
481 if (LOCK_CLASS(&sx->lock_object) == &lock_class_sx &&
482 sx->sx_shrd_wcnt > 0)
485 /* Doesn't seem to be an sx lock. */
489 /* We think we have an sx lock, so output some details. */
490 db_printf("blocked on sx \"%s\" ", td->td_wmesg);
491 if (sx->sx_cnt >= 0) {
492 db_printf("SLOCK (count %d)\n", sx->sx_cnt);
495 db_printf("XLOCK\n");
496 *ownerp = sx->sx_xholder;