3 * The Regents of the University of California. All rights reserved.
6 * John S. Dyson. All rights reserved.
8 * This code contains ideas from software contributed to Berkeley by
9 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
10 * System project at Carnegie-Mellon University.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
47 #include "opt_global.h"
49 #include <sys/param.h>
51 #include <sys/kernel.h>
54 #include <sys/lockmgr.h>
55 #include <sys/mutex.h>
57 #include <sys/systm.h>
58 #include <sys/lock_profile.h>
60 #include <sys/stack.h>
/* True when the caller asked for a non-blocking (try) operation. */
63 #define LOCKMGR_TRYOP(x) ((x) & LK_NOWAIT)
/* Map a try operation onto the LOP_TRYLOCK witness flag, else 0. */
64 #define LOCKMGR_TRYW(x) (LOCKMGR_TRYOP((x)) ? LOP_TRYLOCK : 0)
/* Lock is held neither exclusively nor shared. */
65 #define LOCKMGR_UNHELD(x) (((x) & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0)
/* 'td' is not a valid owner: neither curthread nor the kernel pseudo-owner. */
66 #define LOCKMGR_NOTOWNER(td) ((td) != curthread && (td) != LK_KERNPROC)
/* Forward declarations for the lock_class method table below. */
68 static void assert_lockmgr(struct lock_object *lock, int what);
71 static void db_show_lockmgr(struct lock_object *lock);
73 static void lock_lockmgr(struct lock_object *lock, int how);
74 static int unlock_lockmgr(struct lock_object *lock);
/*
 * Lock class descriptor that hooks lockmgr locks into the generic
 * lock_object framework (witness ordering, ddb show, etc.).
 * NOTE(review): the .lc_name member and closing brace are elided in
 * this chunk of the file.
 */
76 struct lock_class lock_class_lockmgr = {
78 .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
79 .lc_assert = assert_lockmgr,
81 .lc_ddb_show = db_show_lockmgr,
83 .lc_lock = lock_lockmgr,
84 .lc_unlock = unlock_lockmgr,
/* Without INVARIANT_SUPPORT, lockmgr assertions compile away to nothing. */
88 #define _lockmgr_assert(lkp, what, file, line)
92 * Locking primitives implementation.
93 * Locks provide shared/exclusive synchronization.
/* lc_assert hook: lockmgr locks do not implement generic lock assertions. */
97 assert_lockmgr(struct lock_object *lock, int what)
100 panic("lockmgr locks do not support assertions");
/* lc_lock hook: lockmgr locks cannot be used as sleepq interlocks. */
104 lock_lockmgr(struct lock_object *lock, int how)
107 panic("lockmgr locks do not support sleep interlocking");
/* lc_unlock hook: see lock_lockmgr() — same restriction applies. */
111 unlock_lockmgr(struct lock_object *lock)
114 panic("lockmgr locks do not support sleep interlocking");
/* Adjust the per-thread held-lock count by 'x' (may be negative). */
117 #define COUNT(td, x) ((td)->td_locks += (x))
/* All flag bits that mark the lock as busy in any way. */
118 #define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
119 LK_SHARE_NONZERO | LK_WAIT_NONZERO)
/* Sleep until the 'wanted' flag bits clear; definitions appear below. */
121 static int acquire(struct lock **lkpp, int extflags, int wanted,
122 const char *wmesg, int prio, int timo, int *contested, uint64_t *waittime);
123 static int acquiredrain(struct lock *lkp, int extflags, const char *wmesg,
/*
 * Grant 'incr' shared references on 'lkp' to thread 'td'.  Caller is
 * expected to hold the interlock.  (The tail of this function is elided
 * from this chunk.)
 */
127 sharelock(struct thread *td, struct lock *lkp, int incr) {
128 lkp->lk_flags |= LK_SHARE_NONZERO;
129 lkp->lk_sharecount += incr;
/*
 * Release 'decr' shared references on 'lkp'.  When the last shared
 * holder leaves, LK_SHARE_NONZERO is cleared; if an exclusive or
 * upgrade waiter exists the count is zeroed directly (the waiter
 * wakeup presumably occurs in lines elided from this chunk).
 */
134 shareunlock(struct thread *td, struct lock *lkp, int decr) {
136 KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));
/* Dropping the final shared reference. */
139 if (lkp->lk_sharecount == decr) {
140 lkp->lk_flags &= ~LK_SHARE_NONZERO;
141 if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
144 lkp->lk_sharecount = 0;
146 lkp->lk_sharecount -= decr;
/*
 * Sleep on 'lkp' until none of the flag bits in 'wanted' remain set.
 * Called with — and returns with — the interlock held (msleep drops it
 * while asleep).  The lock identity may change via lk_newlock while we
 * sleep, in which case *lkpp is updated to point at the replacement.
 */
151 acquire(struct lock **lkpp, int extflags, int wanted, const char *wmesg,
152 int prio, int timo, int *contested, uint64_t *waittime)
154 struct lock *lkp = *lkpp;
156 int error, iprio, itimo;
/* Substitute the lock's own wmesg/prio/timo for "default" arguments. */
158 iwmesg = (wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg;
159 iprio = (prio != LK_PRIO_DEFAULT) ? prio : lkp->lk_prio;
160 itimo = (timo != LK_TIMO_DEFAULT) ? timo : lkp->lk_timo;
163 "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
164 lkp, extflags, wanted);
/* Non-blocking request that would have to wait: fail immediately. */
166 if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
/* Record contention for lock profiling before entering the sleep loop. */
169 if ((lkp->lk_flags & wanted) != 0)
170 lock_profile_obtain_lock_failed(&lkp->lk_object, contested, waittime);
172 while ((lkp->lk_flags & wanted) != 0) {
174 "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
176 lkp->lk_flags |= LK_WAIT_NONZERO;
/* Sleep on the lock; honor the timeout only for LK_TIMELOCK callers. */
178 error = msleep(lkp, lkp->lk_interlock, iprio, iwmesg,
179 ((extflags & LK_TIMELOCK) ? itimo : 0));
181 if (lkp->lk_waitcount == 0)
182 lkp->lk_flags &= ~LK_WAIT_NONZERO;
/* LK_SLEEPFAIL: the caller wants an error after any sleep. */
185 if (extflags & LK_SLEEPFAIL) {
/* The lock was replaced while we slept; chase lk_newlock. */
189 if (lkp->lk_newlock != NULL) {
190 mtx_lock(lkp->lk_newlock->lk_interlock);
191 mtx_unlock(lkp->lk_interlock);
192 if (lkp->lk_waitcount == 0)
193 wakeup((void *)(&lkp->lk_newlock));
194 *lkpp = lkp = lkp->lk_newlock;
197 mtx_assert(lkp->lk_interlock, MA_OWNED);
202 * Set, change, or release a lock.
204 * Shared requests increment the shared count. Exclusive requests set the
205 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
206 * accepted shared locks and shared-to-exclusive upgrades to go away.
/*
 * Main entry point: set, change, or release a lockmgr lock according to
 * (flags & LK_TYPE_MASK).  The switch cases' labels (LK_SHARED,
 * LK_DOWNGRADE, LK_UPGRADE, LK_EXCLUSIVE, LK_RELEASE, LK_DRAIN) are
 * elided from this chunk; the case bodies below are annotated from the
 * surviving comments and code.
 */
209 _lockmgr_args(struct lock *lkp, u_int flags, struct mtx *interlkp,
210 const char *wmesg, int prio, int timo, char *file, int line)
215 int extflags, lockflags;
217 uint64_t waitstart = 0;
/* Operating on a destroyed lock is fatal unless we are panicking. */
223 if (lkp->lk_flags & LK_DESTROYED) {
224 if (flags & LK_INTERLOCK)
225 mtx_unlock(interlkp);
226 if (panicstr != NULL)
228 panic("%s: %p lockmgr is destroyed", __func__, lkp);
231 mtx_lock(lkp->lk_interlock);
233 "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
234 "td == %p", lkp, (wmesg != LK_WMESG_DEFAULT) ? wmesg :
235 lkp->lk_wmesg, lkp->lk_lockholder, lkp->lk_exclusivecount, flags,
239 struct stack stack; /* XXX */
241 CTRSTACK(KTR_LOCK, &stack, 0, 1);
/* Caller-supplied interlock is released once we hold our own. */
245 if (flags & LK_INTERLOCK) {
246 mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
247 mtx_unlock(interlkp);
/* Acquisitions that may sleep must be witness-safe to sleep here. */
250 if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
251 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
252 &lkp->lk_interlock->lock_object,
253 "Acquiring lockmgr lock \"%s\"",
254 (wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg);
256 if (panicstr != NULL) {
257 mtx_unlock(lkp->lk_interlock);
/* LK_NOSHARE locks silently promote shared requests to exclusive. */
260 if ((lkp->lk_flags & LK_NOSHARE) &&
261 (flags & LK_TYPE_MASK) == LK_SHARED) {
262 flags &= ~LK_TYPE_MASK;
263 flags |= LK_EXCLUSIVE;
265 extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
267 switch (flags & LK_TYPE_MASK) {
/* --- shared-lock request (case label elided) --- */
270 if (!LOCKMGR_TRYOP(extflags))
271 WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER, file,
274 * If we are not the exclusive lock holder, we have to block
275 * while there is an exclusive lock holder or while an
276 * exclusive lock request or upgrade request is in progress.
278 * However, if TDP_DEADLKTREAT is set, we override exclusive
279 * lock requests or upgrade requests ( but not the exclusive
282 if (lkp->lk_lockholder != td) {
283 lockflags = LK_HAVE_EXCL;
284 if (!(td->td_pflags & TDP_DEADLKTREAT))
285 lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
286 error = acquire(&lkp, extflags, lockflags, wmesg,
287 prio, timo, &contested, &waitstart);
290 sharelock(td, lkp, 1);
291 if (lkp->lk_sharecount == 1)
292 lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
293 WITNESS_LOCK(&lkp->lk_object, LOCKMGR_TRYW(extflags),
296 #if defined(DEBUG_LOCKS)
297 stack_save(&lkp->lk_stack);
302 * We hold an exclusive lock, so downgrade it to shared.
303 * An alternative would be to fail with EDEADLK.
305 /* FALLTHROUGH downgrade */
/* --- downgrade: convert the exclusive hold into shared references --- */
308 _lockmgr_assert(lkp, KA_XLOCKED, file, line);
309 sharelock(td, lkp, lkp->lk_exclusivecount);
310 WITNESS_DOWNGRADE(&lkp->lk_object, 0, file, line);
311 COUNT(td, -lkp->lk_exclusivecount);
312 lkp->lk_exclusivecount = 0;
313 lkp->lk_flags &= ~LK_HAVE_EXCL;
314 lkp->lk_lockholder = LK_NOPROC;
315 if (lkp->lk_waitcount)
321 * Upgrade a shared lock to an exclusive one. If another
322 * shared lock has already requested an upgrade to an
323 * exclusive lock, our shared lock is released and an
324 * exclusive lock is requested (which will be granted
325 * after the upgrade). If we return an error, the file
326 * will always be unlocked.
328 _lockmgr_assert(lkp, KA_SLOCKED, file, line);
329 shareunlock(td, lkp, 1);
330 if (lkp->lk_sharecount == 0)
331 lock_profile_release_lock(&lkp->lk_object);
333 * If we are just polling, check to see if we will block.
335 if ((extflags & LK_NOWAIT) &&
336 ((lkp->lk_flags & LK_WANT_UPGRADE) ||
337 lkp->lk_sharecount > 1)) {
339 WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
342 if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
344 * We are first shared lock to request an upgrade, so
345 * request upgrade and wait for the shared count to
346 * drop to zero, then take exclusive lock.
348 lkp->lk_flags |= LK_WANT_UPGRADE;
349 error = acquire(&lkp, extflags, LK_SHARE_NONZERO, wmesg,
350 prio, timo, &contested, &waitstart);
351 lkp->lk_flags &= ~LK_WANT_UPGRADE;
354 if ((lkp->lk_flags & ( LK_WANT_EXCL | LK_WAIT_NONZERO)) == (LK_WANT_EXCL | LK_WAIT_NONZERO))
356 WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
/* Upgrade succeeded: take exclusive ownership. */
359 if (lkp->lk_exclusivecount != 0)
360 panic("lockmgr: non-zero exclusive count");
361 lkp->lk_flags |= LK_HAVE_EXCL;
362 lkp->lk_lockholder = td;
363 lkp->lk_exclusivecount = 1;
364 WITNESS_UPGRADE(&lkp->lk_object, LOP_EXCLUSIVE |
365 LOP_TRYLOCK, file, line);
367 lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
368 #if defined(DEBUG_LOCKS)
369 stack_save(&lkp->lk_stack);
374 * Someone else has requested upgrade. Release our shared
375 * lock, awaken upgrade requestor if we are the last shared
376 * lock, then request an exclusive lock.
378 WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
379 if ( (lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
382 /* FALLTHROUGH exclusive request */
/* --- exclusive-lock request (case label elided) --- */
385 if (!LOCKMGR_TRYOP(extflags))
386 WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER |
387 LOP_EXCLUSIVE, file, line);
/* Recursive acquisition: allowed only with LK_CANRECURSE. */
388 if (lkp->lk_lockholder == td) {
392 if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
393 panic("lockmgr: locking against myself");
394 if ((extflags & LK_CANRECURSE) != 0) {
395 lkp->lk_exclusivecount++;
396 WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
397 LOCKMGR_TRYW(extflags), file, line);
403 * If we are just polling, check to see if we will sleep.
405 if ((extflags & LK_NOWAIT) &&
406 (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
411 * Try to acquire the want_exclusive flag.
413 error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL),
414 wmesg, prio, timo, &contested, &waitstart);
417 lkp->lk_flags |= LK_WANT_EXCL;
419 * Wait for shared locks and upgrades to finish.
421 error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE |
422 LK_SHARE_NONZERO, wmesg, prio, timo,
423 &contested, &waitstart);
424 lkp->lk_flags &= ~LK_WANT_EXCL;
426 if (lkp->lk_flags & LK_WAIT_NONZERO)
/* All obstacles gone: take exclusive ownership. */
430 lkp->lk_flags |= LK_HAVE_EXCL;
431 lkp->lk_lockholder = td;
432 if (lkp->lk_exclusivecount != 0)
433 panic("lockmgr: non-zero exclusive count");
434 lkp->lk_exclusivecount = 1;
435 WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
436 LOCKMGR_TRYW(extflags), file, line);
438 lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
439 #if defined(DEBUG_LOCKS)
440 stack_save(&lkp->lk_stack);
/* --- release: drop one exclusive or shared reference --- */
445 _lockmgr_assert(lkp, KA_LOCKED, file, line);
446 if (lkp->lk_exclusivecount != 0) {
447 if (lkp->lk_lockholder != LK_KERNPROC) {
448 WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE,
452 if (lkp->lk_exclusivecount-- == 1) {
453 lkp->lk_flags &= ~LK_HAVE_EXCL;
454 lkp->lk_lockholder = LK_NOPROC;
455 lock_profile_release_lock(&lkp->lk_object);
457 } else if (lkp->lk_flags & LK_SHARE_NONZERO) {
458 WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
459 shareunlock(td, lkp, 1);
462 if (lkp->lk_flags & LK_WAIT_NONZERO)
/* --- drain: wait for the lock to fall completely idle --- */
468 * Check that we do not already hold the lock, as it can
469 * never drain if we do. Unfortunately, we have no way to
470 * check for holding a shared lock, but at least we can
471 * check for an exclusive one.
473 if (!LOCKMGR_TRYOP(extflags))
474 WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER |
475 LOP_EXCLUSIVE, file, line);
476 if (lkp->lk_lockholder == td)
477 panic("lockmgr: draining against myself");
479 error = acquiredrain(lkp, extflags, wmesg, prio, timo);
482 lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
483 lkp->lk_lockholder = td;
484 lkp->lk_exclusivecount = 1;
485 WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
486 LOCKMGR_TRYW(extflags), file, line);
488 #if defined(DEBUG_LOCKS)
489 stack_save(&lkp->lk_stack);
/* Unknown request type: fatal programming error. */
494 mtx_unlock(lkp->lk_interlock);
495 panic("lockmgr: unknown locktype request %d",
496 flags & LK_TYPE_MASK);
/* Wake a drainer once every busy bit has cleared. */
499 if ((lkp->lk_flags & LK_WAITDRAIN) &&
500 (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
501 LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
502 lkp->lk_flags &= ~LK_WAITDRAIN;
503 wakeup((void *)&lkp->lk_flags);
505 mtx_unlock(lkp->lk_interlock);
/*
 * Sleep until the lock is completely idle (no LK_ALL bits set).
 * Drain waiters sleep on &lkp->lk_flags, distinct from the channel
 * normal waiters use, and are flagged via LK_WAITDRAIN.
 */
510 acquiredrain(struct lock *lkp, int extflags, const char *wmesg, int prio,
514 int error, iprio, itimo;
/* Substitute the lock's own wmesg/prio/timo for "default" arguments. */
516 iwmesg = (wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg;
517 iprio = (prio != LK_PRIO_DEFAULT) ? prio : lkp->lk_prio;
518 itimo = (timo != LK_TIMO_DEFAULT) ? timo : lkp->lk_timo;
/* Non-blocking request that would have to wait: fail immediately. */
520 if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
523 while (lkp->lk_flags & LK_ALL) {
524 lkp->lk_flags |= LK_WAITDRAIN;
525 error = msleep(&lkp->lk_flags, lkp->lk_interlock, iprio, iwmesg,
526 ((extflags & LK_TIMELOCK) ? itimo : 0));
/* LK_SLEEPFAIL: the caller wants an error after any sleep. */
529 if (extflags & LK_SLEEPFAIL) {
537 * Initialize a lock; required before use.
/*
 * Initialize a lockmgr lock; must be called before first use.
 * Note the K&R-style parameter list, preserved from the original.
 */
540 lockinit(lkp, prio, wmesg, timo, flags)
/* Per-operation flags (LK_NOWAIT/LK_SLEEPFAIL) are invalid at init time. */
549 KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0,
550 ("%s: Invalid flags passed with mask 0x%x", __func__,
551 flags & LK_EXTFLG_MASK));
552 CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
553 "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);
/* Interlock mutexes come from the shared lockbuilder pool. */
555 lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
556 lkp->lk_flags = (flags & LK_EXTFLG_MASK) & ~(LK_FUNC_MASK);
557 lkp->lk_sharecount = 0;
558 lkp->lk_waitcount = 0;
559 lkp->lk_exclusivecount = 0;
562 lkp->lk_lockholder = LK_NOPROC;
563 lkp->lk_newlock = NULL;
/* Translate LK_* init flags into lock_object LO_* flags. */
564 iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
565 if (!(flags & LK_NODUP))
567 if (flags & LK_NOPROFILE)
568 iflags |= LO_NOPROFILE;
569 if (!(flags & LK_NOWITNESS))
570 iflags |= LO_WITNESS;
571 if (flags & LK_QUIET)
574 stack_zero(&lkp->lk_stack);
576 lock_init(&lkp->lk_object, &lock_class_lockmgr, wmesg, NULL, iflags);
/* Body of lockdestroy() — the function header is elided in this chunk.
 * The lock must be unheld and non-recursed before it is torn down. */
587 CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
589 KASSERT((lkp->lk_flags & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0,
590 ("lockmgr still held"));
591 KASSERT(lkp->lk_exclusivecount == 0, ("lockmgr still recursed"));
/* Mark destroyed so later lockmgr() calls panic cleanly. */
592 lkp->lk_flags = LK_DESTROYED;
593 lock_destroy(&lkp->lk_object);
597 * Disown the lockmgr.
/*
 * Hand ownership of an exclusively-held lock to the kernel pseudo-owner
 * (LK_KERNPROC), so any thread may later release it.
 */
600 _lockmgr_disown(struct lock *lkp, const char *file, int line)
605 KASSERT(panicstr != NULL || (lkp->lk_flags & LK_DESTROYED) == 0,
606 ("%s: %p lockmgr is destroyed", __func__, lkp));
/* Must be exclusively held, exactly once, by the caller. */
607 _lockmgr_assert(lkp, KA_XLOCKED | KA_NOTRECURSED, file, line);
610 * Drop the lock reference and switch the owner. This will result
611 * in an atomic operation like td_lock is only accessed by curthread
612 * and lk_lockholder only needs one write. Note also that the lock
613 * owner can be already KERNPROC, so in that case just skip the
616 if (lkp->lk_lockholder == td) {
617 WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE, file, line);
620 lkp->lk_lockholder = LK_KERNPROC;
624 * Determine the status of a lock.
/* Body of lockstatus() — the function header is elided in this chunk.
 * Reports whether the lock is held exclusively by the caller
 * (LK_EXCLUSIVE), exclusively by another thread (LK_EXCLOTHER), or
 * shared (LK_SHARED). */
633 KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
634 ("%s: %p lockmgr is destroyed", __func__, lkp));
/* The interlock serializes the snapshot of the counters below. */
638 mtx_lock(lkp->lk_interlock);
641 if (lkp->lk_exclusivecount != 0) {
642 if (lkp->lk_lockholder == curthread)
643 lock_type = LK_EXCLUSIVE;
645 lock_type = LK_EXCLOTHER;
646 } else if (lkp->lk_sharecount != 0)
647 lock_type = LK_SHARED;
649 mtx_unlock(lkp->lk_interlock);
654 * Print out information about state of a lock. Used by VOP_PRINT
655 * routines to display status about contained locks.
/*
 * Print the lock's state for VOP_PRINT-style diagnostics: shared count,
 * or exclusive holder and count, plus the number of pending waiters.
 * K&R-style parameter list preserved from the original.
 */
658 lockmgr_printinfo(lkp)
662 if (lkp->lk_sharecount)
663 printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
665 else if (lkp->lk_flags & LK_HAVE_EXCL)
666 printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
667 lkp->lk_wmesg, lkp->lk_exclusivecount,
668 lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid)";
670 printf(" with %d pending", lkp->lk_waitcount);
672 stack_print_ddb(&lkp->lk_stack);
676 #ifdef INVARIANT_SUPPORT
/* Replace the empty stub macro with the real assertion routine. */
678 #undef _lockmgr_assert
/*
 * Assert that 'lkp' is in the state described by 'what' (KA_* flags);
 * panic with file/line information otherwise.
 */
682 _lockmgr_assert(struct lock *lkp, int what, const char *file, int line)
689 td = lkp->lk_lockholder;
/* Assertions are meaningless once the system is panicking. */
690 if (panicstr != NULL)
694 case KA_SLOCKED | KA_NOTRECURSED:
695 case KA_SLOCKED | KA_RECURSED:
698 case KA_LOCKED | KA_NOTRECURSED:
699 case KA_LOCKED | KA_RECURSED:
702 * We cannot trust WITNESS if the lock is held in
703 * exclusive mode and a call to lockmgr_disown() happened.
704 * Workaround this skipping the check if the lock is
705 * held in exclusive mode even for the KA_LOCKED case.
707 if (slocked || (x & LK_HAVE_EXCL) == 0) {
708 witness_assert(&lkp->lk_object, what, file, line);
712 if (LOCKMGR_UNHELD(x) || ((x & LK_SHARE_NONZERO) == 0 &&
713 (slocked || LOCKMGR_NOTOWNER(td))))
714 panic("Lock %s not %slocked @ %s:%d\n",
715 lkp->lk_object.lo_name, slocked ? "share " : "",
/* Recursion checks only apply when the lock is held exclusively. */
717 if ((x & LK_SHARE_NONZERO) == 0) {
718 if (lockmgr_recursed(lkp)) {
719 if (what & KA_NOTRECURSED)
720 panic("Lock %s recursed @ %s:%d\n",
721 lkp->lk_object.lo_name, file, line);
722 } else if (what & KA_RECURSED)
723 panic("Lock %s not recursed @ %s:%d\n",
724 lkp->lk_object.lo_name, file, line);
728 case KA_XLOCKED | KA_NOTRECURSED:
729 case KA_XLOCKED | KA_RECURSED:
730 if ((x & LK_HAVE_EXCL) == 0 || LOCKMGR_NOTOWNER(td))
731 panic("Lock %s not exclusively locked @ %s:%d\n",
732 lkp->lk_object.lo_name, file, line);
733 if (lockmgr_recursed(lkp)) {
734 if (what & KA_NOTRECURSED)
735 panic("Lock %s recursed @ %s:%d\n",
736 lkp->lk_object.lo_name, file, line);
737 } else if (what & KA_RECURSED)
738 panic("Lock %s not recursed @ %s:%d\n",
739 lkp->lk_object.lo_name, file, line);
/* KA_UNLOCKED-style checks (case labels elided in this chunk). */
742 if (td == curthread || td == LK_KERNPROC)
743 panic("Lock %s exclusively locked @ %s:%d\n",
744 lkp->lk_object.lo_name, file, line);
748 if (LOCKMGR_UNHELD(x)) {
750 panic("Lock %s not locked by anyone @ %s:%d\n",
751 lkp->lk_object.lo_name, file, line);
752 } else if (what & KA_UNHELD)
753 panic("Lock %s locked by someone @ %s:%d\n",
754 lkp->lk_object.lo_name, file, line);
757 panic("Unknown lockmgr lock assertion: 0x%x @ %s:%d", what,
761 #endif /* INVARIANT_SUPPORT */
765 * Check to see if a thread that is blocked on a sleep queue is actually
766 * blocked on a 'struct lock'. If so, output some details and return true.
767 * If the lock has an exclusive owner, return that in *ownerp.
/*
 * ddb helper: decide whether sleeping thread 'td' is blocked on a
 * lockmgr lock (either the normal wait channel or the drain channel),
 * print its state, and report the exclusive owner via *ownerp.
 */
770 lockmgr_chain(struct thread *td, struct thread **ownerp)
776 /* Simple test to see if wchan points to a lockmgr lock. */
777 if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
778 lkp->lk_wmesg == td->td_wmesg)
782 * If this thread is doing a DRAIN, then it would be asleep on
783 * &lkp->lk_flags rather than lkp.
/* Recover the struct lock from the lk_flags wait channel address. */
785 lkp = (struct lock *)((char *)td->td_wchan -
786 offsetof(struct lock, lk_flags));
787 if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
788 lkp->lk_wmesg == td->td_wmesg && (lkp->lk_flags & LK_WAITDRAIN))
791 /* Doesn't seem to be a lockmgr lock. */
795 /* Ok, we think we have a lockmgr lock, so output some details. */
796 db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
797 if (lkp->lk_sharecount) {
798 db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
801 db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
802 *ownerp = lkp->lk_lockholder;
/*
 * lc_ddb_show hook: print a lockmgr lock's state (SHARED, EXCL with
 * holder details, or UNLOCKED) plus the waiter count for ddb's
 * "show lock" command.
 */
808 db_show_lockmgr(struct lock_object *lock)
813 lkp = (struct lock *)lock;
815 db_printf(" lock type: %s\n", lkp->lk_wmesg);
816 db_printf(" state: ");
817 if (lkp->lk_sharecount)
818 db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
819 else if (lkp->lk_flags & LK_HAVE_EXCL) {
820 td = lkp->lk_lockholder;
821 db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
822 db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
823 td->td_proc->p_pid, td->td_name);
825 db_printf("UNLOCKED\n");
826 if (lkp->lk_waitcount > 0)
827 db_printf(" waiters: %d\n", lkp->lk_waitcount);