/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif
CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1
#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif
#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (__predict_false(_i > 0)) {					\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (__predict_false(mtx_owned(&Giant))) {			\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)
/*
 * Return true if a shared acquire with the given flags may proceed on the
 * lock word 'x'.  'fp' is true on the fast path, which never grants the
 * deadlock-avoidance exceptions below.
 */
static bool __always_inline
LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
{

	if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
	    LK_SHARE)
		return (true);
	if (fp || (!(x & LK_SHARE)))
		return (false);
	if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
	    (curthread->td_pflags & TDP_DEADLKTREAT))
		return (true);
	return (false);
}
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked_v(v)						\
	(((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

#define	lockmgr_xlocked(lk) lockmgr_xlocked_v((lk)->lk_lock)
static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};
struct lockmgr_wait {
	const char *iwmesg;
	int ipri;
	int itimo;
};

static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
    int flags, bool fp);
static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
static void
lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
{
	struct lock_class *class;

	if (flags & LK_INTERLOCK) {
		class = LOCK_CLASS(ilk);
		class->lc_unlock(ilk);
	}

	if (__predict_false(wakeup_swapper))
		kick_proc0();
}
static void
lockmgr_note_shared_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

	lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime,
	    file, line);
	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
	TD_LOCKS_INC(curthread);
	TD_SLOCKS_INC(curthread);
	STACK_SAVE(lk);
}

static void
lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
{

	lock_profile_release_lock(&lk->lock_object);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
}

static void
lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

	lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime,
	    file, line);
	LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
	    line);
	TD_LOCKS_INC(curthread);
	STACK_SAVE(lk);
}

static void
lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
{

	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
	    line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
}
static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}
/*
 * Sleep waiting for the lock.  This function assumes the sleepqueue chain
 * lock is held on entry and returns with it released.  It also assumes the
 * generic interlock, if any, is sane and has already been checked.
 * If LK_INTERLOCK is specified, the interlock is not reacquired after the
 * sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decide which flavour of sleep to perform.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
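
/*
 * Illustrative sketch (not part of the original file): the LK_INTERLOCK
 * contract implemented above lets a caller enter with a held interlock
 * that is dropped before sleeping and is not reacquired afterwards.  With
 * a hypothetical mutex 'foo_mtx' protecting the lookup of 'foo':
 *
 *	mtx_lock(&foo_mtx);
 *	... locate foo ...
 *	error = lockmgr(&foo->foo_lock, LK_EXCLUSIVE | LK_INTERLOCK,
 *	    &foo_mtx);
 *	... foo_mtx is unlocked here whether or not the acquire slept ...
 *
 * (The lockmgr() wrapper macros in <sys/lockmgr.h> cast the interlock to
 * struct lock_object * before it reaches this file.)
 */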
/*
 * Release a shared hold of the lock, waking up waiters as appropriate.
 */
static int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x, orig_x;
	u_int realexslp;
	int queue, wakeup_swapper;

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;
		if (lockmgr_sunlock_try(lk, &x))
			break;

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		orig_x = lk->lk_lock;
retry_sleepq:
		x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starving the threads sleeping on the shared queue by
		 * giving them precedence and cleaning up the exclusive
		 * waiters bit anyway.
		 * Note that lk_exslpfail may overstate the number of waiters
		 * with the LK_SLEEPFAIL flag on, because such waiters may
		 * also be using interruptible sleeps; treat lk_exslpfail as
		 * an upper bound.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}
		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (lockmgr_sunlock_try(lk, &orig_x)) {
			sleepq_release(&lk->lock_object);
			break;
		}

		x |= LK_SHARERS_LOCK(1);
		if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
			orig_x = x;
			goto retry_sleepq;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lockmgr_note_shared_release(lk, file, line);
	return (wakeup_swapper);
}
static void
assert_lockmgr(const struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, uintptr_t how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static uintptr_t
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	if (flags & LK_IS_VNODE)
		iflags |= LO_IS_VNODE;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	STACK_ZERO(lk);
}
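
/*
 * Illustrative sketch (not part of the original file): the usual lockmgr(9)
 * life cycle for a hypothetical object 'foo':
 *
 *	struct lock foo_lock;
 *
 *	lockinit(&foo_lock, PVFS, "foolk", 0, 0);
 *
 *	lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);
 *	... exclusive access to foo ...
 *	lockmgr(&foo_lock, LK_RELEASE, NULL);
 *
 *	lockdestroy(&foo_lock);		(asserts the lock is unlocked)
 */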
/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockdisableshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}
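
/*
 * Illustrative sketch (not part of the original file): the setters above
 * require the lock to be held exclusively, e.g. for a vnode lock that must
 * temporarily tolerate recursion:
 *
 *	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
 *	lockallowrecurse(vp->v_vnlock);
 *	...
 *	lockdisablerecurse(vp->v_vnlock);
 *	lockmgr(vp->v_vnlock, LK_RELEASE, NULL);
 */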
void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}
static bool __always_inline
lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
{

	/*
	 * If no other thread has an exclusive lock, or
	 * no exclusive waiter is present, bump the count of
	 * sharers.  Since we have to preserve the state of
	 * waiters, if we fail to acquire the shared lock
	 * loop back and retry.
	 */
	*xp = lk->lk_lock;
	while (LK_CAN_SHARE(*xp, flags, fp)) {
		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
		    *xp + LK_ONE_SHARER)) {
			return (true);
		}
	}
	return (false);
}

static bool __always_inline
lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
{

	for (;;) {
		/*
		 * Either more than one sharer is present or there are no
		 * waiters to wake up: dropping one sharer cannot require a
		 * wakeup, so do it locklessly.
		 */
		if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
			    *xp - LK_ONE_SHARER))
				return (true);
			continue;
		}
		break;
	}
	return (false);
}
static __noinline int
lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	uintptr_t tid, x;
	int error = 0;
	const char *iwmesg;
	int ipri, itimo;

#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif

	if (__predict_false(panicstr != NULL))
		goto out;

	tid = (uintptr_t)curthread;

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
		    file, line, flags & LK_INTERLOCK ? ilk : NULL);
	for (;;) {
		if (lockmgr_slock_try(lk, &x, flags, false))
			break;
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&lk->lock_object,
		    &contested, &waittime);

		/*
		 * If the lock is already held by curthread in
		 * exclusive way avoid a deadlock.
		 */
		if (LK_HOLDER(x) == tid) {
			LOCK_LOG2(lk,
			    "%s: %p already held in exclusive mode",
			    __func__, lk);
			error = EDEADLK;
			break;
		}

		/*
		 * If the lock is expected to not sleep just give up
		 * and return.
		 */
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock;
retry_sleepq:

		/*
		 * If the lock can be acquired in shared mode, try
		 * again.
		 */
		if (LK_CAN_SHARE(x, flags, false)) {
			sleepq_release(&lk->lock_object);
			continue;
		}

		/*
		 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
		 * loop back and retry.
		 */
		if ((x & LK_SHARED_WAITERS) == 0) {
			if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
			    x | LK_SHARED_WAITERS)) {
				goto retry_sleepq;
			}
			LOCK_LOG2(lk, "%s: %p set shared waiters flag",
			    __func__, lk);
		}

		if (lwa == NULL) {
			iwmesg = lk->lock_object.lo_name;
			ipri = lk->lk_pri;
			itimo = lk->lk_timo;
		} else {
			iwmesg = lwa->iwmesg;
			ipri = lwa->ipri;
			itimo = lwa->itimo;
		}

		/*
		 * Since we have been unable to acquire the shared lock
		 * and the shared waiters flag is set, we will sleep.
		 */
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		    SQ_SHARED_QUEUE);
		flags &= ~LK_INTERLOCK;
		if (error) {
			LOCK_LOG3(lk,
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
			break;
		}
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
		    __func__, lk);
	}
	if (error == 0) {
#ifdef LOCK_PROFILING
		lockmgr_note_shared_acquire(lk, contested, waittime,
		    file, line, flags);
#else
		lockmgr_note_shared_acquire(lk, 0, 0, file, line,
		    flags);
#endif
	}

out:
	lockmgr_exit(flags, ilk, 0);
	return (error);
}
static __noinline int
lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	struct lock_class *class;
	uintptr_t tid, x, v;
	int error = 0;
	const char *iwmesg;
	int ipri, itimo;

#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif

	if (__predict_false(panicstr != NULL))
		goto out;

	tid = (uintptr_t)curthread;

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
		    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
		    ilk : NULL);

	/*
	 * If curthread already holds the lock and this one is
	 * allowed to recurse, simply recurse on it.
	 */
	if (lockmgr_xlocked(lk)) {
		if ((flags & LK_CANRECURSE) == 0 &&
		    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
			/*
			 * If the lock is expected to not panic just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk,
				    "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				goto out;
			}
			if (flags & LK_INTERLOCK) {
				class = LOCK_CLASS(ilk);
				class->lc_unlock(ilk);
			}
			panic("%s: recursing on non recursive lockmgr %p "
			    "@ %s:%d\n", __func__, lk, file, line);
		}
		lk->lk_recurse++;
		LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
		LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
		    lk->lk_recurse, file, line);
		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
		    LK_TRYWIT(flags), file, line);
		TD_LOCKS_INC(curthread);
		goto out;
	}

	for (;;) {
		if (lk->lk_lock == LK_UNLOCKED &&
		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
			break;
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&lk->lock_object,
		    &contested, &waittime);

		/*
		 * If the lock is expected to not sleep just give up
		 * and return.
		 */
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock;
retry_sleepq:

		/*
		 * If the lock has been released while we spun on
		 * the sleepqueue chain lock just try again.
		 */
		if (x == LK_UNLOCKED) {
			sleepq_release(&lk->lock_object);
			continue;
		}

		/*
		 * The lock can be in the state where there is a
		 * pending queue of waiters, but still no owner.
		 * This happens when the lock is contested and an
		 * owner is going to claim the lock.
		 * If curthread is the one successfully acquiring it
		 * claim lock ownership and return, preserving waiters
		 * flags.
		 */
		v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		if ((x & ~v) == LK_UNLOCKED) {
			v &= ~LK_EXCLUSIVE_SPINNERS;
			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
			    tid | v)) {
				sleepq_release(&lk->lock_object);
				LOCK_LOG2(lk,
				    "%s: %p claimed by a new writer",
				    __func__, lk);
				break;
			}
			goto retry_sleepq;
		}

		/*
		 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
		 * fail, loop back and retry.
		 */
		if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
			if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
			    x | LK_EXCLUSIVE_WAITERS)) {
				goto retry_sleepq;
			}
			LOCK_LOG2(lk, "%s: %p set excl waiters flag",
			    __func__, lk);
		}

		if (lwa == NULL) {
			iwmesg = lk->lock_object.lo_name;
			ipri = lk->lk_pri;
			itimo = lk->lk_timo;
		} else {
			iwmesg = lwa->iwmesg;
			ipri = lwa->ipri;
			itimo = lwa->itimo;
		}

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we will
		 * sleep.
		 */
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		    SQ_EXCLUSIVE_QUEUE);
		flags &= ~LK_INTERLOCK;
		if (error) {
			LOCK_LOG3(lk,
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
			break;
		}
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
		    __func__, lk);
	}
	if (error == 0) {
#ifdef LOCK_PROFILING
		lockmgr_note_exclusive_acquire(lk, contested, waittime,
		    file, line, flags);
#else
		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
		    flags);
#endif
	}

out:
	lockmgr_exit(flags, ilk, 0);
	return (error);
}
static __noinline int
lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	uintptr_t tid, x, v;
	int error = 0;
	int wakeup_swapper = 0;
	int op;

	if (__predict_false(panicstr != NULL))
		goto out;

	tid = (uintptr_t)curthread;

	_lockmgr_assert(lk, KA_SLOCKED, file, line);
	v = lk->lk_lock;
	x = v & LK_ALL_WAITERS;
	v &= LK_EXCLUSIVE_SPINNERS;

	/*
	 * Try to switch from one shared lock to an exclusive one.
	 * We need to preserve waiters flags during the operation.
	 */
	if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
	    tid | x)) {
		LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
		    line);
		WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
		    LK_TRYWIT(flags), file, line);
		TD_SLOCKS_DEC(curthread);
		goto out;
	}

	op = flags & LK_TYPE_MASK;

	/*
	 * In LK_TRYUPGRADE mode, do not drop the lock,
	 * returning EBUSY instead.
	 */
	if (op == LK_TRYUPGRADE) {
		LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
		    __func__, lk);
		error = EBUSY;
		goto out;
	}

	/*
	 * We have been unable to succeed in upgrading, so just
	 * give up the shared lock.
	 */
	wakeup_swapper |= wakeupshlk(lk, file, line);
	error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
	flags &= ~LK_INTERLOCK;
out:
	lockmgr_exit(flags, ilk, wakeup_swapper);
	return (error);
}
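
/*
 * Illustrative sketch (not part of the original file): as the code above
 * shows, a failed LK_UPGRADE drops the shared lock before queueing for the
 * exclusive one, so a caller must revalidate any state it read under the
 * shared hold; LK_TRYUPGRADE instead keeps the shared lock and fails with
 * EBUSY.  Assuming a lock 'foo_lock':
 *
 *	lockmgr(&foo_lock, LK_SHARED, NULL);
 *	if (lockmgr(&foo_lock, LK_TRYUPGRADE, NULL) != 0) {
 *		... still held shared: either retry with LK_UPGRADE and
 *		    revalidate afterwards, or give up on upgrading ...
 *	}
 */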
int
lockmgr_lock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
	struct lock_class *class;
	uintptr_t x, tid;
	u_int op;
	bool locked;

	if (__predict_false(panicstr != NULL))
		return (0);

	op = flags & LK_TYPE_MASK;
	locked = false;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
		if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
			break;
		if (lockmgr_slock_try(lk, &x, flags, true)) {
			lockmgr_note_shared_acquire(lk, 0, 0,
			    file, line, flags);
			locked = true;
		} else {
			return (lockmgr_slock_hard(lk, flags, ilk, file, line,
			    NULL));
		}
		break;
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);
		tid = (uintptr_t)curthread;
		if (lk->lk_lock == LK_UNLOCKED &&
		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
			    flags);
			locked = true;
		} else {
			return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
			    NULL));
		}
		break;
	case LK_UPGRADE:
	case LK_TRYUPGRADE:
		return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
	default:
		break;
	}
	if (__predict_true(locked)) {
		if (__predict_false(flags & LK_INTERLOCK)) {
			class = LOCK_CLASS(ilk);
			class->lc_unlock(ilk);
		}
		return (0);
	} else {
		return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
	}
}
static __noinline int
lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
	int wakeup_swapper = 0;

	if (__predict_false(panicstr != NULL))
		goto out;

	wakeup_swapper = wakeupshlk(lk, file, line);

out:
	lockmgr_exit(flags, ilk, wakeup_swapper);
	return (0);
}
static __noinline int
lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
	uintptr_t tid, v;
	int wakeup_swapper = 0;
	u_int realexslp;
	int queue;

	if (__predict_false(panicstr != NULL))
		goto out;

	tid = (uintptr_t)curthread;

	/*
	 * As a first option, treat the lock as if it had no waiters.
	 * Fix up the tid var if the lock has been disowned.
	 */
	if (LK_HOLDER(x) == LK_KERNPROC)
		tid = LK_KERNPROC;
	else {
		WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_DEC(curthread);
	}
	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);

	/*
	 * The lock is held in exclusive mode.
	 * If the lock is recursed also, then unrecurse it.
	 */
	if (lockmgr_xlocked_v(x) && lockmgr_recursed(lk)) {
		LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
		lk->lk_recurse--;
		goto out;
	}
	if (tid != LK_KERNPROC)
		lock_profile_release_lock(&lk->lock_object);

	if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
		goto out;

	sleepq_lock(&lk->lock_object);
	x = lk->lk_lock;
	v = LK_UNLOCKED;

	/*
	 * If the lock has exclusive waiters, give them
	 * preference in order to avoid deadlock with
	 * shared runners up.
	 * If interruptible sleeps left the exclusive queue
	 * empty, avoid starving the threads sleeping
	 * on the shared queue by giving them precedence
	 * and cleaning up the exclusive waiters bit anyway.
	 * Note that lk_exslpfail may overstate the real
	 * number of waiters with the LK_SLEEPFAIL flag on
	 * because such waiters may also be using
	 * interruptible sleeps; treat lk_exslpfail as an
	 * upper bound.
	 */
	MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
	realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
	if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
		if (lk->lk_exslpfail < realexslp) {
			lk->lk_exslpfail = 0;
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
		} else {
			lk->lk_exslpfail = 0;
			LOCK_LOG2(lk,
			    "%s: %p has only LK_SLEEPFAIL sleepers",
			    __func__, lk);
			LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
			    __func__, lk);
			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
			queue = SQ_SHARED_QUEUE;
		}
	} else {

		/*
		 * Exclusive waiters sleeping with LK_SLEEPFAIL
		 * on and using interruptible sleeps/timeout
		 * may have left spurious lk_exslpfail counts
		 * on, so clean it up anyway.
		 */
		lk->lk_exslpfail = 0;
		queue = SQ_SHARED_QUEUE;
	}

	LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
	    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
	    "exclusive");
	atomic_store_rel_ptr(&lk->lk_lock, v);
	wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
	sleepq_release(&lk->lock_object);

out:
	lockmgr_exit(flags, ilk, wakeup_swapper);
	return (0);
}
int
lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk)
{
	struct lock_class *class;
	uintptr_t x, tid;
	const char *file;
	int line;

	if (__predict_false(panicstr != NULL))
		return (0);

	file = __FILE__;
	line = __LINE__;

	_lockmgr_assert(lk, KA_LOCKED, file, line);
	x = lk->lk_lock;
	if (__predict_true((x & LK_SHARE) != 0)) {
		if (lockmgr_sunlock_try(lk, &x)) {
			lockmgr_note_shared_release(lk, file, line);
		} else {
			return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
		}
	} else {
		tid = (uintptr_t)curthread;
		if (!lockmgr_recursed(lk) &&
		    atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
			lockmgr_note_exclusive_release(lk, file, line);
		} else {
			return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
		}
	}
	if (__predict_false(flags & LK_INTERLOCK)) {
		class = LOCK_CLASS(ilk);
		class->lc_unlock(ilk);
	}
	return (0);
}
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lockmgr_wait lwa;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif

	if (panicstr != NULL)
		return (0);

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	lwa.iwmesg = iwmesg;
	lwa.ipri = ipri;
	lwa.itimo = itimo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_TRYUPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
		break;
	case LK_UPGRADE:
	case LK_TRYUPGRADE:
		return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
		break;
	case LK_EXCLUSIVE:
		return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

		/*
		 * Panic if the lock is recursed.
		 */
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if (__predict_true((x & LK_SHARE) != 0)) {
			return (lockmgr_sunlock_hard(lk, x, flags, ilk, file,
			    line));
		} else {
			return (lockmgr_xunlock_hard(lk, x, flags, ilk, file,
			    line));
		}
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		for (;;) {
			if (lk->lk_lock == LK_UNLOCKED &&
			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
				break;

#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected to not sleep just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starving the threads
				 * sleeping on the shared queue by giving
				 * them precedence and cleaning up the
				 * exclusive waiters bit anyway.
				 * Note that lk_exslpfail may overstate the
				 * real number of waiters with the
				 * LK_SLEEPFAIL flag on because such waiters
				 * may also be using interruptible sleeps;
				 * treat lk_exslpfail as an upper bound.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {

					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							wakeup_swapper =
							    sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up we
				 * need to wait for one of them to acquire
				 * the lock before setting the exclusive
				 * waiters bit, in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}
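
/*
 * Illustrative sketch (not part of the original file): callers normally
 * reach this function through the lockmgr() and lockmgr_args() wrappers in
 * <sys/lockmgr.h>; the latter overrides the sleep message, priority and
 * timeout recorded at lockinit() time.  Assuming a lock 'foo_lock':
 *
 *	error = lockmgr_args(&foo_lock, LK_EXCLUSIVE | LK_TIMELOCK, NULL,
 *	    "foowait", PVFS, 5 * hz);
 *	if (error == EWOULDBLOCK)
 *		... the acquire timed out after roughly 5 seconds ...
 */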
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);

	/*
	 * Panic if the lock is recursed.
	 */
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}
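
/*
 * Illustrative sketch (not part of the original file): disowning hands an
 * exclusively held lock over to LK_KERNPROC so that a different context
 * may release it later, as the buffer cache does for async I/O:
 *
 *	lockmgr(&bp->b_lock, LK_EXCLUSIVE, NULL);
 *	lockmgr_disown(&bp->b_lock);
 *	... start the I/O; the completion path, running in another
 *	    thread, performs the LK_RELEASE ...
 */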
void
lockmgr_printinfo(const struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			printf("lock type %s: EXCL by KERNPROC\n",
			    lk->lock_object.lo_name);
		else
			printf("lock type %s: EXCL by thread %p "
			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
			    td, td->td_proc->p_pid, td->td_proc->p_comm,
			    td->td_tid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}
int
lockstatus(const struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}
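
/*
 * Illustrative sketch (not part of the original file): interpreting the
 * lockstatus() return value for a hypothetical 'foo_lock':
 *
 *	switch (lockstatus(&foo_lock)) {
 *	case LK_EXCLUSIVE:	... held exclusively by curthread ...
 *	case LK_EXCLOTHER:	... held exclusively by another thread ...
 *	case LK_SHARED:		... held in shared mode ...
 *	case 0:			... not held ...
 *	}
 *
 * Note that a lock disowned to LK_KERNPROC reports LK_EXCLUSIVE to every
 * caller.
 */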
#ifdef INVARIANT_SUPPORT

FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");

#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
		/* FALLTHROUGH */
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is
		 * held in exclusive mode even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif
#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	const struct lock *lk;

	lk = td->td_wchan;

	/* Simple test to see if wchan is a lockmgr lock. */
	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}
static void
db_show_lockmgr(const struct lock_object *lock)
{
	struct thread *td;
	const struct lock *lk;

	lk = (const struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
	else
		db_printf("none\n");
}
#endif