/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/lockstat.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#endif

#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif
#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (__predict_false(_i > 0)) {					\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (__predict_false(mtx_owned(&Giant))) {			\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)
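/*
 * GIANT_SAVE() and GIANT_RESTORE() bracket a sleep: the former fully
 * drops a (possibly recursed) Giant while recording the recursion depth
 * in _i, the latter reacquires it that many times.  A minimal,
 * illustrative sketch, assuming GIANT_DECLARE is in scope:
 *
 *	GIANT_SAVE();
 *	sleepq_wait(...);	(sleep without holding Giant)
 *	GIANT_RESTORE();
 */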
static bool __always_inline
LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
{

	if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
	    LK_SHARE)
		return (true);
	if (fp || (!(x & LK_SHARE)))
		return (false);
	if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
	    (curthread->td_pflags & TDP_DEADLKTREAT))
		return (true);
	return (false);
}
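/*
 * In words: sharing is permitted when the lock is already shared and no
 * exclusive waiter or spinner is queued.  The deadlock-treatment cases
 * (td_lk_slocks != 0 without LK_NODDLKTREAT, or TDP_DEADLKTREAT) let a
 * thread that already holds shared locks jump the exclusive-waiter
 * queue, since making it wait could deadlock.
 */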
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked_v(v)						\
	(((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

#define	lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
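/*
 * The lock word lk_lock encodes the whole lock state: with LK_SHARE set
 * the upper bits hold the sharer count, otherwise they hold the owning
 * thread pointer (or LK_KERNPROC for a disowned lock); the waiter and
 * spinner bits covered by LK_FLAGMASK live in the low bits either way.
 */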
static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};
struct lockmgr_wait {
	const char *iwmesg;
	int ipri;
	int itimo;
};

static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
    int flags, bool fp);
static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
static void
lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
{
	struct lock_class *class;

	if (flags & LK_INTERLOCK) {
		class = LOCK_CLASS(ilk);
		class->lc_unlock(ilk);
	}

	if (__predict_false(wakeup_swapper))
		kick_proc0();
}
static void
lockmgr_note_shared_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
	    waittime, file, line, LOCKSTAT_READER);
	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
	TD_LOCKS_INC(curthread);
	TD_SLOCKS_INC(curthread);
	STACK_SAVE(lk);
}

static void
lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
{

	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
}
static void
lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
	    waittime, file, line, LOCKSTAT_WRITER);
	LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
	    line);
	TD_LOCKS_INC(curthread);
	STACK_SAVE(lk);
}

static void
lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
{

	if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
		WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_DEC(curthread);
	}
	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
	    line);
}

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lockmgr_read_value(lk);
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}
/*
 * It assumes sleepq_lock is held and returns with it unheld.
 * It also assumes the generic interlock is sane and previously checked.
 * If LK_INTERLOCK is specified, the interlock is not reacquired after the
 * sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decide which flavor of sleep to perform.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
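/*
 * sleeplk() return values: 0 for a normal wakeup, EINTR/ERESTART from an
 * interruptible sleep, EWOULDBLOCK from a timeout, and ENOLCK substituted
 * for a successful sleep when LK_SLEEPFAIL was requested, forcing the
 * caller to fail the lock operation.
 */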
static int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x, orig_x;
	u_int realexslp;
	int queue, wakeup_swapper;

	wakeup_swapper = 0;
	for (;;) {
		x = lockmgr_read_value(lk);
		if (lockmgr_sunlock_try(lk, &x))
			break;

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		orig_x = lockmgr_read_value(lk);
retry_sleepq:
		x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Please note that the lk_exslpfail count may overstate the
		 * real number of waiters with the LK_SLEEPFAIL flag on,
		 * because they may also be used in conjunction with
		 * interruptible sleeps, so lk_exslpfail should be considered
		 * an upper bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
				    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}
		} else {
			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (lockmgr_sunlock_try(lk, &orig_x)) {
			sleepq_release(&lk->lock_object);
			break;
		}

		x |= LK_SHARERS_LOCK(1);
		if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
			orig_x = x;
			goto retry_sleepq;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
	return (wakeup_swapper);
}
static void
assert_lockmgr(const struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, uintptr_t how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static uintptr_t
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	if (flags & LK_IS_VNODE)
		iflags |= LO_IS_VNODE;
	iflags |= flags & LK_NOSHARE;

	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	STACK_ZERO(lk);
}
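/*
 * A minimal, illustrative lockmgr life cycle (the names here are
 * examples only):
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, LK_CANRECURSE);
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 */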
/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockdisableshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}
static bool __always_inline
lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
{

	/*
	 * If no other thread has an exclusive lock, or
	 * no exclusive waiter is present, bump the count of
	 * sharers.  Since we have to preserve the state of
	 * waiters, if we fail to acquire the shared lock
	 * loop back and retry.
	 */
	*xp = lockmgr_read_value(lk);
	while (LK_CAN_SHARE(*xp, flags, fp)) {
		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
		    *xp + LK_ONE_SHARER)) {
			return (true);
		}
	}
	return (false);
}

static bool __always_inline
lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
{

	for (;;) {
		if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
			    *xp - LK_ONE_SHARER))
				return (true);
			continue;
		}
		break;
	}
	return (false);
}
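/*
 * Both helpers above are the uncontested fast paths: they retry the
 * fcmpset only while the lock word still permits the operation, leaving
 * every hard case (waiters present, exclusive owner) to the *_hard
 * functions below.
 */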
static __noinline int
lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	uintptr_t tid, x;
	int error = 0;
	const char *iwmesg;
	int ipri, itimo;

#ifdef KDTRACE_HOOKS
	uint64_t sleep_time = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif

	if (KERNEL_PANICKED())
		goto out;

	tid = (uintptr_t)curthread;

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
		    file, line, flags & LK_INTERLOCK ? ilk : NULL);
	for (;;) {
		if (lockmgr_slock_try(lk, &x, flags, false))
			break;
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&lk->lock_object,
		    &contested, &waittime);

		/*
		 * If the lock is already held by curthread in
		 * exclusive mode, avoid a deadlock.
		 */
		if (LK_HOLDER(x) == tid) {
			LOCK_LOG2(lk,
			    "%s: %p already held in exclusive mode",
			    __func__, lk);
			error = EDEADLK;
			break;
		}

		/*
		 * If the operation must not sleep, just give up
		 * and return.
		 */
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		 */
		sleepq_lock(&lk->lock_object);
		x = lockmgr_read_value(lk);
retry_sleepq:

		/*
		 * If the lock can be acquired in shared mode, try
		 * again.
		 */
		if (LK_CAN_SHARE(x, flags, false)) {
			sleepq_release(&lk->lock_object);
			continue;
		}

		/*
		 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
		 * loop back and retry.
		 */
		if ((x & LK_SHARED_WAITERS) == 0) {
			if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
			    x | LK_SHARED_WAITERS)) {
				goto retry_sleepq;
			}
			LOCK_LOG2(lk, "%s: %p set shared waiters flag",
			    __func__, lk);
		}

		if (lwa == NULL) {
			iwmesg = lk->lock_object.lo_name;
			ipri = lk->lk_pri;
			itimo = lk->lk_timo;
		} else {
			iwmesg = lwa->iwmesg;
			ipri = lwa->ipri;
			itimo = lwa->itimo;
		}

		/*
		 * Since we have been unable to acquire the
		 * shared lock and the shared waiters flag is set,
		 * we will sleep.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&lk->lock_object);
#endif
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		    SQ_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&lk->lock_object);
#endif
		flags &= ~LK_INTERLOCK;
		if (error) {
			LOCK_LOG3(lk,
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
			break;
		}
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
		    __func__, lk);
	}
	if (error == 0) {
#ifdef KDTRACE_HOOKS
		if (sleep_time != 0)
			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
			    LOCKSTAT_READER, (x & LK_SHARE) == 0,
			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#endif
#ifdef LOCK_PROFILING
		lockmgr_note_shared_acquire(lk, contested, waittime,
		    file, line, flags);
#else
		lockmgr_note_shared_acquire(lk, 0, 0, file, line,
		    flags);
#endif
	}

out:
	lockmgr_exit(flags, ilk, 0);
	return (error);
}
static __noinline int
lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	struct lock_class *class;
	uintptr_t tid, x, v;
	int error = 0;
	const char *iwmesg;
	int ipri, itimo;

#ifdef KDTRACE_HOOKS
	uint64_t sleep_time = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif

	if (KERNEL_PANICKED())
		goto out;

	tid = (uintptr_t)curthread;

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
		    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
		    ilk : NULL);

	/*
	 * If curthread already holds the lock and this one is
	 * allowed to recurse, simply recurse on it.
	 */
	if (lockmgr_xlocked(lk)) {
		if ((flags & LK_CANRECURSE) == 0 &&
		    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
			/*
			 * For a try operation, just give up and
			 * return instead of panicking.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk,
				    "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				goto out;
			}
			if (flags & LK_INTERLOCK) {
				class = LOCK_CLASS(ilk);
				class->lc_unlock(ilk);
			}
			panic("%s: recursing on non recursive lockmgr %p "
			    "@ %s:%d\n", __func__, lk, file, line);
		}
		lk->lk_recurse++;
		LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
		LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
		    lk->lk_recurse, file, line);
		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
		    LK_TRYWIT(flags), file, line);
		TD_LOCKS_INC(curthread);
		goto out;
	}

	for (;;) {
		if (lk->lk_lock == LK_UNLOCKED &&
		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
			break;
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&lk->lock_object,
		    &contested, &waittime);

		/*
		 * If the operation must not sleep, just give up
		 * and return.
		 */
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		 */
		sleepq_lock(&lk->lock_object);
		x = lockmgr_read_value(lk);
retry_sleepq:

		/*
		 * If the lock has been released while we spun on
		 * the sleepqueue chain lock, just try again.
		 */
		if (x == LK_UNLOCKED) {
			sleepq_release(&lk->lock_object);
			continue;
		}

		/*
		 * The lock can be in the state where there is a
		 * pending queue of waiters, but still no owner.
		 * This happens when the lock is contested and an
		 * owner is going to claim the lock.
		 * If curthread is the one successfully acquiring it,
		 * claim lock ownership and return, preserving waiters
		 * flags.
		 */
		v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		if ((x & ~v) == LK_UNLOCKED) {
			v &= ~LK_EXCLUSIVE_SPINNERS;
			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
			    tid | v)) {
				sleepq_release(&lk->lock_object);
				LOCK_LOG2(lk,
				    "%s: %p claimed by a new writer",
				    __func__, lk);
				break;
			}
			goto retry_sleepq;
		}

		/*
		 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
		 * fail, loop back and retry.
		 */
		if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
			if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
			    x | LK_EXCLUSIVE_WAITERS)) {
				goto retry_sleepq;
			}
			LOCK_LOG2(lk, "%s: %p set excl waiters flag",
			    __func__, lk);
		}

		if (lwa == NULL) {
			iwmesg = lk->lock_object.lo_name;
			ipri = lk->lk_pri;
			itimo = lk->lk_timo;
		} else {
			iwmesg = lwa->iwmesg;
			ipri = lwa->ipri;
			itimo = lwa->itimo;
		}

		/*
		 * Since we have been unable to acquire the
		 * exclusive lock and the exclusive waiters flag
		 * is set, we will sleep.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&lk->lock_object);
#endif
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		    SQ_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&lk->lock_object);
#endif
		flags &= ~LK_INTERLOCK;
		if (error) {
			LOCK_LOG3(lk,
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
			break;
		}
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
		    __func__, lk);
	}
	if (error == 0) {
#ifdef KDTRACE_HOOKS
		if (sleep_time != 0)
			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
			    LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#endif
#ifdef LOCK_PROFILING
		lockmgr_note_exclusive_acquire(lk, contested, waittime,
		    file, line, flags);
#else
		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
		    flags);
#endif
	}

out:
	lockmgr_exit(flags, ilk, 0);
	return (error);
}
static __noinline int
lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	uintptr_t tid, v, setv;
	int error = 0;
	u_int op;

	if (KERNEL_PANICKED())
		goto out;

	tid = (uintptr_t)curthread;

	_lockmgr_assert(lk, KA_SLOCKED, file, line);

	op = flags & LK_TYPE_MASK;
	v = lockmgr_read_value(lk);
	for (;;) {
		if (LK_SHARERS_LOCK(v) > 1) {
			if (op == LK_TRYUPGRADE) {
				LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
				    __func__, lk);
				error = EBUSY;
				goto out;
			}
			if (lockmgr_sunlock_try(lk, &v)) {
				lockmgr_note_shared_release(lk, file, line);
				goto out_xlock;
			}
		}
		MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));

		setv = tid;
		setv |= (v & LK_ALL_WAITERS);

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
			TD_SLOCKS_DEC(curthread);
			goto out;
		}
	}

out_xlock:
	error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
	flags &= ~LK_INTERLOCK;
out:
	lockmgr_exit(flags, ilk, 0);
	return (error);
}
int
lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
	struct lock_class *class;
	uintptr_t x, tid;
	u_int op;
	bool locked;

	if (KERNEL_PANICKED())
		return (0);

	op = flags & LK_TYPE_MASK;
	locked = false;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
		if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
			break;
		if (lockmgr_slock_try(lk, &x, flags, true)) {
			lockmgr_note_shared_acquire(lk, 0, 0,
			    file, line, flags);
			locked = true;
		} else {
			return (lockmgr_slock_hard(lk, flags, ilk, file, line,
			    NULL));
		}
		break;
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);
		tid = (uintptr_t)curthread;
		if (lockmgr_read_value(lk) == LK_UNLOCKED &&
		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
			    flags);
			locked = true;
		} else {
			return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
			    NULL));
		}
		break;
	case LK_UPGRADE:
	case LK_TRYUPGRADE:
		return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
	default:
		break;
	}
	if (__predict_true(locked)) {
		if (__predict_false(flags & LK_INTERLOCK)) {
			class = LOCK_CLASS(ilk);
			class->lc_unlock(ilk);
		}
		return (0);
	} else {
		return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
	}
}
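/*
 * Illustrative interlocked acquire (the names are examples only): "ilock"
 * is dropped atomically with respect to going to sleep on "lk", so no
 * wakeup can be lost in between:
 *
 *	mtx_lock(&ilock);
 *	...
 *	lockmgr_lock_flags(&lk, LK_EXCLUSIVE | LK_INTERLOCK,
 *	    &ilock.lock_object, __FILE__, __LINE__);
 */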
static __noinline int
lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags,
    struct lock_object *ilk, const char *file, int line)
{
	int wakeup_swapper = 0;

	if (KERNEL_PANICKED())
		goto out;

	wakeup_swapper = wakeupshlk(lk, file, line);

out:
	lockmgr_exit(flags, ilk, wakeup_swapper);
	return (0);
}
static __noinline int
lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags,
    struct lock_object *ilk, const char *file, int line)
{
	uintptr_t tid, v;
	u_int realexslp;
	int queue, wakeup_swapper;

	wakeup_swapper = 0;

	if (KERNEL_PANICKED())
		goto out;

	tid = (uintptr_t)curthread;

	/*
	 * First, treat the lock as if it has no waiters.
	 * Fix up the tid variable if the lock has been disowned.
	 */
	if (LK_HOLDER(x) == LK_KERNPROC)
		tid = LK_KERNPROC;

	/*
	 * The lock is held in exclusive mode.
	 * If the lock is recursed also, then unrecurse it.
	 */
	if (lockmgr_xlocked_v(x) && lockmgr_recursed(lk)) {
		LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
		lk->lk_recurse--;
		goto out;
	}
	if (tid != LK_KERNPROC)
		LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
		    LOCKSTAT_WRITER);

	if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
		goto out;

	sleepq_lock(&lk->lock_object);
	x = lockmgr_read_value(lk);
	v = LK_UNLOCKED;

	/*
	 * If the lock has exclusive waiters, give them
	 * preference in order to avoid deadlock with
	 * shared runners-up.
	 * If interruptible sleeps left the exclusive queue
	 * empty, avoid starvation of the threads sleeping
	 * on the shared queue by giving them precedence
	 * and cleaning up the exclusive waiters bit anyway.
	 * Please note that the lk_exslpfail count may
	 * overstate the real number of waiters with the
	 * LK_SLEEPFAIL flag on, because they may also be
	 * used in conjunction with interruptible sleeps, so
	 * lk_exslpfail should be considered an upper bound,
	 * including the edge cases.
	 */
	MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
	realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
	if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
		if (lk->lk_exslpfail < realexslp) {
			lk->lk_exslpfail = 0;
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
		} else {
			lk->lk_exslpfail = 0;
			LOCK_LOG2(lk,
			    "%s: %p has only LK_SLEEPFAIL sleepers",
			    __func__, lk);
			LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
			    __func__, lk);
			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
			queue = SQ_SHARED_QUEUE;
		}
	} else {
		/*
		 * Exclusive waiters sleeping with LK_SLEEPFAIL
		 * on and using interruptible sleeps/timeout
		 * may have left spurious lk_exslpfail counts
		 * on, so clean it up anyway.
		 */
		lk->lk_exslpfail = 0;
		queue = SQ_SHARED_QUEUE;
	}

	LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
	    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
	    "exclusive");
	atomic_store_rel_ptr(&lk->lk_lock, v);
	wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
	sleepq_release(&lk->lock_object);

out:
	lockmgr_exit(flags, ilk, wakeup_swapper);
	return (0);
}
/*
 * Lightweight entry points for common operations.
 *
 * Functionality is similar to sx locks, in that none of the additional lockmgr
 * features are supported.  To be clear, these are NOT supported:
 * 1. shared locking disablement
 * 2. returning with an error after sleep
 * 3. unlocking the interlock
 *
 * If in doubt, use lockmgr_lock_flags.
 */
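/*
 * A minimal, illustrative use of the lightweight entry points (real
 * callers normally pass LOCK_FILE/LOCK_LINE through wrapper macros):
 *
 *	if (lockmgr_slock(&lk, LK_SHARED, __FILE__, __LINE__) == 0) {
 *		...
 *		lockmgr_unlock(&lk);
 *	}
 */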
int
lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
{
	uintptr_t x;

	MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
	MPASS((flags & LK_INTERLOCK) == 0);
	MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
		    file, line, NULL);
	if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
		lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
		return (0);
	}

	return (lockmgr_slock_hard(lk, flags, NULL, file, line, NULL));
}

int
lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
{
	uintptr_t tid;

	MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
	MPASS((flags & LK_INTERLOCK) == 0);

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
		    LOP_EXCLUSIVE, file, line, NULL);
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
		    flags);
		return (0);
	}

	return (lockmgr_xlock_hard(lk, flags, NULL, file, line, NULL));
}
int
lockmgr_unlock(struct lock *lk)
{
	uintptr_t x, tid;
	const char *file;
	int line;

	file = __FILE__;
	line = __LINE__;

	_lockmgr_assert(lk, KA_LOCKED, file, line);
	x = lockmgr_read_value(lk);
	if (__predict_true(x & LK_SHARE) != 0) {
		lockmgr_note_shared_release(lk, file, line);
		if (lockmgr_sunlock_try(lk, &x)) {
			LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
		} else {
			return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
		}
	} else {
		tid = (uintptr_t)curthread;
		lockmgr_note_exclusive_release(lk, file, line);
		if (!lockmgr_recursed(lk) &&
		    atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
			LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
		} else {
			return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
		}
	}
	return (0);
}
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lockmgr_wait lwa;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif

	if (KERNEL_PANICKED())
		return (0);

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	lwa.iwmesg = iwmesg;
	lwa.ipri = ipri;
	lwa.itimo = itimo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_TRYUPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
	case LK_UPGRADE:
	case LK_TRYUPGRADE:
		return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
	case LK_EXCLUSIVE:
		return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

		/*
		 * Panic if the lock is recursed.
		 */
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lockmgr_read_value(lk);
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lockmgr_read_value(lk);

		if (__predict_true(x & LK_SHARE) != 0) {
			lockmgr_note_shared_release(lk, file, line);
			return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
		} else {
			lockmgr_note_exclusive_release(lk, file, line);
			return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
		}
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		for (;;) {
			if (lk->lk_lock == LK_UNLOCKED &&
			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
				break;

#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation must not sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lockmgr_read_value(lk);

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starvation of the
				 * threads sleeping on the shared queue by
				 * giving them precedence and cleaning up the
				 * exclusive waiters bit anyway.
				 * Please note that the lk_exslpfail count may
				 * overstate the real number of waiters with
				 * the LK_SLEEPFAIL flag on, because they may
				 * also be used in conjunction with
				 * interruptible sleeps, so lk_exslpfail
				 * should be considered an upper bound,
				 * including the edge cases.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {
					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
							    "%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
							    "%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							wakeup_swapper =
							    sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				    "%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up we need
				 * to wait for one of them to acquire the lock
				 * before setting the exclusive waiters bit, in
				 * order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);

	/*
	 * Panic if the lock is recursed.
	 */
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lockmgr_read_value(lk);
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}
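/*
 * Disowning hands ownership to the kernel as a whole: the holder becomes
 * LK_KERNPROC and any thread may later release the lock.  The classic
 * consumer is the buffer cache, which disowns a buffer lock (e.g. via
 * BUF_KERNPROC()) so that async I/O completion may unlock it from
 * another thread.
 */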
void
lockmgr_printinfo(const struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			printf("lock type %s: EXCL by KERNPROC\n",
			    lk->lock_object.lo_name);
		else
			printf("lock type %s: EXCL by thread %p "
			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
			    td, td->td_proc->p_pid, td->td_proc->p_comm,
			    td->td_tid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}
int
lockstatus(const struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lockmgr_read_value(lk);
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}
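/*
 * lockstatus() reports how the lock is held relative to curthread:
 * LK_EXCLUSIVE (held exclusively by us, or disowned to the kernel),
 * LK_EXCLOTHER (held exclusively by another thread), LK_SHARED, or 0
 * for unlocked.  An illustrative caller:
 *
 *	if (lockstatus(&lk) == LK_EXCLUSIVE)
 *		... the protected data may be modified ...
 */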
#ifdef INVARIANT_SUPPORT

FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");

#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (KERNEL_PANICKED())
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is
		 * held in exclusive mode even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif
#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	const struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}
static void
db_show_lockmgr(const struct lock_object *lock)
{
	struct thread *td;
	const struct lock *lk;

	lk = (const struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
	else
		db_printf("none\n");
}
#endif