 * SPDX-License-Identifier: BSD-2-Clause
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
#include "opt_hwpmc_hooks.h"
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/lockstat.h>
#include <sys/mutex.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#define	LOCK_LOG2(lk, string, arg1, arg2)	\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))	\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)	\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))	\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
#define	GIANT_DECLARE	\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {	\
	if (__predict_false(_i > 0)) {	\
		WITNESS_RESTORE(&Giant.lock_object, Giant);	\
#define	GIANT_SAVE() do {	\
	if (__predict_false(mtx_owned(&Giant))) {	\
		WITNESS_SAVE(&Giant.lock_object, Giant);	\
		while (mtx_owned(&Giant)) {	\
			mtx_unlock(&Giant);	\
static bool __always_inline
LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
	if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
	if (fp || (!(x & LK_SHARE)))
	if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
	    (curthread->td_pflags & TDP_DEADLKTREAT))
#define	LK_TRYOP(x)	\
#define	LK_CAN_WITNESS(x)	\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)	\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
#define	lockmgr_disowned(lk)	\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
#define	lockmgr_xlocked_v(v)	\
	(((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
#define	lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
static void	assert_lockmgr(const struct lock_object *lock, int how);
static void	db_show_lockmgr(const struct lock_object *lock);
static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
static uintptr_t unlock_lockmgr(struct lock_object *lock);
struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
	.lc_ddb_show = db_show_lockmgr,
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
	.lc_owner = owner_lockmgr,
static __read_mostly bool lk_adaptive = true;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
#define	lockmgr_delay	locks_delay
struct lockmgr_wait {
static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
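/*
 * Common exit path for the lock/unlock slow paths: drop the interlock if
 * LK_INTERLOCK was requested and, if a sleeping thread was made runnable,
 * kick the swapper.
 */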
lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
	struct lock_class *class;
	if (flags & LK_INTERLOCK) {
		class = LOCK_CLASS(ilk);
		class->lc_unlock(ilk);
	if (__predict_false(wakeup_swapper))
lockmgr_note_shared_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
	    waittime, file, line, LOCKSTAT_READER);
	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
	TD_LOCKS_INC(curthread);
	TD_SLOCKS_INC(curthread);
lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
	    waittime, file, line, LOCKSTAT_WRITER);
	LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
	TD_LOCKS_INC(curthread);
lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
	if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
		WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_DEC(curthread);
	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
	x = lockmgr_read_value(lk);
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
 * It assumes the sleepqueue lock is held and returns with it unheld.
 * It also assumes the generic interlock is sane and previously checked.
 * If LK_INTERLOCK is specified the interlock is not reacquired after the
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
	struct lock_class *class;
	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);
	 * Decide which sleepqueue wait primitive to use for the real sleep.
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
		error = sleepq_wait_sig(&lk->lock_object, pri);
		sleepq_wait(&lk->lock_object, pri);
	if ((flags & LK_SLEEPFAIL) && error == 0)
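/*
 * Release a shared hold of the lock.  The fast path is lockmgr_sunlock_try();
 * if waiters must be handled, the sleepqueue lock is taken and the preferred
 * queue (exclusive first, subject to the lk_exslpfail exception described
 * below) is broadcast.  Returns whether the swapper needs a wakeup.
 */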
wakeupshlk(struct lock *lk, const char *file, int line)
	uintptr_t v, x, orig_x;
	int queue, wakeup_swapper;
		x = lockmgr_read_value(lk);
		if (lockmgr_sunlock_try(lk, &x))
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		sleepq_lock(&lk->lock_object);
		orig_x = lockmgr_read_value(lk);
		x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and clearing the
		 * exclusive waiters bit anyway.
		 * Note that the lk_exslpfail count may overstate the real
		 * number of waiters with the LK_SLEEPFAIL flag on, because
		 * such sleeps may also be interruptible, so lk_exslpfail
		 * should be treated as an upper bound, including the edge
		 * cases.
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
				lk->lk_exslpfail = 0;
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    "%s: %p waking up threads on the exclusive queue",
				sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		if (lockmgr_sunlock_try(lk, &orig_x)) {
			sleepq_release(&lk->lock_object);
		x |= LK_SHARERS_LOCK(1);
		if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		sleepq_release(&lk->lock_object);
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
	return (wakeup_swapper);
assert_lockmgr(const struct lock_object *lock, int what)
	panic("lockmgr locks do not support assertions");
lock_lockmgr(struct lock_object *lock, uintptr_t how)
	panic("lockmgr locks do not support sleep interlocking");
unlock_lockmgr(struct lock_object *lock)
	panic("lockmgr locks do not support sleep interlocking");
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
	panic("lockmgr locks do not support owner inquiring");
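/*
 * Initialize a lockmgr lock: translate the LK_* initialization flags into
 * lock_object LO_* flags and start out in the unlocked state.
 *
 * Illustrative usage (a sketch only, not code taken from this file):
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, LK_CANRECURSE);
 *	...
 *	lockdestroy(&lk);
 */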
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
	if (flags & LK_IS_VNODE)
		iflags |= LO_IS_VNODE;
	iflags |= flags & LK_NOSHARE;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	lk->lk_lock = LK_UNLOCKED;
	lk->lk_exslpfail = 0;
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization. Used for certain vnode and buf locks.
lockallowshare(struct lock *lk)
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
lockdisableshare(struct lock *lk)
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LK_NOSHARE;
lockallowrecurse(struct lock *lk)
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
lockdisablerecurse(struct lock *lk)
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
lockdestroy(struct lock *lk)
	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
static bool __always_inline
lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
	 * If no other thread has an exclusive lock, or
	 * no exclusive waiter is present, bump the count of
	 * sharers.  Since we have to preserve the state of
	 * waiters, if we fail to acquire the shared lock
	 * loop back and retry.
	while (LK_CAN_SHARE(*xp, flags, fp)) {
		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
		    *xp + LK_ONE_SHARER)) {
static bool __always_inline
lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
	if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
		if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
		    *xp - LK_ONE_SHARER))
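/*
 * Adaptive spinning for the shared path: while the exclusive owner is running
 * on another CPU, back off using the lock_delay machinery and re-read the
 * lock word in the hope of acquiring it without sleeping.  Give up once
 * waiters show up, the owner stops running, or sharing becomes possible.
 */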
lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
	struct thread *owner;
	MPASS(x != LK_UNLOCKED);
	owner = (struct thread *)LK_HOLDER(x);
		MPASS(owner != curthread);
		if (owner == (struct thread *)LK_KERNPROC)
		if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
		if (!TD_IS_RUNNING(owner))
		if ((x & LK_ALL_WAITERS) != 0)
		x = lockmgr_read_value(lk);
		if (LK_CAN_SHARE(x, flags, false)) {
		owner = (struct thread *)LK_HOLDER(x);
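/*
 * Slow path for shared acquisition: performs the WITNESS checks, optional
 * adaptive spinning, and, failing those, sets LK_SHARED_WAITERS and sleeps
 * on the shared queue until the lock can be shared.
 */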
static __noinline int
lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
	uint64_t sleep_time = 0;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	struct lock_delay_arg lda;
	if (SCHEDULER_STOPPED())
	tid = (uintptr_t)curthread;
	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
		    file, line, flags & LK_INTERLOCK ? ilk : NULL);
	x = lockmgr_read_value(lk);
	lock_delay_arg_init(&lda, &lockmgr_delay);
		flags &= ~LK_ADAPTIVE;
	 * The lock may already be locked exclusive by curthread,
	if (LK_HOLDER(x) == tid) {
		    "%s: %p already held in exclusive mode",
		if (lockmgr_slock_try(lk, &x, flags, false))
		lock_profile_obtain_lock_failed(&lk->lock_object, false,
		    &contested, &waittime);
		if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
			if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
		PMC_SOFT_CALL( , , lock, failed);
		 * If the lock is expected to not sleep just give up
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",
		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		sleepq_lock(&lk->lock_object);
		x = lockmgr_read_value(lk);
		 * If the lock can be acquired in shared mode, try
		if (LK_CAN_SHARE(x, flags, false)) {
			sleepq_release(&lk->lock_object);
		 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
		 * loop back and retry.
		if ((x & LK_SHARED_WAITERS) == 0) {
			if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
			    x | LK_SHARED_WAITERS)) {
			LOCK_LOG2(lk, "%s: %p set shared waiters flag",
			iwmesg = lk->lock_object.lo_name;
			iwmesg = lwa->iwmesg;
		 * Since we have been unable to acquire the
		 * shared lock and the shared waiters flag is set,
		sleep_time -= lockstat_nsecs(&lk->lock_object);
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		sleep_time += lockstat_nsecs(&lk->lock_object);
		flags &= ~LK_INTERLOCK;
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
		x = lockmgr_read_value(lk);
	LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
	    LOCKSTAT_READER, (x & LK_SHARE) == 0,
	    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#ifdef LOCK_PROFILING
	lockmgr_note_shared_acquire(lk, contested, waittime,
	lockmgr_note_shared_acquire(lk, 0, 0, file, line,
	lockmgr_exit(flags, ilk, 0);
lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
	struct thread *owner;
	MPASS(x != LK_UNLOCKED);
	owner = (struct thread *)LK_HOLDER(x);
		MPASS(owner != curthread);
		if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
		if (owner == (struct thread *)LK_KERNPROC)
		if (!TD_IS_RUNNING(owner))
		if ((x & LK_ALL_WAITERS) != 0)
		x = lockmgr_read_value(lk);
		if (x == LK_UNLOCKED) {
		owner = (struct thread *)LK_HOLDER(x);
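/*
 * Slow path for exclusive acquisition: handles recursion, optional adaptive
 * spinning, claiming an unowned but contested lock, and, failing those,
 * sets LK_EXCLUSIVE_WAITERS and sleeps on the exclusive queue.
 */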
static __noinline int
lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
	struct lock_class *class;
	uint64_t sleep_time = 0;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	struct lock_delay_arg lda;
	if (SCHEDULER_STOPPED())
	tid = (uintptr_t)curthread;
	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
		    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
	 * If curthread already holds the lock and this one is
	 * allowed to recurse, simply recurse on it.
	if (lockmgr_xlocked(lk)) {
		if ((flags & LK_CANRECURSE) == 0 &&
		    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
			 * If the lock is expected to not panic just
			 * give up and return.
			if (LK_TRYOP(flags)) {
				    "%s: %p fails the try operation",
			if (flags & LK_INTERLOCK) {
				class = LOCK_CLASS(ilk);
				class->lc_unlock(ilk);
			panic("%s: recursing on non recursive lockmgr %p "
			    "@ %s:%d\n", __func__, lk, file, line);
		atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
		LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
		LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
		    lk->lk_recurse, file, line);
		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
		    LK_TRYWIT(flags), file, line);
		TD_LOCKS_INC(curthread);
	lock_delay_arg_init(&lda, &lockmgr_delay);
		flags &= ~LK_ADAPTIVE;
		if (x == LK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
		lock_profile_obtain_lock_failed(&lk->lock_object, false,
		    &contested, &waittime);
		if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
			if (lockmgr_xlock_adaptive(&lda, lk, &x))
		PMC_SOFT_CALL( , , lock, failed);
		 * If the lock is expected to not sleep just give up
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",
		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		sleepq_lock(&lk->lock_object);
		x = lockmgr_read_value(lk);
		 * If the lock has been released while we spun on
		 * the sleepqueue chain lock just try again.
		if (x == LK_UNLOCKED) {
			sleepq_release(&lk->lock_object);
		 * The lock can be in the state where there is a
		 * pending queue of waiters, but still no owner.
		 * This happens when the lock is contested and an
		 * owner is going to claim the lock.
		 * If curthread is the one successfully acquiring it,
		 * claim lock ownership and return, preserving waiters
		v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		if ((x & ~v) == LK_UNLOCKED) {
			v &= ~LK_EXCLUSIVE_SPINNERS;
			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
				sleepq_release(&lk->lock_object);
				    "%s: %p claimed by a new writer",
		 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
		 * fail, loop back and retry.
		if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
			if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
			    x | LK_EXCLUSIVE_WAITERS)) {
			LOCK_LOG2(lk, "%s: %p set excl waiters flag",
			iwmesg = lk->lock_object.lo_name;
			iwmesg = lwa->iwmesg;
		 * Since we have been unable to acquire the
		 * exclusive lock and the exclusive waiters flag
		 * is set, we will sleep.
		sleep_time -= lockstat_nsecs(&lk->lock_object);
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		sleep_time += lockstat_nsecs(&lk->lock_object);
		flags &= ~LK_INTERLOCK;
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
		x = lockmgr_read_value(lk);
	LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
	    LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
	    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#ifdef LOCK_PROFILING
	lockmgr_note_exclusive_acquire(lk, contested, waittime,
	lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
	lockmgr_exit(flags, ilk, 0);
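/*
 * Upgrade a shared hold to an exclusive one.  With more than one sharer the
 * shared hold is dropped and the lock is reacquired through
 * lockmgr_xlock_hard() (LK_TRYUPGRADE fails instead of sleeping); with a
 * single sharer the lock word is switched in place, preserving waiter bits.
 */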
static __noinline int
lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
	uintptr_t tid, v, setv;
	if (SCHEDULER_STOPPED())
	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_SLOCKED, file, line);
	op = flags & LK_TYPE_MASK;
	v = lockmgr_read_value(lk);
	if (LK_SHARERS(v) > 1) {
		if (op == LK_TRYUPGRADE) {
			LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
		if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
		    v - LK_ONE_SHARER)) {
			lockmgr_note_shared_release(lk, file, line);
	MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));
	setv |= (v & LK_ALL_WAITERS);
	 * Try to switch from one shared lock to an exclusive one.
	 * We need to preserve waiters flags during the operation.
	if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
		LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
		WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
		    LK_TRYWIT(flags), file, line);
		LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
		TD_SLOCKS_DEC(curthread);
	error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
	flags &= ~LK_INTERLOCK;
	lockmgr_exit(flags, ilk, 0);
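/*
 * Full-flavored lock entry point: dispatches on the requested operation
 * (shared, exclusive, upgrade), honors LK_INTERLOCK, and falls back to
 * __lockmgr_args() for the operations not handled inline here.
 */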
1031 lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
1032 const char *file, int line)
1034 struct lock_class *class;
1039 if (SCHEDULER_STOPPED())
1042 op = flags & LK_TYPE_MASK;
1046 if (LK_CAN_WITNESS(flags))
1047 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1048 file, line, flags & LK_INTERLOCK ? ilk : NULL);
1049 if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
1051 x = lockmgr_read_value(lk);
1052 if (lockmgr_slock_try(lk, &x, flags, true)) {
1053 lockmgr_note_shared_acquire(lk, 0, 0,
1057 return (lockmgr_slock_hard(lk, flags, ilk, file, line,
1062 if (LK_CAN_WITNESS(flags))
1063 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1064 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1066 tid = (uintptr_t)curthread;
1067 if (lockmgr_read_value(lk) == LK_UNLOCKED &&
1068 atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1069 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1073 return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
1079 return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
1083 if (__predict_true(locked)) {
1084 if (__predict_false(flags & LK_INTERLOCK)) {
1085 class = LOCK_CLASS(ilk);
1086 class->lc_unlock(ilk);
1090 return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
1091 LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
1095 static __noinline int
1096 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1097 const char *file, int line)
1100 int wakeup_swapper = 0;
1102 if (SCHEDULER_STOPPED())
1105 wakeup_swapper = wakeupshlk(lk, file, line);
1108 lockmgr_exit(flags, ilk, wakeup_swapper);
1112 static __noinline int
1113 lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1114 const char *file, int line)
1117 int wakeup_swapper = 0;
1121 if (SCHEDULER_STOPPED())
1124 tid = (uintptr_t)curthread;
1127 * As first option, treact the lock as if it has not
1129 * Fix-up the tid var if the lock has been disowned.
1131 if (LK_HOLDER(x) == LK_KERNPROC)
1135 * The lock is held in exclusive mode.
1136 * If the lock is recursed also, then unrecurse it.
1138 if (lockmgr_recursed_v(x)) {
1139 LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
1141 if (lk->lk_recurse == 0)
1142 atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
1145 if (tid != LK_KERNPROC)
1146 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
1149 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
1152 sleepq_lock(&lk->lock_object);
1153 x = lockmgr_read_value(lk);
1157 * If the lock has exclusive waiters, give them
1158 * preference in order to avoid deadlock with
1159 * shared runners up.
1160 * If interruptible sleeps left the exclusive queue
1161 * empty avoid a starvation for the threads sleeping
1162 * on the shared queue by giving them precedence
1163 * and cleaning up the exclusive waiters bit anyway.
1164 * Please note that lk_exslpfail count may be lying
1165 * about the real number of waiters with the
1166 * LK_SLEEPFAIL flag on because they may be used in
1167 * conjunction with interruptible sleeps so
1168 * lk_exslpfail might be considered an 'upper limit'
1169 * bound, including the edge cases.
1171 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1172 realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
1173 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1174 if (lk->lk_exslpfail < realexslp) {
1175 lk->lk_exslpfail = 0;
1176 queue = SQ_EXCLUSIVE_QUEUE;
1177 v |= (x & LK_SHARED_WAITERS);
1179 lk->lk_exslpfail = 0;
1181 "%s: %p has only LK_SLEEPFAIL sleepers",
1184 "%s: %p waking up threads on the exclusive queue",
1186 wakeup_swapper = sleepq_broadcast(&lk->lock_object,
1187 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1188 queue = SQ_SHARED_QUEUE;
1192 * Exclusive waiters sleeping with LK_SLEEPFAIL
1193 * on and using interruptible sleeps/timeout
1194 * may have left spourious lk_exslpfail counts
1195 * on, so clean it up anyway.
1197 lk->lk_exslpfail = 0;
1198 queue = SQ_SHARED_QUEUE;
1201 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
1202 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1204 atomic_store_rel_ptr(&lk->lk_lock, v);
1205 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
1206 sleepq_release(&lk->lock_object);
1209 lockmgr_exit(flags, ilk, wakeup_swapper);
1214 * Lightweight entry points for common operations.
1216 * Functionality is similar to sx locks, in that none of the additional lockmgr
1217 * features are supported. To be clear, these are NOT supported:
1218 * 1. shared locking disablement
1219 * 2. returning with an error after sleep
1220 * 3. unlocking the interlock
1222 * If in doubt, use lockmgr_lock_flags.
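 *
 * Illustrative pairing of these entry points (a sketch only, not code taken
 * from this file):
 *
 *	lockmgr_xlock(&lk, LK_EXCLUSIVE, __FILE__, __LINE__);
 *	...
 *	lockmgr_unlock(&lk);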
lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
	MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
	MPASS((flags & LK_INTERLOCK) == 0);
	MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
	x = lockmgr_read_value(lk);
	if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
		lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
	return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
	MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
	MPASS((flags & LK_INTERLOCK) == 0);
	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
		    LOP_EXCLUSIVE, file, line, NULL);
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
	return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
lockmgr_unlock(struct lock *lk)
	_lockmgr_assert(lk, KA_LOCKED, file, line);
	x = lockmgr_read_value(lk);
	if (__predict_true(x & LK_SHARE) != 0) {
		lockmgr_note_shared_release(lk, file, line);
		if (lockmgr_sunlock_try(lk, &x)) {
			LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
			return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
		tid = (uintptr_t)curthread;
		lockmgr_note_exclusive_release(lk, file, line);
		if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
			LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
			return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
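/*
 * The full-featured entry point.  The operation is taken from LK_TYPE_MASK
 * (shared, exclusive, upgrade, downgrade, release or drain), and the wait
 * message, priority and timeout may be overridden per call; the LK_*_DEFAULT
 * values select what was configured at lockinit() time.
 */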
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
	struct lockmgr_wait lwa;
	struct lock_class *class;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	if (SCHEDULER_STOPPED())
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
	lwa.iwmesg = iwmesg;
	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));
	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (lk->lock_object.lo_flags & LK_NOSHARE) {
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
		return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
		return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
		return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
		 * Panic if the lock is recursed.
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		TD_SLOCKS_INC(curthread);
		 * In order to preserve waiters flags, just spin.
			x = lockmgr_read_value(lk);
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lockmgr_read_value(lk);
		if (__predict_true(x & LK_SHARE) != 0) {
			lockmgr_note_shared_release(lk, file, line);
			return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
			lockmgr_note_exclusive_release(lk, file, line);
			return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
		 * Trying to drain a lock we already own will result in a
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
			if (lk->lk_lock == LK_UNLOCKED &&
			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
			PMC_SOFT_CALL( , , lock, failed);
			lock_profile_obtain_lock_failed(&lk->lock_object, false,
			    &contested, &waittime);
			 * If the lock is expected to not sleep just give up
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			sleepq_lock(&lk->lock_object);
			x = lockmgr_read_value(lk);
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starvation of the
				 * threads sleeping on the shared queue by
				 * giving them precedence and clearing the
				 * exclusive waiters bit anyway.
				 * Note that the lk_exslpfail count may
				 * overstate the real number of waiters with
				 * the LK_SLEEPFAIL flag on because such
				 * sleeps may also be interruptible, so
				 * lk_exslpfail should be treated as an upper
				 * bound, including the edge
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
					"%s: %p has only LK_SLEEPFAIL sleepers",
			"%s: %p waking up threads on the exclusive queue",
							    SQ_EXCLUSIVE_QUEUE);
						lk->lk_exslpfail = 0;
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);
				 * If shared waiters have been woken up we need
				 * to wait for one of them to acquire the lock
				 * before setting the exclusive waiters in
				 * order to avoid a deadlock.
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
		lock_profile_obtain_lock_success(&lk->lock_object,
		    false, contested, waittime, file, line);
		LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
		    lk->lk_recurse, file, line);
		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
		    LK_TRYWIT(flags), file, line);
		TD_LOCKS_INC(curthread);
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
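/*
 * Disown an exclusively held lock: hand ownership to LK_KERNPROC so that the
 * lock may later be released by a thread other than the one that acquired it.
 * Panics if the lock is recursed; does nothing if it is already disowned.
 */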
_lockmgr_disown(struct lock *lk, const char *file, int line)
	if (SCHEDULER_STOPPED())
	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);
	 * Panic if the lock is recursed.
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__, file, line);
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	if (LK_HOLDER(lk->lk_lock) != tid)
	lock_profile_release_lock(&lk->lock_object, false);
	LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	 * In order to preserve waiters flags, just spin.
		x = lockmgr_read_value(lk);
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
lockmgr_printinfo(const struct lock *lk)
	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			printf("lock type %s: EXCL by KERNPROC\n",
			    lk->lock_object.lo_name);
			printf("lock type %s: EXCL by thread %p "
			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
			    td, td->td_proc->p_pid, td->td_proc->p_comm,
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");
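/*
 * Report how the lock is currently held relative to curthread: exclusively
 * by curthread (or disowned to LK_KERNPROC), exclusively by another thread,
 * shared, or not held at all.
 */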
lockstatus(const struct lock *lk)
	x = lockmgr_read_value(lk);
	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
	} else if (x == LK_UNLOCKED)
#ifdef INVARIANT_SUPPORT
FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");
#undef	_lockmgr_assert
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
	if (SCHEDULER_STOPPED())
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is
		 * held in exclusive mode even for the KA_LOCKED case.
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
lockmgr_chain(struct thread *td, struct thread **ownerp)
	const struct lock *lk;
	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);
db_show_lockmgr(const struct lock_object *lock)
	const struct lock *lk;
	lk = (const struct lock *)lock;
	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
	if (lockmgr_recursed(lk))
		db_printf(" recursed: %d\n", lk->lk_recurse);
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		db_printf("none\n");
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
		db_printf("none\n");