2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice(s), this list of conditions and the following disclaimer as
12 * the first lines of this file unmodified other than the possible
13 * addition of one or more copyright notices.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice(s), this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 #include "opt_hwpmc_hooks.h"
34 #include <sys/param.h>
37 #include <sys/limits.h>
39 #include <sys/lock_profile.h>
40 #include <sys/lockmgr.h>
41 #include <sys/lockstat.h>
42 #include <sys/mutex.h>
44 #include <sys/sleepqueue.h>
46 #include <sys/stack.h>
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
51 #include <machine/cpu.h>
58 #include <sys/pmckern.h>
59 PMC_SOFT_DECLARE( , , lock, failed);
63 * Hack. There should be prio_t or similar so that this is not necessary.
65 _Static_assert((PRILASTFLAG * 2) - 1 <= USHRT_MAX,
66 "prio flags wont fit in u_short pri in struct lock");
68 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
69 ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
71 #define SQ_EXCLUSIVE_QUEUE 0
72 #define SQ_SHARED_QUEUE 1
75 #define _lockmgr_assert(lk, what, file, line)
78 #define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++)
79 #define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--)
82 #define STACK_PRINT(lk)
83 #define STACK_SAVE(lk)
84 #define STACK_ZERO(lk)
86 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
87 #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
88 #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
91 #define LOCK_LOG2(lk, string, arg1, arg2) \
92 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
93 CTR2(KTR_LOCK, (string), (arg1), (arg2))
94 #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
95 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
96 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
98 #define GIANT_DECLARE \
100 WITNESS_SAVE_DECL(Giant)
101 #define GIANT_RESTORE() do { \
102 if (__predict_false(_i > 0)) { \
105 WITNESS_RESTORE(&Giant.lock_object, Giant); \
108 #define GIANT_SAVE() do { \
109 if (__predict_false(mtx_owned(&Giant))) { \
110 WITNESS_SAVE(&Giant.lock_object, Giant); \
111 while (mtx_owned(&Giant)) { \
113 mtx_unlock(&Giant); \
118 static bool __always_inline
119 LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
122 if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
125 if (fp || (!(x & LK_SHARE)))
127 if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
128 (curthread->td_pflags & TDP_DEADLKTREAT))
133 #define LK_TRYOP(x) \
136 #define LK_CAN_WITNESS(x) \
137 (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
138 #define LK_TRYWIT(x) \
139 (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
141 #define lockmgr_disowned(lk) \
142 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
144 #define lockmgr_xlocked_v(v) \
145 (((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
147 #define lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
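/*
 * Illustrative example (an assumption about the lock word encoding, not
 * text from the original file): an exclusively held lock stores the
 * owning thread pointer directly in lk_lock, with only flag bits added,
 * so a lock owned by curthread that also has exclusive waiters reads as
 *
 *	(uintptr_t)curthread | LK_EXCLUSIVE_WAITERS
 *
 * which is why the macros above mask off the flag bits (LK_FLAGMASK
 * minus LK_SHARE) before comparing against curthread or LK_KERNPROC.
 */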
149 static void assert_lockmgr(const struct lock_object *lock, int how);
151 static void db_show_lockmgr(const struct lock_object *lock);
153 static void lock_lockmgr(struct lock_object *lock, uintptr_t how);
155 static int owner_lockmgr(const struct lock_object *lock,
156 struct thread **owner);
158 static uintptr_t unlock_lockmgr(struct lock_object *lock);
160 struct lock_class lock_class_lockmgr = {
161 .lc_name = "lockmgr",
162 .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
163 .lc_assert = assert_lockmgr,
165 .lc_ddb_show = db_show_lockmgr,
167 .lc_lock = lock_lockmgr,
168 .lc_unlock = unlock_lockmgr,
170 .lc_owner = owner_lockmgr,
174 static __read_mostly bool lk_adaptive = true;
175 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
176 SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
178 #define lockmgr_delay locks_delay
180 struct lockmgr_wait {
186 static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
188 static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
191 lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
193 struct lock_class *class;
195 if (flags & LK_INTERLOCK) {
196 class = LOCK_CLASS(ilk);
197 class->lc_unlock(ilk);
200 if (__predict_false(wakeup_swapper))
205 lockmgr_note_shared_acquire(struct lock *lk, int contested,
206 uint64_t waittime, const char *file, int line, int flags)
209 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
210 waittime, file, line, LOCKSTAT_READER);
211 LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
212 WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
213 TD_LOCKS_INC(curthread);
214 TD_SLOCKS_INC(curthread);
219 lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
222 WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
223 LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
224 TD_LOCKS_DEC(curthread);
225 TD_SLOCKS_DEC(curthread);
229 lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
230 uint64_t waittime, const char *file, int line, int flags)
233 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
234 waittime, file, line, LOCKSTAT_WRITER);
235 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
236 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
238 TD_LOCKS_INC(curthread);
243 lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
246 if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
247 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
248 TD_LOCKS_DEC(curthread);
250 LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
254 static __inline struct thread *
255 lockmgr_xholder(const struct lock *lk)
259 x = lockmgr_read_value(lk);
260 return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
264 * It assumes the sleepqueue chain lock is held and returns with it unheld.
265 * It also assumes the generic interlock is sane and was previously checked.
266 * If LK_INTERLOCK is specified, the interlock is not reacquired after the sleep.
270 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
271 const char *wmesg, int pri, int timo, int queue)
274 struct lock_class *class;
277 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
278 catch = pri & PCATCH;
282 LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
283 (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
285 if (flags & LK_INTERLOCK)
286 class->lc_unlock(ilk);
287 if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) {
288 if (lk->lk_exslpfail < USHRT_MAX)
292 sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
293 SLEEPQ_INTERRUPTIBLE : 0), queue);
294 if ((flags & LK_TIMELOCK) && timo)
295 sleepq_set_timeout(&lk->lock_object, timo);
298 * Decide how to sleep, based on the timeout and catch (PCATCH) settings.
300 if ((flags & LK_TIMELOCK) && timo && catch)
301 error = sleepq_timedwait_sig(&lk->lock_object, pri);
302 else if ((flags & LK_TIMELOCK) && timo)
303 error = sleepq_timedwait(&lk->lock_object, pri);
305 error = sleepq_wait_sig(&lk->lock_object, pri);
307 sleepq_wait(&lk->lock_object, pri);
309 if ((flags & LK_SLEEPFAIL) && error == 0)
316 wakeupshlk(struct lock *lk, const char *file, int line)
318 uintptr_t v, x, orig_x;
320 int queue, wakeup_swapper;
324 x = lockmgr_read_value(lk);
325 if (lockmgr_sunlock_try(lk, &x))
329 * We should have a sharer with waiters, so enter the hard
330 * path in order to handle wakeups correctly.
332 sleepq_lock(&lk->lock_object);
333 orig_x = lockmgr_read_value(lk);
335 x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
339 * If the lock has exclusive waiters, give them preference in
340 * order to avoid a deadlock with the shared runners-up.
341 * If interruptible sleeps left the exclusive queue empty,
342 * avoid starvation of the threads sleeping on the shared
343 * queue by giving them precedence and clearing the
344 * exclusive waiters bit anyway.
345 * Note that the lk_exslpfail count may overstate the real
346 * number of waiters carrying the LK_SLEEPFAIL flag, because
347 * such sleeps may also be interruptible; treat lk_exslpfail
348 * as an upper bound, including the edge cases.
351 realexslp = sleepq_sleepcnt(&lk->lock_object,
353 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
354 if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
355 lk->lk_exslpfail = 0;
356 queue = SQ_EXCLUSIVE_QUEUE;
357 v |= (x & LK_SHARED_WAITERS);
359 lk->lk_exslpfail = 0;
361 "%s: %p has only LK_SLEEPFAIL sleepers",
364 "%s: %p waking up threads on the exclusive queue",
367 sleepq_broadcast(&lk->lock_object,
368 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
369 queue = SQ_SHARED_QUEUE;
373 * Exclusive waiters sleeping with LK_SLEEPFAIL on
374 * and using interruptible sleeps/timeouts may have
375 * left spurious lk_exslpfail counts, so clean them up anyway.
378 lk->lk_exslpfail = 0;
379 queue = SQ_SHARED_QUEUE;
382 if (lockmgr_sunlock_try(lk, &orig_x)) {
383 sleepq_release(&lk->lock_object);
387 x |= LK_SHARERS_LOCK(1);
388 if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
392 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
393 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
395 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
397 sleepq_release(&lk->lock_object);
401 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
402 return (wakeup_swapper);
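/*
 * Illustrative sketch (hedged; the kick_proc0() call is an assumption
 * about the outermost callers, not taken from this file): a nonzero
 * wakeup_swapper value is propagated upward so the swapper can be woken
 * once all sleepqueue state has been released, roughly:
 *
 *	if (wakeupshlk(lk, file, line))
 *		kick_proc0();
 */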
406 assert_lockmgr(const struct lock_object *lock, int what)
409 panic("lockmgr locks do not support assertions");
413 lock_lockmgr(struct lock_object *lock, uintptr_t how)
416 panic("lockmgr locks do not support sleep interlocking");
420 unlock_lockmgr(struct lock_object *lock)
423 panic("lockmgr locks do not support sleep interlocking");
428 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
431 panic("lockmgr locks do not support owner inquiring");
436 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
440 MPASS((flags & ~LK_INIT_MASK) == 0);
441 ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
442 ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
445 iflags = LO_SLEEPABLE | LO_UPGRADABLE;
446 if (flags & LK_CANRECURSE)
447 iflags |= LO_RECURSABLE;
448 if ((flags & LK_NODUP) == 0)
450 if (flags & LK_NOPROFILE)
451 iflags |= LO_NOPROFILE;
452 if ((flags & LK_NOWITNESS) == 0)
453 iflags |= LO_WITNESS;
454 if (flags & LK_QUIET)
456 if (flags & LK_IS_VNODE)
457 iflags |= LO_IS_VNODE;
460 iflags |= flags & LK_NOSHARE;
462 lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
463 lk->lk_lock = LK_UNLOCKED;
465 lk->lk_exslpfail = 0;
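/*
 * Illustrative usage sketch (hedged; "foo" and the chosen flags are made
 * up for the example): a subsystem embedding a lockmgr lock typically
 * pairs lockinit() with lockdestroy() and brackets its critical sections
 * with lockmgr() requests:
 *
 *	struct foo {
 *		struct lock	f_lock;
 *	};
 *
 *	lockinit(&foo->f_lock, PVFS, "foolck", 0, LK_CANRECURSE);
 *	lockmgr(&foo->f_lock, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&foo->f_lock, LK_RELEASE, NULL);
 *	lockdestroy(&foo->f_lock);
 */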
472 * XXX: Gross hacks to manipulate external lock flags after
473 * initialization. Used for certain vnode and buf locks.
476 lockallowshare(struct lock *lk)
479 lockmgr_assert(lk, KA_XLOCKED);
480 lk->lock_object.lo_flags &= ~LK_NOSHARE;
484 lockdisableshare(struct lock *lk)
487 lockmgr_assert(lk, KA_XLOCKED);
488 lk->lock_object.lo_flags |= LK_NOSHARE;
492 lockallowrecurse(struct lock *lk)
495 lockmgr_assert(lk, KA_XLOCKED);
496 lk->lock_object.lo_flags |= LO_RECURSABLE;
500 lockdisablerecurse(struct lock *lk)
503 lockmgr_assert(lk, KA_XLOCKED);
504 lk->lock_object.lo_flags &= ~LO_RECURSABLE;
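/*
 * Illustrative sketch (an assumption about typical callers, not original
 * text): these helpers may only be used while the lock is held
 * exclusively, which the KA_XLOCKED assertions above enforce:
 *
 *	lockmgr(lk, LK_EXCLUSIVE, NULL);
 *	lockdisableshare(lk);
 *	lockmgr(lk, LK_RELEASE, NULL);
 */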
508 lockdestroy(struct lock *lk)
511 KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
512 KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
513 KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
514 lock_destroy(&lk->lock_object);
517 static bool __always_inline
518 lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
522 * If no other thread has an exclusive lock, or
523 * no exclusive waiter is present, bump the count of
524 * sharers. Since we have to preserve the state of
525 * waiters, if we fail to acquire the shared lock
526 * loop back and retry.
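 *
 * Illustrative example of the arithmetic (an assumption about the count
 * encoding, not original text): with one sharer and no waiters the lock
 * word is LK_SHARERS_LOCK(1); adding LK_ONE_SHARER yields
 * LK_SHARERS_LOCK(2), leaving LK_SHARE and any waiter bits untouched.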
528 while (LK_CAN_SHARE(*xp, flags, fp)) {
529 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
530 *xp + LK_ONE_SHARER)) {
537 static bool __always_inline
538 lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
542 if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
543 if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
544 *xp - LK_ONE_SHARER))
554 lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
557 struct thread *owner;
561 MPASS(x != LK_UNLOCKED);
562 owner = (struct thread *)LK_HOLDER(x);
564 MPASS(owner != curthread);
565 if (owner == (struct thread *)LK_KERNPROC)
567 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
571 if (!TD_IS_RUNNING(owner))
573 if ((x & LK_ALL_WAITERS) != 0)
576 x = lockmgr_read_value(lk);
577 if (LK_CAN_SHARE(x, flags, false)) {
581 owner = (struct thread *)LK_HOLDER(x);
585 static __noinline int
586 lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
587 const char *file, int line, struct lockmgr_wait *lwa)
595 uint64_t sleep_time = 0;
597 #ifdef LOCK_PROFILING
598 uint64_t waittime = 0;
601 struct lock_delay_arg lda;
603 if (SCHEDULER_STOPPED())
606 tid = (uintptr_t)curthread;
608 if (LK_CAN_WITNESS(flags))
609 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
610 file, line, flags & LK_INTERLOCK ? ilk : NULL);
611 x = lockmgr_read_value(lk);
612 lock_delay_arg_init(&lda, &lockmgr_delay);
614 flags &= ~LK_ADAPTIVE;
616 * The lock may already be locked exclusive by curthread,
619 if (LK_HOLDER(x) == tid) {
621 "%s: %p already held in exclusive mode",
628 if (lockmgr_slock_try(lk, &x, flags, false))
631 lock_profile_obtain_lock_failed(&lk->lock_object, false,
632 &contested, &waittime);
634 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
635 if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
640 PMC_SOFT_CALL( , , lock, failed);
644 * If the operation is expected not to sleep, just give up and return.
647 if (LK_TRYOP(flags)) {
648 LOCK_LOG2(lk, "%s: %p fails the try operation",
655 * Acquire the sleepqueue chain lock because we
656 * probably will need to manipulate waiters flags.
658 sleepq_lock(&lk->lock_object);
659 x = lockmgr_read_value(lk);
663 * If the lock can now be acquired in shared mode, try again.
666 if (LK_CAN_SHARE(x, flags, false)) {
667 sleepq_release(&lk->lock_object);
672 * Try to set the LK_SHARED_WAITERS flag. If we fail,
673 * loop back and retry.
675 if ((x & LK_SHARED_WAITERS) == 0) {
676 if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
677 x | LK_SHARED_WAITERS)) {
680 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
685 iwmesg = lk->lock_object.lo_name;
689 iwmesg = lwa->iwmesg;
695 * Since we have been unable to acquire the shared
696 * lock and the shared waiters flag is set, we will sleep.
700 sleep_time -= lockstat_nsecs(&lk->lock_object);
702 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
705 sleep_time += lockstat_nsecs(&lk->lock_object);
707 flags &= ~LK_INTERLOCK;
710 "%s: interrupted sleep for %p with %d",
711 __func__, lk, error);
714 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
716 x = lockmgr_read_value(lk);
721 LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
722 LOCKSTAT_READER, (x & LK_SHARE) == 0,
723 (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
725 #ifdef LOCK_PROFILING
726 lockmgr_note_shared_acquire(lk, contested, waittime,
729 lockmgr_note_shared_acquire(lk, 0, 0, file, line,
735 lockmgr_exit(flags, ilk, 0);
740 lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
742 struct thread *owner;
746 MPASS(x != LK_UNLOCKED);
747 owner = (struct thread *)LK_HOLDER(x);
749 MPASS(owner != curthread);
752 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
754 if (owner == (struct thread *)LK_KERNPROC)
756 if (!TD_IS_RUNNING(owner))
758 if ((x & LK_ALL_WAITERS) != 0)
761 x = lockmgr_read_value(lk);
762 if (x == LK_UNLOCKED) {
766 owner = (struct thread *)LK_HOLDER(x);
770 static __noinline int
771 lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
772 const char *file, int line, struct lockmgr_wait *lwa)
774 struct lock_class *class;
781 uint64_t sleep_time = 0;
783 #ifdef LOCK_PROFILING
784 uint64_t waittime = 0;
787 struct lock_delay_arg lda;
789 if (SCHEDULER_STOPPED())
792 tid = (uintptr_t)curthread;
794 if (LK_CAN_WITNESS(flags))
795 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
796 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
800 * If curthread already holds the lock and this one is
801 * allowed to recurse, simply recurse on it.
803 if (lockmgr_xlocked(lk)) {
804 if ((flags & LK_CANRECURSE) == 0 &&
805 (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
807 * If this is a try operation, just give up and
808 * return instead of panicking.
810 if (LK_TRYOP(flags)) {
812 "%s: %p fails the try operation",
817 if (flags & LK_INTERLOCK) {
818 class = LOCK_CLASS(ilk);
819 class->lc_unlock(ilk);
822 panic("%s: recursing on non recursive lockmgr %p "
823 "@ %s:%d\n", __func__, lk, file, line);
825 atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
827 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
828 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
829 lk->lk_recurse, file, line);
830 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
831 LK_TRYWIT(flags), file, line);
832 TD_LOCKS_INC(curthread);
837 lock_delay_arg_init(&lda, &lockmgr_delay);
839 flags &= ~LK_ADAPTIVE;
841 if (x == LK_UNLOCKED) {
842 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
847 lock_profile_obtain_lock_failed(&lk->lock_object, false,
848 &contested, &waittime);
850 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
851 if (lockmgr_xlock_adaptive(&lda, lk, &x))
855 PMC_SOFT_CALL( , , lock, failed);
859 * If the operation is expected not to sleep, just give up and return.
862 if (LK_TRYOP(flags)) {
863 LOCK_LOG2(lk, "%s: %p fails the try operation",
870 * Acquire the sleepqueue chain lock because we
871 * probably will need to manipulate waiters flags.
873 sleepq_lock(&lk->lock_object);
874 x = lockmgr_read_value(lk);
878 * If the lock has been released while we spun on
879 * the sleepqueue chain lock, just try again.
881 if (x == LK_UNLOCKED) {
882 sleepq_release(&lk->lock_object);
887 * The lock can be in the state where there is a
888 * pending queue of waiters, but still no owner.
889 * This happens when the lock is contested and an
890 * owner is going to claim the lock.
891 * If curthread successfully acquires it, claim lock
892 * ownership and return, preserving the waiters flags.
895 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
896 if ((x & ~v) == LK_UNLOCKED) {
897 v &= ~LK_EXCLUSIVE_SPINNERS;
898 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
900 sleepq_release(&lk->lock_object);
902 "%s: %p claimed by a new writer",
910 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
911 * fail, loop back and retry.
913 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
914 if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
915 x | LK_EXCLUSIVE_WAITERS)) {
918 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
923 iwmesg = lk->lock_object.lo_name;
927 iwmesg = lwa->iwmesg;
933 * Since we have been unable to acquire the
934 * exclusive lock and the exclusive waiters flag
935 * is set, we will sleep.
938 sleep_time -= lockstat_nsecs(&lk->lock_object);
940 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
943 sleep_time += lockstat_nsecs(&lk->lock_object);
945 flags &= ~LK_INTERLOCK;
948 "%s: interrupted sleep for %p with %d",
949 __func__, lk, error);
952 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
954 x = lockmgr_read_value(lk);
959 LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
960 LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
961 (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
963 #ifdef LOCK_PROFILING
964 lockmgr_note_exclusive_acquire(lk, contested, waittime,
967 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
973 lockmgr_exit(flags, ilk, 0);
977 static __noinline int
978 lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
979 const char *file, int line, struct lockmgr_wait *lwa)
981 uintptr_t tid, v, setv;
985 if (SCHEDULER_STOPPED())
988 tid = (uintptr_t)curthread;
990 _lockmgr_assert(lk, KA_SLOCKED, file, line);
992 op = flags & LK_TYPE_MASK;
993 v = lockmgr_read_value(lk);
995 if (LK_SHARERS(v) > 1) {
996 if (op == LK_TRYUPGRADE) {
997 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
1002 if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
1003 v - LK_ONE_SHARER)) {
1004 lockmgr_note_shared_release(lk, file, line);
1009 MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));
1012 setv |= (v & LK_ALL_WAITERS);
1015 * Try to switch from one shared lock to an exclusive one.
1016 * We need to preserve waiters flags during the operation.
1018 if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
1019 LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
1021 WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
1022 LK_TRYWIT(flags), file, line);
1023 LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
1024 TD_SLOCKS_DEC(curthread);
1030 error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
1031 flags &= ~LK_INTERLOCK;
1033 lockmgr_exit(flags, ilk, 0);
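/*
 * Illustrative usage sketch (hedged; the caller pattern is an assumption,
 * not from this file): a reader that needs to become a writer can try a
 * non-sleeping upgrade first and fall back to relocking on failure:
 *
 *	if (lockmgr(lk, LK_TRYUPGRADE, NULL) != 0) {
 *		lockmgr(lk, LK_RELEASE, NULL);
 *		lockmgr(lk, LK_EXCLUSIVE, NULL);
 *	}
 */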
1038 lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
1039 const char *file, int line)
1041 struct lock_class *class;
1046 if (SCHEDULER_STOPPED())
1049 op = flags & LK_TYPE_MASK;
1053 if (LK_CAN_WITNESS(flags))
1054 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1055 file, line, flags & LK_INTERLOCK ? ilk : NULL);
1056 if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
1058 x = lockmgr_read_value(lk);
1059 if (lockmgr_slock_try(lk, &x, flags, true)) {
1060 lockmgr_note_shared_acquire(lk, 0, 0,
1064 return (lockmgr_slock_hard(lk, flags, ilk, file, line,
1069 if (LK_CAN_WITNESS(flags))
1070 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1071 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1073 tid = (uintptr_t)curthread;
1074 if (lockmgr_read_value(lk) == LK_UNLOCKED &&
1075 atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1076 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1080 return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
1086 return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
1090 if (__predict_true(locked)) {
1091 if (__predict_false(flags & LK_INTERLOCK)) {
1092 class = LOCK_CLASS(ilk);
1093 class->lc_unlock(ilk);
1097 return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
1098 LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
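/*
 * Illustrative usage sketch (hedged; "obj" and its mutex are made up for
 * the example): LK_INTERLOCK asks lockmgr to drop the caller's interlock
 * once it is safe to do so, letting a caller move atomically from a
 * mutex-protected lookup to sleeping on the lock:
 *
 *	mtx_lock(&obj->o_interlock);
 *	...
 *	error = lockmgr_lock_flags(&obj->o_lock, LK_EXCLUSIVE | LK_INTERLOCK,
 *	    &obj->o_interlock.lock_object, __FILE__, __LINE__);
 */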
1102 static __noinline int
1103 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1104 const char *file, int line)
1107 int wakeup_swapper = 0;
1109 if (SCHEDULER_STOPPED())
1112 wakeup_swapper = wakeupshlk(lk, file, line);
1115 lockmgr_exit(flags, ilk, wakeup_swapper);
1119 static __noinline int
1120 lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1121 const char *file, int line)
1124 int wakeup_swapper = 0;
1128 if (SCHEDULER_STOPPED())
1131 tid = (uintptr_t)curthread;
1134 * As a first step, treat the lock as if it has not recursed.
1136 * Fix up the tid variable if the lock has been disowned.
1138 if (LK_HOLDER(x) == LK_KERNPROC)
1142 * The lock is held in exclusive mode.
1143 * If the lock is recursed also, then unrecurse it.
1145 if (lockmgr_recursed_v(x)) {
1146 LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
1148 if (lk->lk_recurse == 0)
1149 atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
1152 if (tid != LK_KERNPROC)
1153 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
1156 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
1159 sleepq_lock(&lk->lock_object);
1160 x = lockmgr_read_value(lk);
1164 * If the lock has exclusive waiters, give them
1165 * preference in order to avoid a deadlock with
1166 * the shared runners-up.
1167 * If interruptible sleeps left the exclusive queue
1168 * empty, avoid starvation of the threads sleeping
1169 * on the shared queue by giving them precedence
1170 * and clearing the exclusive waiters bit anyway.
1171 * Note that the lk_exslpfail count may overstate
1172 * the real number of waiters carrying the
1173 * LK_SLEEPFAIL flag, because such sleeps may also
1174 * be interruptible; treat lk_exslpfail as an
1175 * upper bound, including the edge cases.
1178 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1179 realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
1180 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1181 if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
1182 lk->lk_exslpfail = 0;
1183 queue = SQ_EXCLUSIVE_QUEUE;
1184 v |= (x & LK_SHARED_WAITERS);
1186 lk->lk_exslpfail = 0;
1188 "%s: %p has only LK_SLEEPFAIL sleepers",
1191 "%s: %p waking up threads on the exclusive queue",
1193 wakeup_swapper = sleepq_broadcast(&lk->lock_object,
1194 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1195 queue = SQ_SHARED_QUEUE;
1199 * Exclusive waiters sleeping with LK_SLEEPFAIL
1200 * on and using interruptible sleeps/timeouts
1201 * may have left spurious lk_exslpfail counts,
1202 * so clean them up anyway.
1204 lk->lk_exslpfail = 0;
1205 queue = SQ_SHARED_QUEUE;
1208 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
1209 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1211 atomic_store_rel_ptr(&lk->lk_lock, v);
1212 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
1213 sleepq_release(&lk->lock_object);
1216 lockmgr_exit(flags, ilk, wakeup_swapper);
1221 * Lightweight entry points for common operations.
1223 * Functionality is similar to sx locks, in that none of the additional lockmgr
1224 * features are supported. To be clear, these are NOT supported:
1225 * 1. shared locking disablement
1226 * 2. returning with an error after sleep
1227 * 3. unlocking the interlock
1229 * If in doubt, use lockmgr_lock_flags.
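 *
 * Illustrative sketch (an assumption about a typical caller, not original
 * text):
 *
 *	if (lockmgr_slock(lk, LK_SHARED, __FILE__, __LINE__) == 0) {
 *		...
 *		lockmgr_unlock(lk);
 *	}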
1232 lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1236 MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
1237 MPASS((flags & LK_INTERLOCK) == 0);
1238 MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1240 if (LK_CAN_WITNESS(flags))
1241 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1243 x = lockmgr_read_value(lk);
1244 if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1245 lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1249 return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1253 lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1257 MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
1258 MPASS((flags & LK_INTERLOCK) == 0);
1260 if (LK_CAN_WITNESS(flags))
1261 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1262 LOP_EXCLUSIVE, file, line, NULL);
1263 tid = (uintptr_t)curthread;
1264 if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1265 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1270 return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1274 lockmgr_unlock(struct lock *lk)
1283 _lockmgr_assert(lk, KA_LOCKED, file, line);
1284 x = lockmgr_read_value(lk);
1285 if (__predict_true((x & LK_SHARE) != 0)) {
1286 lockmgr_note_shared_release(lk, file, line);
1287 if (lockmgr_sunlock_try(lk, &x)) {
1288 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1290 return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1293 tid = (uintptr_t)curthread;
1294 lockmgr_note_exclusive_release(lk, file, line);
1295 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1296 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
1298 return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1305 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
1306 const char *wmesg, int pri, int timo, const char *file, int line)
1309 struct lockmgr_wait lwa;
1310 struct lock_class *class;
1312 uintptr_t tid, v, x;
1313 u_int op, realexslp;
1314 int error, ipri, itimo, queue, wakeup_swapper;
1315 #ifdef LOCK_PROFILING
1316 uint64_t waittime = 0;
1320 if (SCHEDULER_STOPPED())
1324 tid = (uintptr_t)curthread;
1325 op = (flags & LK_TYPE_MASK);
1326 iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
1327 ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
1328 itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
1330 lwa.iwmesg = iwmesg;
1334 MPASS((flags & ~LK_TOTAL_MASK) == 0);
1335 KASSERT((op & (op - 1)) == 0,
1336 ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
1337 KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
1338 (op != LK_DOWNGRADE && op != LK_RELEASE),
1339 ("%s: Invalid flags in regard of the operation desired @ %s:%d",
1340 __func__, file, line));
1341 KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
1342 ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
1343 __func__, file, line));
1344 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
1345 ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
1346 lk->lock_object.lo_name, file, line));
1348 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
1350 if (lk->lock_object.lo_flags & LK_NOSHARE) {
1358 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
1360 if (flags & LK_INTERLOCK)
1361 class->lc_unlock(ilk);
1369 return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
1373 return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
1376 return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1379 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1380 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
1383 * Panic if the lock is recursed.
1385 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1386 if (flags & LK_INTERLOCK)
1387 class->lc_unlock(ilk);
1388 panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
1389 __func__, iwmesg, file, line);
1391 TD_SLOCKS_INC(curthread);
1394 * In order to preserve waiters flags, just spin.
1397 x = lockmgr_read_value(lk);
1398 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1399 x &= LK_ALL_WAITERS;
1400 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1401 LK_SHARERS_LOCK(1) | x))
1405 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
1406 LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
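/*
 * Illustrative sketch (hedged; the caller pattern is an assumption, not
 * from this file): LK_DOWNGRADE lets a writer admit new readers without
 * ever dropping the lock:
 *
 *	lockmgr(lk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(lk, LK_DOWNGRADE, NULL);
 *	...
 *	lockmgr(lk, LK_RELEASE, NULL);
 */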
1409 _lockmgr_assert(lk, KA_LOCKED, file, line);
1410 x = lockmgr_read_value(lk);
1412 if (__predict_true((x & LK_SHARE) != 0)) {
1413 lockmgr_note_shared_release(lk, file, line);
1414 return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1416 lockmgr_note_exclusive_release(lk, file, line);
1417 return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1421 if (LK_CAN_WITNESS(flags))
1422 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1423 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1427 * Trying to drain a lock we already own will result in a deadlock.
1430 if (lockmgr_xlocked(lk)) {
1431 if (flags & LK_INTERLOCK)
1432 class->lc_unlock(ilk);
1433 panic("%s: draining %s with the lock held @ %s:%d\n",
1434 __func__, iwmesg, file, line);
1438 if (lk->lk_lock == LK_UNLOCKED &&
1439 atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1443 PMC_SOFT_CALL( , , lock, failed);
1445 lock_profile_obtain_lock_failed(&lk->lock_object, false,
1446 &contested, &waittime);
1449 * If the operation is expected not to sleep, just give up and return.
1452 if (LK_TRYOP(flags)) {
1453 LOCK_LOG2(lk, "%s: %p fails the try operation",
1460 * Acquire the sleepqueue chain lock because we
1461 * probably will need to manipulate waiters flags.
1463 sleepq_lock(&lk->lock_object);
1464 x = lockmgr_read_value(lk);
1467 * if the lock has been released while we spun on
1468 * the sleepqueue chain lock just try again.
1470 if (x == LK_UNLOCKED) {
1471 sleepq_release(&lk->lock_object);
1475 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1476 if ((x & ~v) == LK_UNLOCKED) {
1477 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1480 * If interruptible sleeps left the exclusive
1481 * queue empty, avoid starvation of the
1482 * threads sleeping on the shared queue by
1483 * giving them precedence and clearing the
1484 * exclusive waiters bit anyway.
1485 * Note that the lk_exslpfail count may
1486 * overstate the real number of waiters
1487 * carrying the LK_SLEEPFAIL flag, because
1488 * such sleeps may also be interruptible;
1489 * treat lk_exslpfail as an upper bound,
1490 * including the edge cases.
1493 if (v & LK_EXCLUSIVE_WAITERS) {
1494 queue = SQ_EXCLUSIVE_QUEUE;
1495 v &= ~LK_EXCLUSIVE_WAITERS;
1498 * Exclusive waiters sleeping with
1499 * LK_SLEEPFAIL on and using
1500 * interruptible sleeps/timeouts may
1501 * have left spurious lk_exslpfail
1502 * counts, so clean them up anyway.
1504 MPASS(v & LK_SHARED_WAITERS);
1505 lk->lk_exslpfail = 0;
1506 queue = SQ_SHARED_QUEUE;
1507 v &= ~LK_SHARED_WAITERS;
1509 if (queue == SQ_EXCLUSIVE_QUEUE) {
1511 sleepq_sleepcnt(&lk->lock_object,
1512 SQ_EXCLUSIVE_QUEUE);
1513 if (lk->lk_exslpfail >= realexslp) {
1514 lk->lk_exslpfail = 0;
1515 queue = SQ_SHARED_QUEUE;
1516 v &= ~LK_SHARED_WAITERS;
1517 if (realexslp != 0) {
1519 "%s: %p has only LK_SLEEPFAIL sleepers",
1522 "%s: %p waking up threads on the exclusive queue",
1528 SQ_EXCLUSIVE_QUEUE);
1531 lk->lk_exslpfail = 0;
1533 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1534 sleepq_release(&lk->lock_object);
1538 "%s: %p waking up all threads on the %s queue",
1539 __func__, lk, queue == SQ_SHARED_QUEUE ?
1540 "shared" : "exclusive");
1541 wakeup_swapper |= sleepq_broadcast(
1542 &lk->lock_object, SLEEPQ_LK, 0, queue);
1545 * If shared waiters have been woken up we need
1546 * to wait for one of them to acquire the lock
1547 * before setting the exclusive waiters bit, in
1548 * order to avoid a deadlock.
1550 if (queue == SQ_SHARED_QUEUE) {
1551 for (v = lk->lk_lock;
1552 (v & LK_SHARE) && !LK_SHARERS(v);
1559 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
1560 * fail, loop back and retry.
1562 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1563 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1564 x | LK_EXCLUSIVE_WAITERS)) {
1565 sleepq_release(&lk->lock_object);
1568 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1573 * Since we have been unable to acquire the
1574 * exclusive lock and the exclusive waiters flag
1575 * is set, we will sleep.
1577 if (flags & LK_INTERLOCK) {
1578 class->lc_unlock(ilk);
1579 flags &= ~LK_INTERLOCK;
1582 sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1583 SQ_EXCLUSIVE_QUEUE);
1584 sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1586 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1591 lock_profile_obtain_lock_success(&lk->lock_object,
1592 false, contested, waittime, file, line);
1593 LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1594 lk->lk_recurse, file, line);
1595 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1596 LK_TRYWIT(flags), file, line);
1597 TD_LOCKS_INC(curthread);
1602 if (flags & LK_INTERLOCK)
1603 class->lc_unlock(ilk);
1604 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1607 if (flags & LK_INTERLOCK)
1608 class->lc_unlock(ilk);
1616 _lockmgr_disown(struct lock *lk, const char *file, int line)
1620 if (SCHEDULER_STOPPED())
1623 tid = (uintptr_t)curthread;
1624 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1627 * Panic if the lock is recursed.
1629 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1630 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1631 __func__, file, line);
1634 * If the owner is already LK_KERNPROC just skip the whole operation.
1636 if (LK_HOLDER(lk->lk_lock) != tid)
1638 lock_profile_release_lock(&lk->lock_object, false);
1639 LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1640 LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1641 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1642 TD_LOCKS_DEC(curthread);
1646 * In order to preserve waiters flags, just spin.
1649 x = lockmgr_read_value(lk);
1650 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1651 x &= LK_ALL_WAITERS;
1652 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1660 lockmgr_printinfo(const struct lock *lk)
1665 if (lk->lk_lock == LK_UNLOCKED)
1666 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1667 else if (lk->lk_lock & LK_SHARE)
1668 printf("lock type %s: SHARED (count %ju)\n",
1669 lk->lock_object.lo_name,
1670 (uintmax_t)LK_SHARERS(lk->lk_lock));
1672 td = lockmgr_xholder(lk);
1673 if (td == (struct thread *)LK_KERNPROC)
1674 printf("lock type %s: EXCL by KERNPROC\n",
1675 lk->lock_object.lo_name);
1677 printf("lock type %s: EXCL by thread %p "
1678 "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1679 td, td->td_proc->p_pid, td->td_proc->p_comm,
1684 if (x & LK_EXCLUSIVE_WAITERS)
1685 printf(" with exclusive waiters pending\n");
1686 if (x & LK_SHARED_WAITERS)
1687 printf(" with shared waiters pending\n");
1688 if (x & LK_EXCLUSIVE_SPINNERS)
1689 printf(" with exclusive spinners pending\n");
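/*
 * Illustrative note (hedged; the caller pattern is an assumption,
 * inferred from the code below rather than quoted from it): lockstatus()
 * reports how the lock is held relative to curthread, so callers
 * typically branch on its result, e.g.
 *
 *	if (lockstatus(lk) == LK_EXCLUSIVE)
 *		panic("lock must not be held exclusively here");
 */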
1695 lockstatus(const struct lock *lk)
1701 x = lockmgr_read_value(lk);
1704 if ((x & LK_SHARE) == 0) {
1705 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1709 } else if (x == LK_UNLOCKED)
1715 #ifdef INVARIANT_SUPPORT
1717 FEATURE(invariant_support,
1718 "Support for modules compiled with INVARIANTS option");
1721 #undef _lockmgr_assert
1725 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1729 if (SCHEDULER_STOPPED())
1733 case KA_SLOCKED | KA_NOTRECURSED:
1734 case KA_SLOCKED | KA_RECURSED:
1737 case KA_LOCKED | KA_NOTRECURSED:
1738 case KA_LOCKED | KA_RECURSED:
1742 * We cannot trust WITNESS if the lock is held in exclusive
1743 * mode and a call to lockmgr_disown() happened.
1744 * Work around this by skipping the check if the lock is held
1745 * in exclusive mode, even for the KA_LOCKED case.
1747 if (slocked || (lk->lk_lock & LK_SHARE)) {
1748 witness_assert(&lk->lock_object, what, file, line);
1752 if (lk->lk_lock == LK_UNLOCKED ||
1753 ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1754 (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1755 panic("Lock %s not %slocked @ %s:%d\n",
1756 lk->lock_object.lo_name, slocked ? "share" : "",
1759 if ((lk->lk_lock & LK_SHARE) == 0) {
1760 if (lockmgr_recursed(lk)) {
1761 if (what & KA_NOTRECURSED)
1762 panic("Lock %s recursed @ %s:%d\n",
1763 lk->lock_object.lo_name, file,
1765 } else if (what & KA_RECURSED)
1766 panic("Lock %s not recursed @ %s:%d\n",
1767 lk->lock_object.lo_name, file, line);
1771 case KA_XLOCKED | KA_NOTRECURSED:
1772 case KA_XLOCKED | KA_RECURSED:
1773 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1774 panic("Lock %s not exclusively locked @ %s:%d\n",
1775 lk->lock_object.lo_name, file, line);
1776 if (lockmgr_recursed(lk)) {
1777 if (what & KA_NOTRECURSED)
1778 panic("Lock %s recursed @ %s:%d\n",
1779 lk->lock_object.lo_name, file, line);
1780 } else if (what & KA_RECURSED)
1781 panic("Lock %s not recursed @ %s:%d\n",
1782 lk->lock_object.lo_name, file, line);
1785 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1786 panic("Lock %s exclusively locked @ %s:%d\n",
1787 lk->lock_object.lo_name, file, line);
1790 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1798 lockmgr_chain(struct thread *td, struct thread **ownerp)
1800 const struct lock *lk;
1804 if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1806 db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1807 if (lk->lk_lock & LK_SHARE)
1808 db_printf("SHARED (count %ju)\n",
1809 (uintmax_t)LK_SHARERS(lk->lk_lock));
1811 db_printf("EXCL\n");
1812 *ownerp = lockmgr_xholder(lk);
1818 db_show_lockmgr(const struct lock_object *lock)
1821 const struct lock *lk;
1823 lk = (const struct lock *)lock;
1825 db_printf(" state: ");
1826 if (lk->lk_lock == LK_UNLOCKED)
1827 db_printf("UNLOCKED\n");
1828 else if (lk->lk_lock & LK_SHARE)
1829 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1831 td = lockmgr_xholder(lk);
1832 if (td == (struct thread *)LK_KERNPROC)
1833 db_printf("XLOCK: LK_KERNPROC\n");
1835 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1836 td->td_tid, td->td_proc->p_pid,
1837 td->td_proc->p_comm);
1838 if (lockmgr_recursed(lk))
1839 db_printf(" recursed: %d\n", lk->lk_recurse);
1841 db_printf(" waiters: ");
1842 switch (lk->lk_lock & LK_ALL_WAITERS) {
1843 case LK_SHARED_WAITERS:
1844 db_printf("shared\n");
1846 case LK_EXCLUSIVE_WAITERS:
1847 db_printf("exclusive\n");
1849 case LK_ALL_WAITERS:
1850 db_printf("shared and exclusive\n");
1853 db_printf("none\n");
1855 db_printf(" spinners: ");
1856 if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1857 db_printf("exclusive\n");
1859 db_printf("none\n");