2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice(s), this list of conditions and the following disclaimer as
12 * the first lines of this file unmodified other than the possible
13 * addition of one or more copyright notices.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice(s), this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 #include "opt_hwpmc_hooks.h"
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 #include <sys/param.h>
40 #include <sys/limits.h>
42 #include <sys/lock_profile.h>
43 #include <sys/lockmgr.h>
44 #include <sys/lockstat.h>
45 #include <sys/mutex.h>
47 #include <sys/sleepqueue.h>
49 #include <sys/stack.h>
51 #include <sys/sysctl.h>
52 #include <sys/systm.h>
54 #include <machine/cpu.h>
61 #include <sys/pmckern.h>
62 PMC_SOFT_DECLARE( , , lock, failed);
66 * Hack. There should be prio_t or similar so that this is not necessary.
68 _Static_assert((PRILASTFLAG * 2) - 1 <= USHRT_MAX,
69 "prio flags wont fit in u_short pri in struct lock");
71 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
72 ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
74 #define SQ_EXCLUSIVE_QUEUE 0
75 #define SQ_SHARED_QUEUE 1
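/*
 * Waiters are parked on two separate sleepqueue queues so that exclusive
 * and shared waiters can be counted and woken up independently.
 */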
78 #define _lockmgr_assert(lk, what, file, line)
81 #define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++)
82 #define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--)
85 #define STACK_PRINT(lk)
86 #define STACK_SAVE(lk)
87 #define STACK_ZERO(lk)
89 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
90 #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
91 #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
94 #define LOCK_LOG2(lk, string, arg1, arg2) \
95 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
96 CTR2(KTR_LOCK, (string), (arg1), (arg2))
97 #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
98 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
99 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
101 #define GIANT_DECLARE \
103 WITNESS_SAVE_DECL(Giant)
104 #define GIANT_RESTORE() do { \
105 if (__predict_false(_i > 0)) { \
108 WITNESS_RESTORE(&Giant.lock_object, Giant); \
111 #define GIANT_SAVE() do { \
112 if (__predict_false(mtx_owned(&Giant))) { \
113 WITNESS_SAVE(&Giant.lock_object, Giant); \
114 while (mtx_owned(&Giant)) { \
116 mtx_unlock(&Giant); \
121 static bool __always_inline
122 LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
125 if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
128 if (fp || (!(x & LK_SHARE)))
130 if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
131 (curthread->td_pflags & TDP_DEADLKTREAT))
136 #define LK_TRYOP(x) \
139 #define LK_CAN_WITNESS(x) \
140 (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
141 #define LK_TRYWIT(x) \
142 (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
144 #define lockmgr_disowned(lk) \
145 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
147 #define lockmgr_xlocked_v(v) \
148 (((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
150 #define lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
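/*
 * A sketch of the lock word layout assumed by the macros above (see the
 * definitions in sys/lockmgr.h): lk_lock holds either the owning thread
 * pointer (or LK_KERNPROC once disowned) for an exclusive hold, or a
 * sharer count tagged with LK_SHARE, with the low LK_FLAGMASK bits
 * reserved for the waiter/spinner flags that the macros mask off before
 * comparing.
 */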
152 static void assert_lockmgr(const struct lock_object *lock, int how);
154 static void db_show_lockmgr(const struct lock_object *lock);
156 static void lock_lockmgr(struct lock_object *lock, uintptr_t how);
158 static int owner_lockmgr(const struct lock_object *lock,
159 struct thread **owner);
161 static uintptr_t unlock_lockmgr(struct lock_object *lock);
163 struct lock_class lock_class_lockmgr = {
164 .lc_name = "lockmgr",
165 .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
166 .lc_assert = assert_lockmgr,
168 .lc_ddb_show = db_show_lockmgr,
170 .lc_lock = lock_lockmgr,
171 .lc_unlock = unlock_lockmgr,
173 .lc_owner = owner_lockmgr,
177 static __read_mostly bool lk_adaptive = true;
178 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
179 SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
181 #define lockmgr_delay locks_delay
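/*
 * With adaptive_spinning enabled, a contended acquisition briefly spins
 * (using the shared locks_delay backoff parameters) while the current
 * owner is running on another CPU, and only falls back to the sleepqueue
 * otherwise; see lockmgr_slock_adaptive() and lockmgr_xlock_adaptive().
 */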
183 struct lockmgr_wait {
189 static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
191 static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
194 lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
196 struct lock_class *class;
198 if (flags & LK_INTERLOCK) {
199 class = LOCK_CLASS(ilk);
200 class->lc_unlock(ilk);
203 if (__predict_false(wakeup_swapper))
208 lockmgr_note_shared_acquire(struct lock *lk, int contested,
209 uint64_t waittime, const char *file, int line, int flags)
212 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
213 waittime, file, line, LOCKSTAT_READER);
214 LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
215 WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
216 TD_LOCKS_INC(curthread);
217 TD_SLOCKS_INC(curthread);
222 lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
225 WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
226 LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
227 TD_LOCKS_DEC(curthread);
228 TD_SLOCKS_DEC(curthread);
232 lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
233 uint64_t waittime, const char *file, int line, int flags)
236 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
237 waittime, file, line, LOCKSTAT_WRITER);
238 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
239 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
241 TD_LOCKS_INC(curthread);
246 lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
249 if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
250 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
251 TD_LOCKS_DEC(curthread);
253 LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
257 static __inline struct thread *
258 lockmgr_xholder(const struct lock *lk)
262 x = lockmgr_read_value(lk);
263 return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
267 * It assumes the sleepq_lock is held and returns with it unheld.
268 * It also assumes the generic interlock is sane and has been checked by the caller.
269 * If LK_INTERLOCK is specified, the interlock is not reacquired after the sleep.
273 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
274 const char *wmesg, int pri, int timo, int queue)
277 struct lock_class *class;
280 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
281 catch = pri & PCATCH;
285 LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
286 (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
288 if (flags & LK_INTERLOCK)
289 class->lc_unlock(ilk);
290 if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) {
291 if (lk->lk_exslpfail < USHRT_MAX)
295 sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
296 SLEEPQ_INTERRUPTIBLE : 0), queue);
297 if ((flags & LK_TIMELOCK) && timo)
298 sleepq_set_timeout(&lk->lock_object, timo);
301 * Decide which sleepqueue wait primitive to use.
303 if ((flags & LK_TIMELOCK) && timo && catch)
304 error = sleepq_timedwait_sig(&lk->lock_object, pri);
305 else if ((flags & LK_TIMELOCK) && timo)
306 error = sleepq_timedwait(&lk->lock_object, pri);
308 error = sleepq_wait_sig(&lk->lock_object, pri);
310 sleepq_wait(&lk->lock_object, pri);
312 if ((flags & LK_SLEEPFAIL) && error == 0)
319 wakeupshlk(struct lock *lk, const char *file, int line)
321 uintptr_t v, x, orig_x;
323 int queue, wakeup_swapper;
327 x = lockmgr_read_value(lk);
328 if (lockmgr_sunlock_try(lk, &x))
332 * We should have a sharer with waiters, so enter the hard
333 * path in order to handle wakeups correctly.
335 sleepq_lock(&lk->lock_object);
336 orig_x = lockmgr_read_value(lk);
338 x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
342 * If the lock has exclusive waiters, give them preference in
343 * order to avoid a deadlock with shared runners-up.
344 * If interruptible sleeps left the exclusive queue empty,
345 * avoid starving the threads sleeping on the shared queue by
346 * giving them precedence and clearing the exclusive waiters
347 * bit anyway.
348 * Note that the lk_exslpfail count may overstate the real
349 * number of waiters with the LK_SLEEPFAIL flag set, because
350 * such waiters may also be using interruptible sleeps, so
351 * lk_exslpfail should be treated as an upper bound, including
352 * the edge cases.
354 realexslp = sleepq_sleepcnt(&lk->lock_object,
356 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
357 if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
358 lk->lk_exslpfail = 0;
359 queue = SQ_EXCLUSIVE_QUEUE;
360 v |= (x & LK_SHARED_WAITERS);
362 lk->lk_exslpfail = 0;
364 "%s: %p has only LK_SLEEPFAIL sleepers",
367 "%s: %p waking up threads on the exclusive queue",
370 sleepq_broadcast(&lk->lock_object,
371 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
372 queue = SQ_SHARED_QUEUE;
376 * Exclusive waiters sleeping with LK_SLEEPFAIL set
377 * and using interruptible sleeps/timeouts may have
378 * left a spurious lk_exslpfail count, so clean it up anyway.
381 lk->lk_exslpfail = 0;
382 queue = SQ_SHARED_QUEUE;
385 if (lockmgr_sunlock_try(lk, &orig_x)) {
386 sleepq_release(&lk->lock_object);
390 x |= LK_SHARERS_LOCK(1);
391 if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
395 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
396 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
398 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
400 sleepq_release(&lk->lock_object);
404 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
405 return (wakeup_swapper);
409 assert_lockmgr(const struct lock_object *lock, int what)
412 panic("lockmgr locks do not support assertions");
416 lock_lockmgr(struct lock_object *lock, uintptr_t how)
419 panic("lockmgr locks do not support sleep interlocking");
423 unlock_lockmgr(struct lock_object *lock)
426 panic("lockmgr locks do not support sleep interlocking");
431 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
434 panic("lockmgr locks do not support owner inquiring");
439 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
443 MPASS((flags & ~LK_INIT_MASK) == 0);
444 ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
445 ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
448 iflags = LO_SLEEPABLE | LO_UPGRADABLE;
449 if (flags & LK_CANRECURSE)
450 iflags |= LO_RECURSABLE;
451 if ((flags & LK_NODUP) == 0)
453 if (flags & LK_NOPROFILE)
454 iflags |= LO_NOPROFILE;
455 if ((flags & LK_NOWITNESS) == 0)
456 iflags |= LO_WITNESS;
457 if (flags & LK_QUIET)
459 if (flags & LK_IS_VNODE)
460 iflags |= LO_IS_VNODE;
463 iflags |= flags & LK_NOSHARE;
465 lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
466 lk->lk_lock = LK_UNLOCKED;
468 lk->lk_exslpfail = 0;
475 * XXX: Gross hacks to manipulate external lock flags after
476 * initialization. Used for certain vnode and buf locks.
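 * Each helper below asserts that the caller holds the lock exclusively
 * while the flag is being changed.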
479 lockallowshare(struct lock *lk)
482 lockmgr_assert(lk, KA_XLOCKED);
483 lk->lock_object.lo_flags &= ~LK_NOSHARE;
487 lockdisableshare(struct lock *lk)
490 lockmgr_assert(lk, KA_XLOCKED);
491 lk->lock_object.lo_flags |= LK_NOSHARE;
495 lockallowrecurse(struct lock *lk)
498 lockmgr_assert(lk, KA_XLOCKED);
499 lk->lock_object.lo_flags |= LO_RECURSABLE;
503 lockdisablerecurse(struct lock *lk)
506 lockmgr_assert(lk, KA_XLOCKED);
507 lk->lock_object.lo_flags &= ~LO_RECURSABLE;
511 lockdestroy(struct lock *lk)
514 KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
515 KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
516 KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
517 lock_destroy(&lk->lock_object);
520 static bool __always_inline
521 lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
525 * If no other thread has an exclusive lock, or
526 * no exclusive waiter is present, bump the count of
527 * sharers. Since we have to preserve the state of
528 * waiters, if we fail to acquire the shared lock
529 * loop back and retry.
531 while (LK_CAN_SHARE(*xp, flags, fp)) {
532 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
533 *xp + LK_ONE_SHARER)) {
540 static bool __always_inline
541 lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
545 if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
546 if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
547 *xp - LK_ONE_SHARER))
557 lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
560 struct thread *owner;
564 MPASS(x != LK_UNLOCKED);
565 owner = (struct thread *)LK_HOLDER(x);
567 MPASS(owner != curthread);
568 if (owner == (struct thread *)LK_KERNPROC)
570 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
574 if (!TD_IS_RUNNING(owner))
576 if ((x & LK_ALL_WAITERS) != 0)
579 x = lockmgr_read_value(lk);
580 if (LK_CAN_SHARE(x, flags, false)) {
584 owner = (struct thread *)LK_HOLDER(x);
588 static __noinline int
589 lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
590 const char *file, int line, struct lockmgr_wait *lwa)
598 uint64_t sleep_time = 0;
600 #ifdef LOCK_PROFILING
601 uint64_t waittime = 0;
604 struct lock_delay_arg lda;
606 if (KERNEL_PANICKED())
609 tid = (uintptr_t)curthread;
611 if (LK_CAN_WITNESS(flags))
612 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
613 file, line, flags & LK_INTERLOCK ? ilk : NULL);
614 x = lockmgr_read_value(lk);
615 lock_delay_arg_init(&lda, &lockmgr_delay);
617 flags &= ~LK_ADAPTIVE;
619 * The lock may already be locked exclusive by curthread,
622 if (LK_HOLDER(x) == tid) {
624 "%s: %p already held in exclusive mode",
631 if (lockmgr_slock_try(lk, &x, flags, false))
634 lock_profile_obtain_lock_failed(&lk->lock_object, false,
635 &contested, &waittime);
637 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
638 if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
643 PMC_SOFT_CALL( , , lock, failed);
647 * If this is a try operation, just give up and return instead of sleeping.
650 if (LK_TRYOP(flags)) {
651 LOCK_LOG2(lk, "%s: %p fails the try operation",
658 * Acquire the sleepqueue chain lock because we
659 * will probably need to manipulate the waiters flags.
661 sleepq_lock(&lk->lock_object);
662 x = lockmgr_read_value(lk);
666 * If the lock can be acquired in shared mode, try again.
669 if (LK_CAN_SHARE(x, flags, false)) {
670 sleepq_release(&lk->lock_object);
675 * Try to set the LK_SHARED_WAITERS flag. If we fail,
676 * loop back and retry.
678 if ((x & LK_SHARED_WAITERS) == 0) {
679 if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
680 x | LK_SHARED_WAITERS)) {
683 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
688 iwmesg = lk->lock_object.lo_name;
692 iwmesg = lwa->iwmesg;
698 * Since we have been unable to acquire the shared
699 * lock and the shared waiters flag is set, we will sleep.
703 sleep_time -= lockstat_nsecs(&lk->lock_object);
705 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
708 sleep_time += lockstat_nsecs(&lk->lock_object);
710 flags &= ~LK_INTERLOCK;
713 "%s: interrupted sleep for %p with %d",
714 __func__, lk, error);
717 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
719 x = lockmgr_read_value(lk);
724 LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
725 LOCKSTAT_READER, (x & LK_SHARE) == 0,
726 (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
728 #ifdef LOCK_PROFILING
729 lockmgr_note_shared_acquire(lk, contested, waittime,
732 lockmgr_note_shared_acquire(lk, 0, 0, file, line,
738 lockmgr_exit(flags, ilk, 0);
743 lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
745 struct thread *owner;
749 MPASS(x != LK_UNLOCKED);
750 owner = (struct thread *)LK_HOLDER(x);
752 MPASS(owner != curthread);
755 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
757 if (owner == (struct thread *)LK_KERNPROC)
759 if (!TD_IS_RUNNING(owner))
761 if ((x & LK_ALL_WAITERS) != 0)
764 x = lockmgr_read_value(lk);
765 if (x == LK_UNLOCKED) {
769 owner = (struct thread *)LK_HOLDER(x);
773 static __noinline int
774 lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
775 const char *file, int line, struct lockmgr_wait *lwa)
777 struct lock_class *class;
784 uint64_t sleep_time = 0;
786 #ifdef LOCK_PROFILING
787 uint64_t waittime = 0;
790 struct lock_delay_arg lda;
792 if (KERNEL_PANICKED())
795 tid = (uintptr_t)curthread;
797 if (LK_CAN_WITNESS(flags))
798 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
799 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
803 * If curthread already holds the lock and this one is
804 * allowed to recurse, simply recurse on it.
806 if (lockmgr_xlocked(lk)) {
807 if ((flags & LK_CANRECURSE) == 0 &&
808 (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
810 * If this is a try operation, just give up
811 * and return instead of panicking.
813 if (LK_TRYOP(flags)) {
815 "%s: %p fails the try operation",
820 if (flags & LK_INTERLOCK) {
821 class = LOCK_CLASS(ilk);
822 class->lc_unlock(ilk);
825 panic("%s: recursing on non recursive lockmgr %p "
826 "@ %s:%d\n", __func__, lk, file, line);
828 atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
830 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
831 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
832 lk->lk_recurse, file, line);
833 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
834 LK_TRYWIT(flags), file, line);
835 TD_LOCKS_INC(curthread);
840 lock_delay_arg_init(&lda, &lockmgr_delay);
842 flags &= ~LK_ADAPTIVE;
844 if (x == LK_UNLOCKED) {
845 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
850 lock_profile_obtain_lock_failed(&lk->lock_object, false,
851 &contested, &waittime);
853 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
854 if (lockmgr_xlock_adaptive(&lda, lk, &x))
858 PMC_SOFT_CALL( , , lock, failed);
862 * If this is a try operation, just give up and return instead of sleeping.
865 if (LK_TRYOP(flags)) {
866 LOCK_LOG2(lk, "%s: %p fails the try operation",
873 * Acquire the sleepqueue chain lock because we
874 * will probably need to manipulate the waiters flags.
876 sleepq_lock(&lk->lock_object);
877 x = lockmgr_read_value(lk);
881 * If the lock has been released while we spun on
882 * the sleepqueue chain lock, just try again.
884 if (x == LK_UNLOCKED) {
885 sleepq_release(&lk->lock_object);
890 * The lock can be in a state where there is a
891 * pending queue of waiters but still no owner.
892 * This happens when the lock is contested and an
893 * owner is going to claim the lock.
894 * If curthread is the one that successfully acquires
895 * it, claim lock ownership and return, preserving the
896 * waiters flags.
898 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
899 if ((x & ~v) == LK_UNLOCKED) {
900 v &= ~LK_EXCLUSIVE_SPINNERS;
901 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
903 sleepq_release(&lk->lock_object);
905 "%s: %p claimed by a new writer",
913 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
914 * fail, loop back and retry.
916 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
917 if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
918 x | LK_EXCLUSIVE_WAITERS)) {
921 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
926 iwmesg = lk->lock_object.lo_name;
930 iwmesg = lwa->iwmesg;
936 * Since we have been unable to acquire the
937 * exclusive lock and the exclusive waiters flag
938 * is set, we will sleep.
941 sleep_time -= lockstat_nsecs(&lk->lock_object);
943 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
946 sleep_time += lockstat_nsecs(&lk->lock_object);
948 flags &= ~LK_INTERLOCK;
951 "%s: interrupted sleep for %p with %d",
952 __func__, lk, error);
955 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
957 x = lockmgr_read_value(lk);
962 LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
963 LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
964 (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
966 #ifdef LOCK_PROFILING
967 lockmgr_note_exclusive_acquire(lk, contested, waittime,
970 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
976 lockmgr_exit(flags, ilk, 0);
980 static __noinline int
981 lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
982 const char *file, int line, struct lockmgr_wait *lwa)
984 uintptr_t tid, v, setv;
988 if (KERNEL_PANICKED())
991 tid = (uintptr_t)curthread;
993 _lockmgr_assert(lk, KA_SLOCKED, file, line);
995 op = flags & LK_TYPE_MASK;
996 v = lockmgr_read_value(lk);
998 if (LK_SHARERS(v) > 1) {
999 if (op == LK_TRYUPGRADE) {
1000 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
1005 if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
1006 v - LK_ONE_SHARER)) {
1007 lockmgr_note_shared_release(lk, file, line);
1012 MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));
1015 setv |= (v & LK_ALL_WAITERS);
1018 * Try to switch from one shared lock to an exclusive one.
1019 * We need to preserve waiters flags during the operation.
1021 if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
1022 LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
1024 WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
1025 LK_TRYWIT(flags), file, line);
1026 LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
1027 TD_SLOCKS_DEC(curthread);
1033 error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
1034 flags &= ~LK_INTERLOCK;
1036 lockmgr_exit(flags, ilk, 0);
1041 lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
1042 const char *file, int line)
1044 struct lock_class *class;
1049 if (KERNEL_PANICKED())
1052 op = flags & LK_TYPE_MASK;
1056 if (LK_CAN_WITNESS(flags))
1057 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1058 file, line, flags & LK_INTERLOCK ? ilk : NULL);
1059 if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
1061 x = lockmgr_read_value(lk);
1062 if (lockmgr_slock_try(lk, &x, flags, true)) {
1063 lockmgr_note_shared_acquire(lk, 0, 0,
1067 return (lockmgr_slock_hard(lk, flags, ilk, file, line,
1072 if (LK_CAN_WITNESS(flags))
1073 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1074 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1076 tid = (uintptr_t)curthread;
1077 if (lockmgr_read_value(lk) == LK_UNLOCKED &&
1078 atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1079 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1083 return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
1089 return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
1093 if (__predict_true(locked)) {
1094 if (__predict_false(flags & LK_INTERLOCK)) {
1095 class = LOCK_CLASS(ilk);
1096 class->lc_unlock(ilk);
1100 return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
1101 LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
1105 static __noinline int
1106 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1107 const char *file, int line)
1110 int wakeup_swapper = 0;
1112 if (KERNEL_PANICKED())
1115 wakeup_swapper = wakeupshlk(lk, file, line);
1118 lockmgr_exit(flags, ilk, wakeup_swapper);
1122 static __noinline int
1123 lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1124 const char *file, int line)
1127 int wakeup_swapper = 0;
1131 if (KERNEL_PANICKED())
1134 tid = (uintptr_t)curthread;
1137 * As a first option, treat the lock as if it has not
1138 * been disowned.
1139 * Fix up the tid variable if the lock has been disowned.
1141 if (LK_HOLDER(x) == LK_KERNPROC)
1145 * The lock is held in exclusive mode.
1146 * If the lock is also recursed, then unrecurse it.
1148 if (lockmgr_recursed_v(x)) {
1149 LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
1151 if (lk->lk_recurse == 0)
1152 atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
1155 if (tid != LK_KERNPROC)
1156 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
1159 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
1162 sleepq_lock(&lk->lock_object);
1163 x = lockmgr_read_value(lk);
1167 * If the lock has exclusive waiters, give them
1168 * preference in order to avoid a deadlock with
1169 * shared runners-up.
1170 * If interruptible sleeps left the exclusive queue
1171 * empty, avoid starving the threads sleeping on
1172 * the shared queue by giving them precedence and
1173 * clearing the exclusive waiters bit anyway.
1174 * Note that the lk_exslpfail count may overstate
1175 * the real number of waiters with the LK_SLEEPFAIL
1176 * flag set, because such waiters may also be using
1177 * interruptible sleeps; treat lk_exslpfail as an
1178 * upper bound, including the edge cases.
1181 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1182 realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
1183 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1184 if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
1185 lk->lk_exslpfail = 0;
1186 queue = SQ_EXCLUSIVE_QUEUE;
1187 v |= (x & LK_SHARED_WAITERS);
1189 lk->lk_exslpfail = 0;
1191 "%s: %p has only LK_SLEEPFAIL sleepers",
1194 "%s: %p waking up threads on the exclusive queue",
1196 wakeup_swapper = sleepq_broadcast(&lk->lock_object,
1197 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1198 queue = SQ_SHARED_QUEUE;
1202 * Exclusive waiters sleeping with LK_SLEEPFAIL
1203 * set and using interruptible sleeps/timeouts
1204 * may have left a spurious lk_exslpfail count,
1205 * so clean it up anyway.
1207 lk->lk_exslpfail = 0;
1208 queue = SQ_SHARED_QUEUE;
1211 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
1212 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1214 atomic_store_rel_ptr(&lk->lk_lock, v);
1215 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
1216 sleepq_release(&lk->lock_object);
1219 lockmgr_exit(flags, ilk, wakeup_swapper);
1224 * Lightweight entry points for common operations.
1226 * Functionality is similar to sx locks, in that none of the additional lockmgr
1227 * features are supported. To be clear, these are NOT supported:
1228 * 1. shared locking disablement
1229 * 2. returning with an error after sleep
1230 * 3. unlocking the interlock
1232 * If in doubt, use lockmgr_lock_flags.
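 *
 * A minimal sketch of the intended pattern, assuming a lock already
 * initialized with lockinit() and none of the unsupported features above
 * (file/line are the usual lock-debugging arguments):
 *
 *	(void)lockmgr_xlock(lk, LK_EXCLUSIVE, __FILE__, __LINE__);
 *	... access the object protected by lk ...
 *	(void)lockmgr_unlock(lk);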
1235 lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1239 MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
1240 MPASS((flags & LK_INTERLOCK) == 0);
1241 MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1243 if (LK_CAN_WITNESS(flags))
1244 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1246 x = lockmgr_read_value(lk);
1247 if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1248 lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1252 return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1256 lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1260 MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
1261 MPASS((flags & LK_INTERLOCK) == 0);
1263 if (LK_CAN_WITNESS(flags))
1264 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1265 LOP_EXCLUSIVE, file, line, NULL);
1266 tid = (uintptr_t)curthread;
1267 if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1268 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1273 return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1277 lockmgr_unlock(struct lock *lk)
1286 _lockmgr_assert(lk, KA_LOCKED, file, line);
1287 x = lockmgr_read_value(lk);
1288 if (__predict_true((x & LK_SHARE) != 0)) {
1289 lockmgr_note_shared_release(lk, file, line);
1290 if (lockmgr_sunlock_try(lk, &x)) {
1291 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1293 return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1296 tid = (uintptr_t)curthread;
1297 lockmgr_note_exclusive_release(lk, file, line);
1298 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1299 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
1301 return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1308 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
1309 const char *wmesg, int pri, int timo, const char *file, int line)
1312 struct lockmgr_wait lwa;
1313 struct lock_class *class;
1315 uintptr_t tid, v, x;
1316 u_int op, realexslp;
1317 int error, ipri, itimo, queue, wakeup_swapper;
1318 #ifdef LOCK_PROFILING
1319 uint64_t waittime = 0;
1323 if (KERNEL_PANICKED())
1327 tid = (uintptr_t)curthread;
1328 op = (flags & LK_TYPE_MASK);
1329 iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
1330 ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
1331 itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
1333 lwa.iwmesg = iwmesg;
1337 MPASS((flags & ~LK_TOTAL_MASK) == 0);
1338 KASSERT((op & (op - 1)) == 0,
1339 ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
1340 KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
1341 (op != LK_DOWNGRADE && op != LK_RELEASE),
1342 ("%s: Invalid flags in regard of the operation desired @ %s:%d",
1343 __func__, file, line));
1344 KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
1345 ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
1346 __func__, file, line));
1347 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
1348 ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
1349 lk->lock_object.lo_name, file, line));
1351 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
1353 if (lk->lock_object.lo_flags & LK_NOSHARE) {
1361 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
1363 if (flags & LK_INTERLOCK)
1364 class->lc_unlock(ilk);
1372 return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
1376 return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
1379 return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1382 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1383 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
1386 * Panic if the lock is recursed.
1388 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1389 if (flags & LK_INTERLOCK)
1390 class->lc_unlock(ilk);
1391 panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
1392 __func__, iwmesg, file, line);
1394 TD_SLOCKS_INC(curthread);
1397 * In order to preserve waiters flags, just spin.
1400 x = lockmgr_read_value(lk);
1401 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1402 x &= LK_ALL_WAITERS;
1403 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1404 LK_SHARERS_LOCK(1) | x))
1408 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
1409 LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1412 _lockmgr_assert(lk, KA_LOCKED, file, line);
1413 x = lockmgr_read_value(lk);
1415 if (__predict_true((x & LK_SHARE) != 0)) {
1416 lockmgr_note_shared_release(lk, file, line);
1417 return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1419 lockmgr_note_exclusive_release(lk, file, line);
1420 return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1424 if (LK_CAN_WITNESS(flags))
1425 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1426 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1430 * Trying to drain a lock we already own will result in a deadlock.
1433 if (lockmgr_xlocked(lk)) {
1434 if (flags & LK_INTERLOCK)
1435 class->lc_unlock(ilk);
1436 panic("%s: draining %s with the lock held @ %s:%d\n",
1437 __func__, iwmesg, file, line);
1441 if (lk->lk_lock == LK_UNLOCKED &&
1442 atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1446 PMC_SOFT_CALL( , , lock, failed);
1448 lock_profile_obtain_lock_failed(&lk->lock_object, false,
1449 &contested, &waittime);
1452 * If this is a try operation, just give up and return instead of sleeping.
1455 if (LK_TRYOP(flags)) {
1456 LOCK_LOG2(lk, "%s: %p fails the try operation",
1463 * Acquire the sleepqueue chain lock because we
1464 * will probably need to manipulate the waiters flags.
1466 sleepq_lock(&lk->lock_object);
1467 x = lockmgr_read_value(lk);
1470 * If the lock has been released while we spun on
1471 * the sleepqueue chain lock, just try again.
1473 if (x == LK_UNLOCKED) {
1474 sleepq_release(&lk->lock_object);
1478 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1479 if ((x & ~v) == LK_UNLOCKED) {
1480 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1483 * If interruptible sleeps left the exclusive
1484 * queue empty, avoid starving the threads
1485 * sleeping on the shared queue by giving them
1486 * precedence and clearing the exclusive
1487 * waiters bit anyway.
1488 * Note that the lk_exslpfail count may
1489 * overstate the real number of waiters with
1490 * the LK_SLEEPFAIL flag set, because such
1491 * waiters may also be using interruptible
1492 * sleeps; treat lk_exslpfail as an upper
1493 * bound, including the edge cases.
1496 if (v & LK_EXCLUSIVE_WAITERS) {
1497 queue = SQ_EXCLUSIVE_QUEUE;
1498 v &= ~LK_EXCLUSIVE_WAITERS;
1501 * Exclusive waiters sleeping with
1502 * LK_SLEEPFAIL set and using
1503 * interruptible sleeps/timeouts may
1504 * have left a spurious lk_exslpfail
1505 * count, so clean it up anyway.
1507 MPASS(v & LK_SHARED_WAITERS);
1508 lk->lk_exslpfail = 0;
1509 queue = SQ_SHARED_QUEUE;
1510 v &= ~LK_SHARED_WAITERS;
1512 if (queue == SQ_EXCLUSIVE_QUEUE) {
1514 sleepq_sleepcnt(&lk->lock_object,
1515 SQ_EXCLUSIVE_QUEUE);
1516 if (lk->lk_exslpfail >= realexslp) {
1517 lk->lk_exslpfail = 0;
1518 queue = SQ_SHARED_QUEUE;
1519 v &= ~LK_SHARED_WAITERS;
1520 if (realexslp != 0) {
1522 "%s: %p has only LK_SLEEPFAIL sleepers",
1525 "%s: %p waking up threads on the exclusive queue",
1531 SQ_EXCLUSIVE_QUEUE);
1534 lk->lk_exslpfail = 0;
1536 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1537 sleepq_release(&lk->lock_object);
1541 "%s: %p waking up all threads on the %s queue",
1542 __func__, lk, queue == SQ_SHARED_QUEUE ?
1543 "shared" : "exclusive");
1544 wakeup_swapper |= sleepq_broadcast(
1545 &lk->lock_object, SLEEPQ_LK, 0, queue);
1548 * If shared waiters have been woken up, we need
1549 * to wait for one of them to acquire the lock
1550 * before setting the exclusive waiters flag, in
1551 * order to avoid a deadlock.
1553 if (queue == SQ_SHARED_QUEUE) {
1554 for (v = lk->lk_lock;
1555 (v & LK_SHARE) && !LK_SHARERS(v);
1562 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
1563 * fail, loop back and retry.
1565 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1566 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1567 x | LK_EXCLUSIVE_WAITERS)) {
1568 sleepq_release(&lk->lock_object);
1571 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1576 * Since we have been unable to acquire the
1577 * exclusive lock and the exclusive waiters flag
1578 * is set, we will sleep.
1580 if (flags & LK_INTERLOCK) {
1581 class->lc_unlock(ilk);
1582 flags &= ~LK_INTERLOCK;
1585 sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1586 SQ_EXCLUSIVE_QUEUE);
1587 sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1589 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1594 lock_profile_obtain_lock_success(&lk->lock_object,
1595 false, contested, waittime, file, line);
1596 LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1597 lk->lk_recurse, file, line);
1598 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1599 LK_TRYWIT(flags), file, line);
1600 TD_LOCKS_INC(curthread);
1605 if (flags & LK_INTERLOCK)
1606 class->lc_unlock(ilk);
1607 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1610 if (flags & LK_INTERLOCK)
1611 class->lc_unlock(ilk);
1619 _lockmgr_disown(struct lock *lk, const char *file, int line)
1623 if (SCHEDULER_STOPPED())
1626 tid = (uintptr_t)curthread;
1627 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1630 * Panic if the lock is recursed.
1632 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1633 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1634 __func__, file, line);
1637 * If the owner is already LK_KERNPROC just skip the whole operation.
1639 if (LK_HOLDER(lk->lk_lock) != tid)
1641 lock_profile_release_lock(&lk->lock_object, false);
1642 LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1643 LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1644 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1645 TD_LOCKS_DEC(curthread);
1649 * In order to preserve waiters flags, just spin.
1652 x = lockmgr_read_value(lk);
1653 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1654 x &= LK_ALL_WAITERS;
1655 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1663 lockmgr_printinfo(const struct lock *lk)
1668 if (lk->lk_lock == LK_UNLOCKED)
1669 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1670 else if (lk->lk_lock & LK_SHARE)
1671 printf("lock type %s: SHARED (count %ju)\n",
1672 lk->lock_object.lo_name,
1673 (uintmax_t)LK_SHARERS(lk->lk_lock));
1675 td = lockmgr_xholder(lk);
1676 if (td == (struct thread *)LK_KERNPROC)
1677 printf("lock type %s: EXCL by KERNPROC\n",
1678 lk->lock_object.lo_name);
1680 printf("lock type %s: EXCL by thread %p "
1681 "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1682 td, td->td_proc->p_pid, td->td_proc->p_comm,
1687 if (x & LK_EXCLUSIVE_WAITERS)
1688 printf(" with exclusive waiters pending\n");
1689 if (x & LK_SHARED_WAITERS)
1690 printf(" with shared waiters pending\n");
1691 if (x & LK_EXCLUSIVE_SPINNERS)
1692 printf(" with exclusive spinners pending\n");
1698 lockstatus(const struct lock *lk)
1704 x = lockmgr_read_value(lk);
1707 if ((x & LK_SHARE) == 0) {
1708 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1712 } else if (x == LK_UNLOCKED)
1718 #ifdef INVARIANT_SUPPORT
1720 FEATURE(invariant_support,
1721 "Support for modules compiled with INVARIANTS option");
1724 #undef _lockmgr_assert
1728 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1732 if (KERNEL_PANICKED())
1736 case KA_SLOCKED | KA_NOTRECURSED:
1737 case KA_SLOCKED | KA_RECURSED:
1740 case KA_LOCKED | KA_NOTRECURSED:
1741 case KA_LOCKED | KA_RECURSED:
1745 * We cannot trust WITNESS if the lock is held in exclusive
1746 * mode and a call to lockmgr_disown() happened.
1747 * Work around this by skipping the check if the lock is held in
1748 * exclusive mode, even for the KA_LOCKED case.
1750 if (slocked || (lk->lk_lock & LK_SHARE)) {
1751 witness_assert(&lk->lock_object, what, file, line);
1755 if (lk->lk_lock == LK_UNLOCKED ||
1756 ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1757 (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1758 panic("Lock %s not %slocked @ %s:%d\n",
1759 lk->lock_object.lo_name, slocked ? "share" : "",
1762 if ((lk->lk_lock & LK_SHARE) == 0) {
1763 if (lockmgr_recursed(lk)) {
1764 if (what & KA_NOTRECURSED)
1765 panic("Lock %s recursed @ %s:%d\n",
1766 lk->lock_object.lo_name, file,
1768 } else if (what & KA_RECURSED)
1769 panic("Lock %s not recursed @ %s:%d\n",
1770 lk->lock_object.lo_name, file, line);
1774 case KA_XLOCKED | KA_NOTRECURSED:
1775 case KA_XLOCKED | KA_RECURSED:
1776 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1777 panic("Lock %s not exclusively locked @ %s:%d\n",
1778 lk->lock_object.lo_name, file, line);
1779 if (lockmgr_recursed(lk)) {
1780 if (what & KA_NOTRECURSED)
1781 panic("Lock %s recursed @ %s:%d\n",
1782 lk->lock_object.lo_name, file, line);
1783 } else if (what & KA_RECURSED)
1784 panic("Lock %s not recursed @ %s:%d\n",
1785 lk->lock_object.lo_name, file, line);
1788 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1789 panic("Lock %s exclusively locked @ %s:%d\n",
1790 lk->lock_object.lo_name, file, line);
1793 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1801 lockmgr_chain(struct thread *td, struct thread **ownerp)
1803 const struct lock *lk;
1807 if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1809 db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1810 if (lk->lk_lock & LK_SHARE)
1811 db_printf("SHARED (count %ju)\n",
1812 (uintmax_t)LK_SHARERS(lk->lk_lock));
1814 db_printf("EXCL\n");
1815 *ownerp = lockmgr_xholder(lk);
1821 db_show_lockmgr(const struct lock_object *lock)
1824 const struct lock *lk;
1826 lk = (const struct lock *)lock;
1828 db_printf(" state: ");
1829 if (lk->lk_lock == LK_UNLOCKED)
1830 db_printf("UNLOCKED\n");
1831 else if (lk->lk_lock & LK_SHARE)
1832 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1834 td = lockmgr_xholder(lk);
1835 if (td == (struct thread *)LK_KERNPROC)
1836 db_printf("XLOCK: LK_KERNPROC\n");
1838 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1839 td->td_tid, td->td_proc->p_pid,
1840 td->td_proc->p_comm);
1841 if (lockmgr_recursed(lk))
1842 db_printf(" recursed: %d\n", lk->lk_recurse);
1844 db_printf(" waiters: ");
1845 switch (lk->lk_lock & LK_ALL_WAITERS) {
1846 case LK_SHARED_WAITERS:
1847 db_printf("shared\n");
1849 case LK_EXCLUSIVE_WAITERS:
1850 db_printf("exclusive\n");
1852 case LK_ALL_WAITERS:
1853 db_printf("shared and exclusive\n");
1856 db_printf("none\n");
1858 db_printf(" spinners: ");
1859 if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1860 db_printf("exclusive\n");
1862 db_printf("none\n");