 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
#include "opt_hwpmc_hooks.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/lockstat.h>
#include <sys/mutex.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
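/*
 * lockmgr uses two sleepqueues per lock: one for threads waiting for an
 * exclusive lock and one for threads waiting for a shared lock.
 */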
#define SQ_EXCLUSIVE_QUEUE 0
#define SQ_SHARED_QUEUE 1
#define _lockmgr_assert(lk, what, file, line)
#define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++)
#define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--)
#define STACK_PRINT(lk)
#define STACK_SAVE(lk)
#define STACK_ZERO(lk)
#define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
#define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
#define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
#define LOCK_LOG2(lk, string, arg1, arg2) \
if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
#define GIANT_DECLARE \
WITNESS_SAVE_DECL(Giant)
#define GIANT_RESTORE() do { \
if (__predict_false(_i > 0)) { \
WITNESS_RESTORE(&Giant.lock_object, Giant); \
#define GIANT_SAVE() do { \
if (__predict_false(mtx_owned(&Giant))) { \
WITNESS_SAVE(&Giant.lock_object, Giant); \
while (mtx_owned(&Giant)) { \
mtx_unlock(&Giant); \
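/*
 * Can a new shared acquisition be granted for lock word 'x'?  True when the
 * lock is share-locked with no exclusive waiters or spinners, or when the
 * calling thread is allowed to jump ahead of exclusive waiters for deadlock
 * avoidance (it already holds shared lockmgr locks or has TDP_DEADLKTREAT
 * set).
 */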
static bool __always_inline
LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
if (fp || (!(x & LK_SHARE)))
if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
(curthread->td_pflags & TDP_DEADLKTREAT))
#define LK_TRYOP(x) \
#define LK_CAN_WITNESS(x) \
(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define LK_TRYWIT(x) \
(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
#define lockmgr_disowned(lk) \
(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
#define lockmgr_xlocked_v(v) \
(((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
#define lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
static void assert_lockmgr(const struct lock_object *lock, int how);
static void db_show_lockmgr(const struct lock_object *lock);
static void lock_lockmgr(struct lock_object *lock, uintptr_t how);
static int owner_lockmgr(const struct lock_object *lock,
    struct thread **owner);
static uintptr_t unlock_lockmgr(struct lock_object *lock);
struct lock_class lock_class_lockmgr = {
.lc_name = "lockmgr",
.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
.lc_assert = assert_lockmgr,
.lc_ddb_show = db_show_lockmgr,
.lc_lock = lock_lockmgr,
.lc_unlock = unlock_lockmgr,
.lc_owner = owner_lockmgr,
static __read_mostly bool lk_adaptive = true;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
#define lockmgr_delay locks_delay
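/*
 * Sleep parameters (wait message, priority and timeout) threaded through to
 * the hard-path acquisition routines.
 */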
struct lockmgr_wait {
static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
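/*
 * Common exit path: release the interlock if LK_INTERLOCK was passed and
 * kick the swapper if a woken-up thread requires it.
 */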
lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
struct lock_class *class;
if (flags & LK_INTERLOCK) {
class = LOCK_CLASS(ilk);
class->lc_unlock(ilk);
if (__predict_false(wakeup_swapper))
lockmgr_note_shared_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
    waittime, file, line, LOCKSTAT_READER);
LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
TD_LOCKS_INC(curthread);
TD_SLOCKS_INC(curthread);
lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
TD_LOCKS_DEC(curthread);
TD_SLOCKS_DEC(curthread);
lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
    waittime, file, line, LOCKSTAT_WRITER);
LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
TD_LOCKS_INC(curthread);
lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
TD_LOCKS_DEC(curthread);
LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
x = lockmgr_read_value(lk);
return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
 * It assumes the sleepqueue lock is held on entry and returns with it released.
 * It also assumes the generic interlock is sane and has been checked beforehand.
 * If LK_INTERLOCK is specified, the interlock is not reacquired after the
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
struct lock_class *class;
class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
catch = pri & PCATCH;
LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
    SLEEPQ_INTERRUPTIBLE : 0), queue);
if ((flags & LK_TIMELOCK) && timo)
sleepq_set_timeout(&lk->lock_object, timo);
 * Decide which sleep primitive to use.
if ((flags & LK_TIMELOCK) && timo && catch)
error = sleepq_timedwait_sig(&lk->lock_object, pri);
else if ((flags & LK_TIMELOCK) && timo)
error = sleepq_timedwait(&lk->lock_object, pri);
error = sleepq_wait_sig(&lk->lock_object, pri);
sleepq_wait(&lk->lock_object, pri);
if ((flags & LK_SLEEPFAIL) && error == 0)
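/*
 * Release a shared lock; if the last sharer leaves while waiters are queued,
 * wake up the appropriate sleepqueue.  Returns non-zero if the swapper needs
 * to be woken up.
 */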
wakeupshlk(struct lock *lk, const char *file, int line)
uintptr_t v, x, orig_x;
int queue, wakeup_swapper;
x = lockmgr_read_value(lk);
if (lockmgr_sunlock_try(lk, &x))
 * We should have a sharer with waiters, so enter the hard
 * path in order to handle wakeups correctly.
sleepq_lock(&lk->lock_object);
orig_x = lockmgr_read_value(lk);
x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
 * If the lock has exclusive waiters, give them preference in
 * order to avoid a deadlock with shared runners-up.
 * If interruptible sleeps left the exclusive queue empty,
 * avoid starving the threads sleeping on the shared queue by
 * giving them precedence and clearing the exclusive waiters
 * bit anyway.
 * Note that the lk_exslpfail count may overstate the real
 * number of waiters with the LK_SLEEPFAIL flag set, because
 * such sleeps may also be interruptible; treat lk_exslpfail
 * as an upper bound, including the edge cases.
realexslp = sleepq_sleepcnt(&lk->lock_object,
if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
if (lk->lk_exslpfail < realexslp) {
lk->lk_exslpfail = 0;
queue = SQ_EXCLUSIVE_QUEUE;
v |= (x & LK_SHARED_WAITERS);
lk->lk_exslpfail = 0;
"%s: %p has only LK_SLEEPFAIL sleepers",
"%s: %p waking up threads on the exclusive queue",
sleepq_broadcast(&lk->lock_object,
    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
queue = SQ_SHARED_QUEUE;
 * Exclusive waiters sleeping with LK_SLEEPFAIL on
 * and using interruptible sleeps/timeout may have
 * left spurious lk_exslpfail counts on, so clean
lk->lk_exslpfail = 0;
queue = SQ_SHARED_QUEUE;
if (lockmgr_sunlock_try(lk, &orig_x)) {
sleepq_release(&lk->lock_object);
x |= LK_SHARERS_LOCK(1);
if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
sleepq_release(&lk->lock_object);
LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
return (wakeup_swapper);
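/*
 * lockmgr locks do not support the generic lock class operations used for
 * sleep interlocking, assertions or owner queries, so these methods simply
 * panic.
 */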
assert_lockmgr(const struct lock_object *lock, int what)
panic("lockmgr locks do not support assertions");
lock_lockmgr(struct lock_object *lock, uintptr_t how)
panic("lockmgr locks do not support sleep interlocking");
unlock_lockmgr(struct lock_object *lock)
panic("lockmgr locks do not support sleep interlocking");
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
panic("lockmgr locks do not support owner inquiring");
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
MPASS((flags & ~LK_INIT_MASK) == 0);
ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
iflags = LO_SLEEPABLE | LO_UPGRADABLE;
if (flags & LK_CANRECURSE)
iflags |= LO_RECURSABLE;
if ((flags & LK_NODUP) == 0)
if (flags & LK_NOPROFILE)
iflags |= LO_NOPROFILE;
if ((flags & LK_NOWITNESS) == 0)
iflags |= LO_WITNESS;
if (flags & LK_QUIET)
if (flags & LK_IS_VNODE)
iflags |= LO_IS_VNODE;
iflags |= flags & LK_NOSHARE;
lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
lk->lk_lock = LK_UNLOCKED;
lk->lk_exslpfail = 0;
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization. Used for certain vnode and buf locks.
lockallowshare(struct lock *lk)
lockmgr_assert(lk, KA_XLOCKED);
lk->lock_object.lo_flags &= ~LK_NOSHARE;
lockdisableshare(struct lock *lk)
lockmgr_assert(lk, KA_XLOCKED);
lk->lock_object.lo_flags |= LK_NOSHARE;
lockallowrecurse(struct lock *lk)
lockmgr_assert(lk, KA_XLOCKED);
lk->lock_object.lo_flags |= LO_RECURSABLE;
lockdisablerecurse(struct lock *lk)
lockmgr_assert(lk, KA_XLOCKED);
lk->lock_object.lo_flags &= ~LO_RECURSABLE;
lockdestroy(struct lock *lk)
KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
lock_destroy(&lk->lock_object);
static bool __always_inline
lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
 * If no other thread has an exclusive lock, or
 * no exclusive waiter is present, bump the count of
 * sharers. Since we have to preserve the state of
 * waiters, if we fail to acquire the shared lock
 * loop back and retry.
while (LK_CAN_SHARE(*xp, flags, fp)) {
if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
    *xp + LK_ONE_SHARER)) {
static bool __always_inline
lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
    *xp - LK_ONE_SHARER))
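/*
 * Adaptive spinning for shared acquisition: spin while the exclusive owner is
 * running on another CPU, giving up once waiters show up or the lock becomes
 * shareable.
 */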
lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
struct thread *owner;
MPASS(x != LK_UNLOCKED);
owner = (struct thread *)LK_HOLDER(x);
MPASS(owner != curthread);
if (owner == (struct thread *)LK_KERNPROC)
if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
if (!TD_IS_RUNNING(owner))
if ((x & LK_ALL_WAITERS) != 0)
x = lockmgr_read_value(lk);
if (LK_CAN_SHARE(x, flags, false)) {
owner = (struct thread *)LK_HOLDER(x);
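/*
 * Slow path for shared acquisition: optionally spin adaptively, then block on
 * the shared sleepqueue until LK_CAN_SHARE() is satisfied.
 */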
static __noinline int
lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
uint64_t sleep_time = 0;
#ifdef LOCK_PROFILING
uint64_t waittime = 0;
struct lock_delay_arg lda;
if (KERNEL_PANICKED())
tid = (uintptr_t)curthread;
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
    file, line, flags & LK_INTERLOCK ? ilk : NULL);
x = lockmgr_read_value(lk);
lock_delay_arg_init(&lda, &lockmgr_delay);
flags &= ~LK_ADAPTIVE;
 * The lock may already be locked exclusive by curthread,
if (LK_HOLDER(x) == tid) {
"%s: %p already held in exclusive mode",
if (lockmgr_slock_try(lk, &x, flags, false))
lock_profile_obtain_lock_failed(&lk->lock_object, false,
    &contested, &waittime);
if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
PMC_SOFT_CALL( , , lock, failed);
 * If the lock is expected to not sleep just give up
if (LK_TRYOP(flags)) {
LOCK_LOG2(lk, "%s: %p fails the try operation",
 * Acquire the sleepqueue chain lock because we
 * probably will need to manipulate waiters flags.
sleepq_lock(&lk->lock_object);
x = lockmgr_read_value(lk);
 * if the lock can be acquired in shared mode, try
if (LK_CAN_SHARE(x, flags, false)) {
sleepq_release(&lk->lock_object);
 * Try to set the LK_SHARED_WAITERS flag. If we fail,
 * loop back and retry.
if ((x & LK_SHARED_WAITERS) == 0) {
if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
    x | LK_SHARED_WAITERS)) {
LOCK_LOG2(lk, "%s: %p set shared waiters flag",
iwmesg = lk->lock_object.lo_name;
iwmesg = lwa->iwmesg;
 * Since we have been unable to acquire the
 * shared lock and the shared waiters flag is set,
sleep_time -= lockstat_nsecs(&lk->lock_object);
error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
sleep_time += lockstat_nsecs(&lk->lock_object);
flags &= ~LK_INTERLOCK;
"%s: interrupted sleep for %p with %d",
    __func__, lk, error);
LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
x = lockmgr_read_value(lk);
LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
    LOCKSTAT_READER, (x & LK_SHARE) == 0,
    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#ifdef LOCK_PROFILING
lockmgr_note_shared_acquire(lk, contested, waittime,
lockmgr_note_shared_acquire(lk, 0, 0, file, line,
lockmgr_exit(flags, ilk, 0);
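/*
 * Adaptive spinning for exclusive acquisition, mirroring the shared variant.
 */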
lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
struct thread *owner;
MPASS(x != LK_UNLOCKED);
owner = (struct thread *)LK_HOLDER(x);
MPASS(owner != curthread);
if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
if (owner == (struct thread *)LK_KERNPROC)
if (!TD_IS_RUNNING(owner))
if ((x & LK_ALL_WAITERS) != 0)
x = lockmgr_read_value(lk);
if (x == LK_UNLOCKED) {
owner = (struct thread *)LK_HOLDER(x);
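/*
 * Slow path for exclusive acquisition: handle recursion, optionally spin
 * adaptively, and otherwise sleep on the exclusive queue until the lock can
 * be claimed.
 */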
static __noinline int
lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
struct lock_class *class;
uint64_t sleep_time = 0;
#ifdef LOCK_PROFILING
uint64_t waittime = 0;
struct lock_delay_arg lda;
if (KERNEL_PANICKED())
tid = (uintptr_t)curthread;
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
 * If curthread already holds the lock and this one is
 * allowed to recurse, simply recurse on it.
if (lockmgr_xlocked(lk)) {
if ((flags & LK_CANRECURSE) == 0 &&
    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
 * If the caller requested a try operation, just
 * give up and return instead of panicking.
if (LK_TRYOP(flags)) {
"%s: %p fails the try operation",
if (flags & LK_INTERLOCK) {
class = LOCK_CLASS(ilk);
class->lc_unlock(ilk);
panic("%s: recursing on non recursive lockmgr %p "
    "@ %s:%d\n", __func__, lk, file, line);
atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
    lk->lk_recurse, file, line);
WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
    LK_TRYWIT(flags), file, line);
TD_LOCKS_INC(curthread);
lock_delay_arg_init(&lda, &lockmgr_delay);
flags &= ~LK_ADAPTIVE;
if (x == LK_UNLOCKED) {
if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
lock_profile_obtain_lock_failed(&lk->lock_object, false,
    &contested, &waittime);
if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
if (lockmgr_xlock_adaptive(&lda, lk, &x))
PMC_SOFT_CALL( , , lock, failed);
 * If the lock is expected to not sleep just give up
if (LK_TRYOP(flags)) {
LOCK_LOG2(lk, "%s: %p fails the try operation",
 * Acquire the sleepqueue chain lock because we
 * probably will need to manipulate waiters flags.
sleepq_lock(&lk->lock_object);
x = lockmgr_read_value(lk);
 * if the lock has been released while we spun on
 * the sleepqueue chain lock just try again.
if (x == LK_UNLOCKED) {
sleepq_release(&lk->lock_object);
 * The lock can be in the state where there is a
 * pending queue of waiters, but still no owner.
 * This happens when the lock is contested and an
 * owner is going to claim the lock.
 * If curthread is the one that successfully acquires
 * it, claim lock ownership and return, preserving waiters
v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
if ((x & ~v) == LK_UNLOCKED) {
v &= ~LK_EXCLUSIVE_SPINNERS;
if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
sleepq_release(&lk->lock_object);
"%s: %p claimed by a new writer",
 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
 * fail, loop back and retry.
if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
    x | LK_EXCLUSIVE_WAITERS)) {
LOCK_LOG2(lk, "%s: %p set excl waiters flag",
iwmesg = lk->lock_object.lo_name;
iwmesg = lwa->iwmesg;
 * Since we have been unable to acquire the
 * exclusive lock and the exclusive waiters flag
 * is set, we will sleep.
sleep_time -= lockstat_nsecs(&lk->lock_object);
error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
sleep_time += lockstat_nsecs(&lk->lock_object);
flags &= ~LK_INTERLOCK;
"%s: interrupted sleep for %p with %d",
    __func__, lk, error);
LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
x = lockmgr_read_value(lk);
LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
    LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#ifdef LOCK_PROFILING
lockmgr_note_exclusive_acquire(lk, contested, waittime,
lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
lockmgr_exit(flags, ilk, 0);
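/*
 * Upgrade a shared lock to an exclusive one, preserving the waiter bits.  If
 * other sharers are present, LK_TRYUPGRADE fails with EBUSY, while LK_UPGRADE
 * releases the shared lock and falls back to a full exclusive acquisition.
 */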
static __noinline int
lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
uintptr_t tid, v, setv;
if (KERNEL_PANICKED())
tid = (uintptr_t)curthread;
_lockmgr_assert(lk, KA_SLOCKED, file, line);
op = flags & LK_TYPE_MASK;
v = lockmgr_read_value(lk);
if (LK_SHARERS(v) > 1) {
if (op == LK_TRYUPGRADE) {
LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
    v - LK_ONE_SHARER)) {
lockmgr_note_shared_release(lk, file, line);
MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));
setv |= (v & LK_ALL_WAITERS);
 * Try to switch from one shared lock to an exclusive one.
 * We need to preserve waiters flags during the operation.
if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
    LK_TRYWIT(flags), file, line);
LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
TD_SLOCKS_DEC(curthread);
error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
flags &= ~LK_INTERLOCK;
lockmgr_exit(flags, ilk, 0);
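/*
 * Flag-driven entry point accepting an interlock: try the inline fast paths
 * first and fall back to the hard-path routines on contention.
 */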
lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line)
struct lock_class *class;
if (KERNEL_PANICKED())
op = flags & LK_TYPE_MASK;
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
    file, line, flags & LK_INTERLOCK ? ilk : NULL);
if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
x = lockmgr_read_value(lk);
if (lockmgr_slock_try(lk, &x, flags, true)) {
lockmgr_note_shared_acquire(lk, 0, 0,
return (lockmgr_slock_hard(lk, flags, ilk, file, line,
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
tid = (uintptr_t)curthread;
if (lockmgr_read_value(lk) == LK_UNLOCKED &&
    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
if (__predict_true(locked)) {
if (__predict_false(flags & LK_INTERLOCK)) {
class = LOCK_CLASS(ilk);
class->lc_unlock(ilk);
return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
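/*
 * Slow path for shared release: defer to wakeupshlk() to hand off the lock
 * and wake any waiters.
 */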
static __noinline int
lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
    const char *file, int line)
int wakeup_swapper = 0;
if (KERNEL_PANICKED())
wakeup_swapper = wakeupshlk(lk, file, line);
lockmgr_exit(flags, ilk, wakeup_swapper);
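/*
 * Slow path for exclusive release: handle disowned and recursed locks, then
 * wake up the appropriate sleepqueue, preferring exclusive waiters.
 */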
static __noinline int
lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
    const char *file, int line)
int wakeup_swapper = 0;
if (KERNEL_PANICKED())
tid = (uintptr_t)curthread;
 * As a first option, treat the lock as if it has not
 * Fix-up the tid var if the lock has been disowned.
if (LK_HOLDER(x) == LK_KERNPROC)
 * The lock is held in exclusive mode.
 * If the lock is recursed also, then unrecurse it.
if (lockmgr_recursed_v(x)) {
LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
if (lk->lk_recurse == 0)
atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
if (tid != LK_KERNPROC)
LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
sleepq_lock(&lk->lock_object);
x = lockmgr_read_value(lk);
 * If the lock has exclusive waiters, give them
 * preference in order to avoid a deadlock with
 * shared runners-up.
 * If interruptible sleeps left the exclusive queue
 * empty, avoid starving the threads sleeping on the
 * shared queue by giving them precedence and
 * clearing the exclusive waiters bit anyway.
 * Note that the lk_exslpfail count may overstate the
 * real number of waiters with the LK_SLEEPFAIL flag
 * set, because such sleeps may also be interruptible;
 * treat lk_exslpfail as an upper bound, including the
 * edge cases.
MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
if (lk->lk_exslpfail < realexslp) {
lk->lk_exslpfail = 0;
queue = SQ_EXCLUSIVE_QUEUE;
v |= (x & LK_SHARED_WAITERS);
lk->lk_exslpfail = 0;
"%s: %p has only LK_SLEEPFAIL sleepers",
"%s: %p waking up threads on the exclusive queue",
wakeup_swapper = sleepq_broadcast(&lk->lock_object,
    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
queue = SQ_SHARED_QUEUE;
 * Exclusive waiters sleeping with LK_SLEEPFAIL
 * on and using interruptible sleeps/timeout
 * may have left spurious lk_exslpfail counts
 * on, so clean it up anyway.
lk->lk_exslpfail = 0;
queue = SQ_SHARED_QUEUE;
LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
atomic_store_rel_ptr(&lk->lk_lock, v);
wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
sleepq_release(&lk->lock_object);
lockmgr_exit(flags, ilk, wakeup_swapper);
 * Lightweight entry points for common operations.
 * Functionality is similar to sx locks, in that none of the additional lockmgr
 * features are supported. To be clear, these are NOT supported:
 * 1. shared locking disablement
 * 2. returning with an error after sleep
 * 3. unlocking the interlock
 * If in doubt, use lockmgr_lock_flags.
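 *
 * These functions are normally reached via the lockmgr() family of macros
 * rather than called directly.  A minimal, illustrative sketch of typical
 * use (error handling omitted):
 *
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL);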
lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
MPASS((flags & LK_INTERLOCK) == 0);
MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
x = lockmgr_read_value(lk);
if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
MPASS((flags & LK_INTERLOCK) == 0);
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
    LOP_EXCLUSIVE, file, line, NULL);
tid = (uintptr_t)curthread;
if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
lockmgr_unlock(struct lock *lk)
_lockmgr_assert(lk, KA_LOCKED, file, line);
x = lockmgr_read_value(lk);
if (__predict_true(x & LK_SHARE) != 0) {
lockmgr_note_shared_release(lk, file, line);
if (lockmgr_sunlock_try(lk, &x)) {
LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
tid = (uintptr_t)curthread;
lockmgr_note_exclusive_release(lk, file, line);
if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
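/*
 * Full-featured entry point: dispatch on the requested LK_* operation and
 * honor the optional interlock, wait message, priority and timeout.
 */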
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
struct lockmgr_wait lwa;
struct lock_class *class;
uintptr_t tid, v, x;
u_int op, realexslp;
int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
uint64_t waittime = 0;
if (KERNEL_PANICKED())
tid = (uintptr_t)curthread;
op = (flags & LK_TYPE_MASK);
iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
lwa.iwmesg = iwmesg;
MPASS((flags & ~LK_TOTAL_MASK) == 0);
KASSERT((op & (op - 1)) == 0,
    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
    (op != LK_DOWNGRADE && op != LK_RELEASE),
    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
    __func__, file, line));
KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
    __func__, file, line));
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
    lk->lock_object.lo_name, file, line));
class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
if (lk->lock_object.lo_flags & LK_NOSHARE) {
_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
_lockmgr_assert(lk, KA_XLOCKED, file, line);
WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
 * Panic if the lock is recursed.
if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
    __func__, iwmesg, file, line);
TD_SLOCKS_INC(curthread);
 * In order to preserve waiters flags, just spin.
x = lockmgr_read_value(lk);
MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
x &= LK_ALL_WAITERS;
if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
    LK_SHARERS_LOCK(1) | x))
LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
_lockmgr_assert(lk, KA_LOCKED, file, line);
x = lockmgr_read_value(lk);
if (__predict_true(x & LK_SHARE) != 0) {
lockmgr_note_shared_release(lk, file, line);
return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
lockmgr_note_exclusive_release(lk, file, line);
return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
 * Trying to drain a lock we already own will result in a
if (lockmgr_xlocked(lk)) {
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
panic("%s: draining %s with the lock held @ %s:%d\n",
    __func__, iwmesg, file, line);
if (lk->lk_lock == LK_UNLOCKED &&
    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
PMC_SOFT_CALL( , , lock, failed);
lock_profile_obtain_lock_failed(&lk->lock_object, false,
    &contested, &waittime);
 * If the lock is expected to not sleep just give up
if (LK_TRYOP(flags)) {
LOCK_LOG2(lk, "%s: %p fails the try operation",
 * Acquire the sleepqueue chain lock because we
 * probably will need to manipulate waiters flags.
sleepq_lock(&lk->lock_object);
x = lockmgr_read_value(lk);
 * if the lock has been released while we spun on
 * the sleepqueue chain lock just try again.
if (x == LK_UNLOCKED) {
sleepq_release(&lk->lock_object);
v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
if ((x & ~v) == LK_UNLOCKED) {
v = (x & ~LK_EXCLUSIVE_SPINNERS);
 * If interruptible sleeps left the exclusive
 * queue empty, avoid starving the threads
 * sleeping on the shared queue by giving them
 * precedence and clearing the exclusive
 * waiters bit anyway.
 * Note that the lk_exslpfail count may
 * overstate the real number of waiters with
 * the LK_SLEEPFAIL flag set, because such
 * sleeps may also be interruptible; treat
 * lk_exslpfail as an upper bound, even in the
 * edge cases.
if (v & LK_EXCLUSIVE_WAITERS) {
queue = SQ_EXCLUSIVE_QUEUE;
v &= ~LK_EXCLUSIVE_WAITERS;
 * Exclusive waiters sleeping with
 * LK_SLEEPFAIL on and using
 * interruptible sleeps/timeout may
 * have left spurious lk_exslpfail
 * counts on, so clean it up anyway.
MPASS(v & LK_SHARED_WAITERS);
lk->lk_exslpfail = 0;
queue = SQ_SHARED_QUEUE;
v &= ~LK_SHARED_WAITERS;
if (queue == SQ_EXCLUSIVE_QUEUE) {
sleepq_sleepcnt(&lk->lock_object,
    SQ_EXCLUSIVE_QUEUE);
if (lk->lk_exslpfail >= realexslp) {
lk->lk_exslpfail = 0;
queue = SQ_SHARED_QUEUE;
v &= ~LK_SHARED_WAITERS;
if (realexslp != 0) {
"%s: %p has only LK_SLEEPFAIL sleepers",
"%s: %p waking up threads on the exclusive queue",
SQ_EXCLUSIVE_QUEUE);
lk->lk_exslpfail = 0;
if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
sleepq_release(&lk->lock_object);
"%s: %p waking up all threads on the %s queue",
    __func__, lk, queue == SQ_SHARED_QUEUE ?
    "shared" : "exclusive");
wakeup_swapper |= sleepq_broadcast(
    &lk->lock_object, SLEEPQ_LK, 0, queue);
 * If shared waiters have been woken up we need
 * to wait for one of them to acquire the lock
 * before setting the exclusive waiters flag in
 * order to avoid a deadlock.
if (queue == SQ_SHARED_QUEUE) {
for (v = lk->lk_lock;
    (v & LK_SHARE) && !LK_SHARERS(v);
 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
 * fail, loop back and retry.
if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
if (!atomic_cmpset_ptr(&lk->lk_lock, x,
    x | LK_EXCLUSIVE_WAITERS)) {
sleepq_release(&lk->lock_object);
LOCK_LOG2(lk, "%s: %p set drain waiters flag",
 * Since we have been unable to acquire the
 * exclusive lock and the exclusive waiters flag
 * is set, we will sleep.
if (flags & LK_INTERLOCK) {
class->lc_unlock(ilk);
flags &= ~LK_INTERLOCK;
sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
    SQ_EXCLUSIVE_QUEUE);
sleepq_wait(&lk->lock_object, ipri & PRIMASK);
LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
lock_profile_obtain_lock_success(&lk->lock_object,
    false, contested, waittime, file, line);
LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
    lk->lk_recurse, file, line);
WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
    LK_TRYWIT(flags), file, line);
TD_LOCKS_INC(curthread);
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
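/*
 * Disown an exclusively-held lock: ownership passes to LK_KERNPROC so the
 * lock can later be released by a thread other than the one that acquired it.
 */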
_lockmgr_disown(struct lock *lk, const char *file, int line)
if (SCHEDULER_STOPPED())
tid = (uintptr_t)curthread;
_lockmgr_assert(lk, KA_XLOCKED, file, line);
 * Panic if the lock is recursed.
if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
panic("%s: disown a recursed lockmgr @ %s:%d\n",
    __func__, file, line);
 * If the owner is already LK_KERNPROC just skip the whole operation.
if (LK_HOLDER(lk->lk_lock) != tid)
lock_profile_release_lock(&lk->lock_object, false);
LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
TD_LOCKS_DEC(curthread);
 * In order to preserve waiters flags, just spin.
x = lockmgr_read_value(lk);
MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
x &= LK_ALL_WAITERS;
if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
lockmgr_printinfo(const struct lock *lk)
if (lk->lk_lock == LK_UNLOCKED)
printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
else if (lk->lk_lock & LK_SHARE)
printf("lock type %s: SHARED (count %ju)\n",
    lk->lock_object.lo_name,
    (uintmax_t)LK_SHARERS(lk->lk_lock));
td = lockmgr_xholder(lk);
if (td == (struct thread *)LK_KERNPROC)
printf("lock type %s: EXCL by KERNPROC\n",
    lk->lock_object.lo_name);
printf("lock type %s: EXCL by thread %p "
    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
    td, td->td_proc->p_pid, td->td_proc->p_comm,
if (x & LK_EXCLUSIVE_WAITERS)
printf(" with exclusive waiters pending\n");
if (x & LK_SHARED_WAITERS)
printf(" with shared waiters pending\n");
if (x & LK_EXCLUSIVE_SPINNERS)
printf(" with exclusive spinners pending\n");
lockstatus(const struct lock *lk)
x = lockmgr_read_value(lk);
if ((x & LK_SHARE) == 0) {
if (v == (uintptr_t)curthread || v == LK_KERNPROC)
} else if (x == LK_UNLOCKED)
#ifdef INVARIANT_SUPPORT
FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");
#undef _lockmgr_assert
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
if (KERNEL_PANICKED())
case KA_SLOCKED | KA_NOTRECURSED:
case KA_SLOCKED | KA_RECURSED:
case KA_LOCKED | KA_NOTRECURSED:
case KA_LOCKED | KA_RECURSED:
 * We cannot trust WITNESS if the lock is held in exclusive
 * mode and a call to lockmgr_disown() happened.
 * Work around this by skipping the check if the lock is held
 * in exclusive mode, even for the KA_LOCKED case.
if (slocked || (lk->lk_lock & LK_SHARE)) {
witness_assert(&lk->lock_object, what, file, line);
if (lk->lk_lock == LK_UNLOCKED ||
    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
panic("Lock %s not %slocked @ %s:%d\n",
    lk->lock_object.lo_name, slocked ? "share" : "",
if ((lk->lk_lock & LK_SHARE) == 0) {
if (lockmgr_recursed(lk)) {
if (what & KA_NOTRECURSED)
panic("Lock %s recursed @ %s:%d\n",
    lk->lock_object.lo_name, file,
} else if (what & KA_RECURSED)
panic("Lock %s not recursed @ %s:%d\n",
    lk->lock_object.lo_name, file, line);
case KA_XLOCKED | KA_NOTRECURSED:
case KA_XLOCKED | KA_RECURSED:
if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
panic("Lock %s not exclusively locked @ %s:%d\n",
    lk->lock_object.lo_name, file, line);
if (lockmgr_recursed(lk)) {
if (what & KA_NOTRECURSED)
panic("Lock %s recursed @ %s:%d\n",
    lk->lock_object.lo_name, file, line);
} else if (what & KA_RECURSED)
panic("Lock %s not recursed @ %s:%d\n",
    lk->lock_object.lo_name, file, line);
if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
panic("Lock %s exclusively locked @ %s:%d\n",
    lk->lock_object.lo_name, file, line);
panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
lockmgr_chain(struct thread *td, struct thread **ownerp)
const struct lock *lk;
if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
if (lk->lk_lock & LK_SHARE)
db_printf("SHARED (count %ju)\n",
    (uintmax_t)LK_SHARERS(lk->lk_lock));
db_printf("EXCL\n");
*ownerp = lockmgr_xholder(lk);
db_show_lockmgr(const struct lock_object *lock)
const struct lock *lk;
lk = (const struct lock *)lock;
db_printf(" state: ");
if (lk->lk_lock == LK_UNLOCKED)
db_printf("UNLOCKED\n");
else if (lk->lk_lock & LK_SHARE)
db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
td = lockmgr_xholder(lk);
if (td == (struct thread *)LK_KERNPROC)
db_printf("XLOCK: LK_KERNPROC\n");
db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
    td->td_tid, td->td_proc->p_pid,
    td->td_proc->p_comm);
if (lockmgr_recursed(lk))
db_printf(" recursed: %d\n", lk->lk_recurse);
db_printf(" waiters: ");
switch (lk->lk_lock & LK_ALL_WAITERS) {
case LK_SHARED_WAITERS:
db_printf("shared\n");
case LK_EXCLUSIVE_WAITERS:
db_printf("exclusive\n");
case LK_ALL_WAITERS:
db_printf("shared and exclusive\n");
db_printf("none\n");
db_printf(" spinners: ");
if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
db_printf("exclusive\n");
db_printf("none\n");