2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice(s), this list of conditions and the following disclaimer as
12 * the first lines of this file unmodified other than the possible
13 * addition of one or more copyright notices.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice(s), this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 #include "opt_hwpmc_hooks.h"
34 #include <sys/cdefs.h>
35 #include <sys/param.h>
38 #include <sys/limits.h>
40 #include <sys/lock_profile.h>
41 #include <sys/lockmgr.h>
42 #include <sys/lockstat.h>
43 #include <sys/mutex.h>
45 #include <sys/sleepqueue.h>
47 #include <sys/stack.h>
49 #include <sys/sysctl.h>
50 #include <sys/systm.h>
52 #include <machine/cpu.h>
59 #include <sys/pmckern.h>
60 PMC_SOFT_DECLARE( , , lock, failed);
64 * Hack. There should be prio_t or similar so that this is not necessary.
66 _Static_assert((PRILASTFLAG * 2) - 1 <= USHRT_MAX,
67 "prio flags won't fit in u_short pri in struct lock");
69 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
70 ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
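/*
 * Sleepqueue indices for the lock's two wait queues: exclusive waiters
 * (also used by the LK_DRAIN path) and shared waiters.
 */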
72 #define SQ_EXCLUSIVE_QUEUE 0
73 #define SQ_SHARED_QUEUE 1
76 #define _lockmgr_assert(lk, what, file, line)
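/*
 * Per-thread count of shared lockmgr locks held; LK_CAN_SHARE() consults
 * td_lk_slocks as part of its deadlock-avoidance heuristics.
 */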
79 #define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++)
80 #define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--)
83 #define STACK_PRINT(lk)
84 #define STACK_SAVE(lk)
85 #define STACK_ZERO(lk)
87 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
88 #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
89 #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
92 #define LOCK_LOG2(lk, string, arg1, arg2) \
93 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
94 CTR2(KTR_LOCK, (string), (arg1), (arg2))
95 #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
96 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
97 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
99 #define GIANT_DECLARE \
101 WITNESS_SAVE_DECL(Giant)
102 #define GIANT_RESTORE() do { \
103 if (__predict_false(_i > 0)) { \
106 WITNESS_RESTORE(&Giant.lock_object, Giant); \
109 #define GIANT_SAVE() do { \
110 if (__predict_false(mtx_owned(&Giant))) { \
111 WITNESS_SAVE(&Giant.lock_object, Giant); \
112 while (mtx_owned(&Giant)) { \
114 mtx_unlock(&Giant); \
119 static bool __always_inline
120 LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
123 if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
126 if (fp || (!(x & LK_SHARE)))
128 if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
129 (curthread->td_pflags & TDP_DEADLKTREAT))
134 #define LK_TRYOP(x) \
137 #define LK_CAN_WITNESS(x) \
138 (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
139 #define LK_TRYWIT(x) \
140 (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
142 #define lockmgr_disowned(lk) \
143 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
145 #define lockmgr_xlocked_v(v) \
146 (((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
148 #define lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
150 static void assert_lockmgr(const struct lock_object *lock, int how);
152 static void db_show_lockmgr(const struct lock_object *lock);
154 static void lock_lockmgr(struct lock_object *lock, uintptr_t how);
156 static int owner_lockmgr(const struct lock_object *lock,
157 struct thread **owner);
159 static uintptr_t unlock_lockmgr(struct lock_object *lock);
161 struct lock_class lock_class_lockmgr = {
162 .lc_name = "lockmgr",
163 .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
164 .lc_assert = assert_lockmgr,
166 .lc_ddb_show = db_show_lockmgr,
168 .lc_lock = lock_lockmgr,
169 .lc_unlock = unlock_lockmgr,
171 .lc_owner = owner_lockmgr,
175 static __read_mostly bool lk_adaptive = true;
176 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
177 SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
179 #define lockmgr_delay locks_delay
181 struct lockmgr_wait {
187 static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
189 static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
192 lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
194 struct lock_class *class;
196 if (flags & LK_INTERLOCK) {
197 class = LOCK_CLASS(ilk);
198 class->lc_unlock(ilk);
201 if (__predict_false(wakeup_swapper))
206 lockmgr_note_shared_acquire(struct lock *lk, int contested,
207 uint64_t waittime, const char *file, int line, int flags)
210 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
211 waittime, file, line, LOCKSTAT_READER);
212 LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
213 WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
214 TD_LOCKS_INC(curthread);
215 TD_SLOCKS_INC(curthread);
220 lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
223 WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
224 LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
225 TD_LOCKS_DEC(curthread);
226 TD_SLOCKS_DEC(curthread);
230 lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
231 uint64_t waittime, const char *file, int line, int flags)
234 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
235 waittime, file, line, LOCKSTAT_WRITER);
236 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
237 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
239 TD_LOCKS_INC(curthread);
244 lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
247 if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
248 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
249 TD_LOCKS_DEC(curthread);
251 LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
255 static __inline struct thread *
256 lockmgr_xholder(const struct lock *lk)
260 x = lockmgr_read_value(lk);
261 return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
265 * This routine assumes the sleepqueue chain lock is held and returns with it released.
266 * It also assumes the generic interlock is sane and has been checked by the caller.
267 * If LK_INTERLOCK is specified, the interlock is not reacquired after the sleep.
271 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
272 const char *wmesg, int pri, int timo, int queue)
275 struct lock_class *class;
278 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
279 catch = pri & PCATCH;
283 LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
284 (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
286 if (flags & LK_INTERLOCK)
287 class->lc_unlock(ilk);
288 if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) {
289 if (lk->lk_exslpfail < USHRT_MAX)
293 sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
294 SLEEPQ_INTERRUPTIBLE : 0), queue);
295 if ((flags & LK_TIMELOCK) && timo)
296 sleepq_set_timeout(&lk->lock_object, timo);
299 * Choose how to sleep based on the timeout and signal-catching settings.
301 if ((flags & LK_TIMELOCK) && timo && catch)
302 error = sleepq_timedwait_sig(&lk->lock_object, pri);
303 else if ((flags & LK_TIMELOCK) && timo)
304 error = sleepq_timedwait(&lk->lock_object, pri);
306 error = sleepq_wait_sig(&lk->lock_object, pri);
308 sleepq_wait(&lk->lock_object, pri);
310 if ((flags & LK_SLEEPFAIL) && error == 0)
317 wakeupshlk(struct lock *lk, const char *file, int line)
319 uintptr_t v, x, orig_x;
321 int queue, wakeup_swapper;
325 x = lockmgr_read_value(lk);
326 if (lockmgr_sunlock_try(lk, &x))
330 * We should have a sharer with waiters, so enter the hard
331 * path in order to handle wakeups correctly.
333 sleepq_lock(&lk->lock_object);
334 orig_x = lockmgr_read_value(lk);
336 x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
340 * If the lock has exclusive waiters, give them preference in
341 * order to avoid deadlock with shared runners-up.
342 * If interruptible sleeps left the exclusive queue empty,
343 * avoid starving the threads sleeping on the shared queue by
344 * giving them precedence and clearing the exclusive waiters
345 * bit anyway.
346 * Note that lk_exslpfail may overstate the real number of
347 * waiters holding the LK_SLEEPFAIL flag, because such waiters
348 * may also be using interruptible sleeps; treat lk_exslpfail
349 * as an upper bound, including in the edge cases.
352 realexslp = sleepq_sleepcnt(&lk->lock_object,
354 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
355 if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
356 lk->lk_exslpfail = 0;
357 queue = SQ_EXCLUSIVE_QUEUE;
358 v |= (x & LK_SHARED_WAITERS);
360 lk->lk_exslpfail = 0;
362 "%s: %p has only LK_SLEEPFAIL sleepers",
365 "%s: %p waking up threads on the exclusive queue",
368 sleepq_broadcast(&lk->lock_object,
369 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
370 queue = SQ_SHARED_QUEUE;
374 * Exclusive waiters sleeping with LK_SLEEPFAIL on
375 * and using interruptible sleeps/timeout may have
376 * left spurious lk_exslpfail counts on, so clean
379 lk->lk_exslpfail = 0;
380 queue = SQ_SHARED_QUEUE;
383 if (lockmgr_sunlock_try(lk, &orig_x)) {
384 sleepq_release(&lk->lock_object);
388 x |= LK_SHARERS_LOCK(1);
389 if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
393 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
394 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
396 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
398 sleepq_release(&lk->lock_object);
402 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
403 return (wakeup_swapper);
407 assert_lockmgr(const struct lock_object *lock, int what)
410 panic("lockmgr locks do not support assertions");
414 lock_lockmgr(struct lock_object *lock, uintptr_t how)
417 panic("lockmgr locks do not support sleep interlocking");
421 unlock_lockmgr(struct lock_object *lock)
424 panic("lockmgr locks do not support sleep interlocking");
429 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
432 panic("lockmgr locks do not support owner inquiring");
437 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
441 MPASS((flags & ~LK_INIT_MASK) == 0);
442 ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
443 ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
446 iflags = LO_SLEEPABLE | LO_UPGRADABLE;
447 if (flags & LK_CANRECURSE)
448 iflags |= LO_RECURSABLE;
449 if ((flags & LK_NODUP) == 0)
451 if (flags & LK_NOPROFILE)
452 iflags |= LO_NOPROFILE;
453 if ((flags & LK_NOWITNESS) == 0)
454 iflags |= LO_WITNESS;
455 if (flags & LK_QUIET)
457 if (flags & LK_IS_VNODE)
458 iflags |= LO_IS_VNODE;
461 iflags |= flags & LK_NOSHARE;
463 lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
464 lk->lk_lock = LK_UNLOCKED;
466 lk->lk_exslpfail = 0;
473 * XXX: Gross hacks to manipulate external lock flags after
474 * initialization. Used for certain vnode and buf locks.
477 lockallowshare(struct lock *lk)
480 lockmgr_assert(lk, KA_XLOCKED);
481 lk->lock_object.lo_flags &= ~LK_NOSHARE;
485 lockdisableshare(struct lock *lk)
488 lockmgr_assert(lk, KA_XLOCKED);
489 lk->lock_object.lo_flags |= LK_NOSHARE;
493 lockallowrecurse(struct lock *lk)
496 lockmgr_assert(lk, KA_XLOCKED);
497 lk->lock_object.lo_flags |= LO_RECURSABLE;
501 lockdisablerecurse(struct lock *lk)
504 lockmgr_assert(lk, KA_XLOCKED);
505 lk->lock_object.lo_flags &= ~LO_RECURSABLE;
509 lockdestroy(struct lock *lk)
512 KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
513 KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
514 KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
515 lock_destroy(&lk->lock_object);
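/*
 * Illustrative only: a minimal sketch of the expected lifecycle of a lockmgr
 * lock, combining lockinit(), the canonical lockmgr() entry point, the
 * flag-manipulation helpers above and lockdestroy().  The lock variable,
 * wmesg, priority and flag choices below are hypothetical, not taken from
 * this file.
 */
#if 0
static struct lock example_lock;

static void
example_lifecycle(void)
{

	lockinit(&example_lock, PVFS, "exlock", 0, LK_CANRECURSE);

	/* The flag-manipulation helpers expect the lock held exclusively. */
	lockmgr(&example_lock, LK_EXCLUSIVE, NULL);
	lockdisableshare(&example_lock);
	lockmgr(&example_lock, LK_RELEASE, NULL);

	/* The lock must be unlocked and not recursed before destruction. */
	lockdestroy(&example_lock);
}
#endif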
518 static bool __always_inline
519 lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
523 * If no other thread holds the lock exclusively and no
524 * exclusive waiter is present, bump the count of
525 * sharers. Since we have to preserve the state of
526 * waiters, if we fail to acquire the shared lock
527 * loop back and retry.
529 while (LK_CAN_SHARE(*xp, flags, fp)) {
530 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
531 *xp + LK_ONE_SHARER)) {
538 static bool __always_inline
539 lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
543 if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
544 if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
545 *xp - LK_ONE_SHARER))
555 lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
558 struct thread *owner;
562 MPASS(x != LK_UNLOCKED);
563 owner = (struct thread *)LK_HOLDER(x);
565 MPASS(owner != curthread);
566 if (owner == (struct thread *)LK_KERNPROC)
568 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
572 if (!TD_IS_RUNNING(owner))
574 if ((x & LK_ALL_WAITERS) != 0)
577 x = lockmgr_read_value(lk);
578 if (LK_CAN_SHARE(x, flags, false)) {
582 owner = (struct thread *)LK_HOLDER(x);
586 static __noinline int
587 lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
588 const char *file, int line, struct lockmgr_wait *lwa)
596 uint64_t sleep_time = 0;
598 #ifdef LOCK_PROFILING
599 uint64_t waittime = 0;
602 struct lock_delay_arg lda;
604 if (SCHEDULER_STOPPED())
607 tid = (uintptr_t)curthread;
609 if (LK_CAN_WITNESS(flags))
610 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
611 file, line, flags & LK_INTERLOCK ? ilk : NULL);
612 x = lockmgr_read_value(lk);
613 lock_delay_arg_init(&lda, &lockmgr_delay);
615 flags &= ~LK_ADAPTIVE;
617 * The lock may already be held exclusively by curthread,
620 if (LK_HOLDER(x) == tid) {
622 "%s: %p already held in exclusive mode",
629 if (lockmgr_slock_try(lk, &x, flags, false))
632 lock_profile_obtain_lock_failed(&lk->lock_object, false,
633 &contested, &waittime);
635 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
636 if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
641 PMC_SOFT_CALL( , , lock, failed);
645 * If the caller does not expect to sleep, just give up
648 if (LK_TRYOP(flags)) {
649 LOCK_LOG2(lk, "%s: %p fails the try operation",
656 * Acquire the sleepqueue chain lock because we
657 * probably will need to manipulate waiters flags.
659 sleepq_lock(&lk->lock_object);
660 x = lockmgr_read_value(lk);
664 * if the lock can be acquired in shared mode, try
667 if (LK_CAN_SHARE(x, flags, false)) {
668 sleepq_release(&lk->lock_object);
673 * Try to set the LK_SHARED_WAITERS flag. If we fail,
674 * loop back and retry.
676 if ((x & LK_SHARED_WAITERS) == 0) {
677 if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
678 x | LK_SHARED_WAITERS)) {
681 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
686 iwmesg = lk->lock_object.lo_name;
690 iwmesg = lwa->iwmesg;
696 * Since we have been unable to acquire the
697 * shared lock and the shared waiters flag is set,
701 sleep_time -= lockstat_nsecs(&lk->lock_object);
703 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
706 sleep_time += lockstat_nsecs(&lk->lock_object);
708 flags &= ~LK_INTERLOCK;
711 "%s: interrupted sleep for %p with %d",
712 __func__, lk, error);
715 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
717 x = lockmgr_read_value(lk);
722 LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
723 LOCKSTAT_READER, (x & LK_SHARE) == 0,
724 (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
726 #ifdef LOCK_PROFILING
727 lockmgr_note_shared_acquire(lk, contested, waittime,
730 lockmgr_note_shared_acquire(lk, 0, 0, file, line,
736 lockmgr_exit(flags, ilk, 0);
741 lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
743 struct thread *owner;
747 MPASS(x != LK_UNLOCKED);
748 owner = (struct thread *)LK_HOLDER(x);
750 MPASS(owner != curthread);
753 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
755 if (owner == (struct thread *)LK_KERNPROC)
757 if (!TD_IS_RUNNING(owner))
759 if ((x & LK_ALL_WAITERS) != 0)
762 x = lockmgr_read_value(lk);
763 if (x == LK_UNLOCKED) {
767 owner = (struct thread *)LK_HOLDER(x);
771 static __noinline int
772 lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
773 const char *file, int line, struct lockmgr_wait *lwa)
775 struct lock_class *class;
782 uint64_t sleep_time = 0;
784 #ifdef LOCK_PROFILING
785 uint64_t waittime = 0;
788 struct lock_delay_arg lda;
790 if (SCHEDULER_STOPPED())
793 tid = (uintptr_t)curthread;
795 if (LK_CAN_WITNESS(flags))
796 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
797 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
801 * If curthread already holds the lock and this one is
802 * allowed to recurse, simply recurse on it.
804 if (lockmgr_xlocked(lk)) {
805 if ((flags & LK_CANRECURSE) == 0 &&
806 (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
808 * For a try operation, just give up and
809 * return instead of panicking.
811 if (LK_TRYOP(flags)) {
813 "%s: %p fails the try operation",
818 if (flags & LK_INTERLOCK) {
819 class = LOCK_CLASS(ilk);
820 class->lc_unlock(ilk);
823 panic("%s: recursing on non recursive lockmgr %p "
824 "@ %s:%d\n", __func__, lk, file, line);
826 atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
828 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
829 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
830 lk->lk_recurse, file, line);
831 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
832 LK_TRYWIT(flags), file, line);
833 TD_LOCKS_INC(curthread);
838 lock_delay_arg_init(&lda, &lockmgr_delay);
840 flags &= ~LK_ADAPTIVE;
842 if (x == LK_UNLOCKED) {
843 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
848 lock_profile_obtain_lock_failed(&lk->lock_object, false,
849 &contested, &waittime);
851 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
852 if (lockmgr_xlock_adaptive(&lda, lk, &x))
856 PMC_SOFT_CALL( , , lock, failed);
860 * If the caller does not expect to sleep, just give up
863 if (LK_TRYOP(flags)) {
864 LOCK_LOG2(lk, "%s: %p fails the try operation",
871 * Acquire the sleepqueue chain lock because we
872 * probably will need to manipulate waiters flags.
874 sleepq_lock(&lk->lock_object);
875 x = lockmgr_read_value(lk);
879 * if the lock has been released while we spun on
880 * the sleepqueue chain lock just try again.
882 if (x == LK_UNLOCKED) {
883 sleepq_release(&lk->lock_object);
888 * The lock can be in the state where there is a
889 * pending queue of waiters, but still no owner.
890 * This happens when the lock is contested and an
891 * owner is going to claim the lock.
892 * If curthread is the one successfully acquiring it
893 * claim lock ownership and return, preserving waiters
896 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
897 if ((x & ~v) == LK_UNLOCKED) {
898 v &= ~LK_EXCLUSIVE_SPINNERS;
899 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
901 sleepq_release(&lk->lock_object);
903 "%s: %p claimed by a new writer",
911 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
912 * fail, loop back and retry.
914 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
915 if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
916 x | LK_EXCLUSIVE_WAITERS)) {
919 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
924 iwmesg = lk->lock_object.lo_name;
928 iwmesg = lwa->iwmesg;
934 * Since we have been unable to acquire the
935 * exclusive lock and the exclusive waiters flag
936 * is set, we will sleep.
939 sleep_time -= lockstat_nsecs(&lk->lock_object);
941 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
944 sleep_time += lockstat_nsecs(&lk->lock_object);
946 flags &= ~LK_INTERLOCK;
949 "%s: interrupted sleep for %p with %d",
950 __func__, lk, error);
953 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
955 x = lockmgr_read_value(lk);
960 LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
961 LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
962 (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
964 #ifdef LOCK_PROFILING
965 lockmgr_note_exclusive_acquire(lk, contested, waittime,
968 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
974 lockmgr_exit(flags, ilk, 0);
978 static __noinline int
979 lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
980 const char *file, int line, struct lockmgr_wait *lwa)
982 uintptr_t tid, v, setv;
986 if (SCHEDULER_STOPPED())
989 tid = (uintptr_t)curthread;
991 _lockmgr_assert(lk, KA_SLOCKED, file, line);
993 op = flags & LK_TYPE_MASK;
994 v = lockmgr_read_value(lk);
996 if (LK_SHARERS(v) > 1) {
997 if (op == LK_TRYUPGRADE) {
998 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
1003 if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
1004 v - LK_ONE_SHARER)) {
1005 lockmgr_note_shared_release(lk, file, line);
1010 MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));
1013 setv |= (v & LK_ALL_WAITERS);
1016 * Try to switch from one shared lock to an exclusive one.
1017 * We need to preserve waiters flags during the operation.
1019 if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
1020 LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
1022 WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
1023 LK_TRYWIT(flags), file, line);
1024 LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
1025 TD_SLOCKS_DEC(curthread);
1031 error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
1032 flags &= ~LK_INTERLOCK;
1034 lockmgr_exit(flags, ilk, 0);
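/*
 * Illustrative only: how a consumer might try to upgrade a shared hold to an
 * exclusive one.  A failed LK_TRYUPGRADE leaves the shared hold intact, so
 * the fallback below releases it and reacquires exclusively; the lock
 * pointer is hypothetical.
 */
#if 0
static void
example_upgrade(struct lock *lkp)
{

	lockmgr(lkp, LK_SHARED, NULL);
	if (lockmgr(lkp, LK_TRYUPGRADE, NULL) != 0) {
		lockmgr(lkp, LK_RELEASE, NULL);
		lockmgr(lkp, LK_EXCLUSIVE, NULL);
	}
	lockmgr(lkp, LK_RELEASE, NULL);
}
#endif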
1039 lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
1040 const char *file, int line)
1042 struct lock_class *class;
1047 if (SCHEDULER_STOPPED())
1050 op = flags & LK_TYPE_MASK;
1054 if (LK_CAN_WITNESS(flags))
1055 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1056 file, line, flags & LK_INTERLOCK ? ilk : NULL);
1057 if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
1059 x = lockmgr_read_value(lk);
1060 if (lockmgr_slock_try(lk, &x, flags, true)) {
1061 lockmgr_note_shared_acquire(lk, 0, 0,
1065 return (lockmgr_slock_hard(lk, flags, ilk, file, line,
1070 if (LK_CAN_WITNESS(flags))
1071 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1072 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1074 tid = (uintptr_t)curthread;
1075 if (lockmgr_read_value(lk) == LK_UNLOCKED &&
1076 atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1077 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1081 return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
1087 return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
1091 if (__predict_true(locked)) {
1092 if (__predict_false(flags & LK_INTERLOCK)) {
1093 class = LOCK_CLASS(ilk);
1094 class->lc_unlock(ilk);
1098 return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
1099 LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
1103 static __noinline int
1104 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1105 const char *file, int line)
1108 int wakeup_swapper = 0;
1110 if (SCHEDULER_STOPPED())
1113 wakeup_swapper = wakeupshlk(lk, file, line);
1116 lockmgr_exit(flags, ilk, wakeup_swapper);
1120 static __noinline int
1121 lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1122 const char *file, int line)
1125 int wakeup_swapper = 0;
1129 if (SCHEDULER_STOPPED())
1132 tid = (uintptr_t)curthread;
1135 * As a first pass, treat the lock as if it has not been recursed.
1137 * Fix up the tid variable if the lock has been disowned.
1139 if (LK_HOLDER(x) == LK_KERNPROC)
1143 * The lock is held in exclusive mode.
1144 * If the lock is recursed also, then unrecurse it.
1146 if (lockmgr_recursed_v(x)) {
1147 LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
1149 if (lk->lk_recurse == 0)
1150 atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
1153 if (tid != LK_KERNPROC)
1154 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
1157 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
1160 sleepq_lock(&lk->lock_object);
1161 x = lockmgr_read_value(lk);
1165 * If the lock has exclusive waiters, give them
1166 * preference in order to avoid deadlock with
1167 * shared runners-up.
1168 * If interruptible sleeps left the exclusive queue
1169 * empty, avoid starving the threads sleeping on the
1170 * shared queue by giving them precedence and
1171 * clearing the exclusive waiters bit anyway.
1172 * Note that lk_exslpfail may overstate the real
1173 * number of waiters holding the LK_SLEEPFAIL flag,
1174 * because such waiters may also be using
1175 * interruptible sleeps; treat lk_exslpfail as an
1176 * upper bound, including in the edge cases.
1179 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1180 realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
1181 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1182 if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
1183 lk->lk_exslpfail = 0;
1184 queue = SQ_EXCLUSIVE_QUEUE;
1185 v |= (x & LK_SHARED_WAITERS);
1187 lk->lk_exslpfail = 0;
1189 "%s: %p has only LK_SLEEPFAIL sleepers",
1192 "%s: %p waking up threads on the exclusive queue",
1194 wakeup_swapper = sleepq_broadcast(&lk->lock_object,
1195 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1196 queue = SQ_SHARED_QUEUE;
1200 * Exclusive waiters sleeping with LK_SLEEPFAIL
1201 * on and using interruptible sleeps/timeout
1202 * may have left spurious lk_exslpfail counts
1203 * on, so clean it up anyway.
1205 lk->lk_exslpfail = 0;
1206 queue = SQ_SHARED_QUEUE;
1209 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
1210 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1212 atomic_store_rel_ptr(&lk->lk_lock, v);
1213 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
1214 sleepq_release(&lk->lock_object);
1217 lockmgr_exit(flags, ilk, wakeup_swapper);
1222 * Lightweight entry points for common operations.
1224 * Functionality is similar to sx locks, in that none of the additional lockmgr
1225 * features are supported. To be clear, these are NOT supported:
1226 * 1. shared locking disablement
1227 * 2. returning with an error after sleep
1228 * 3. unlocking the interlock
1230 * If in doubt, use lockmgr_lock_flags.
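 *
 * Illustrative only: a sketch of how these entry points might be invoked.
 * The lock pointer is hypothetical, and passing __FILE__/__LINE__ directly
 * is an assumption made for the example; the file/line pair is only used
 * for debugging, lock profiling and witness reporting.
 *
 *	lockmgr_slock(lkp, LK_SHARED, __FILE__, __LINE__);
 *	lockmgr_unlock(lkp);
 *
 *	lockmgr_xlock(lkp, LK_EXCLUSIVE, __FILE__, __LINE__);
 *	lockmgr_unlock(lkp);
 */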
1233 lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1237 MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
1238 MPASS((flags & LK_INTERLOCK) == 0);
1239 MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1241 if (LK_CAN_WITNESS(flags))
1242 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1244 x = lockmgr_read_value(lk);
1245 if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1246 lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1250 return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1254 lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1258 MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
1259 MPASS((flags & LK_INTERLOCK) == 0);
1261 if (LK_CAN_WITNESS(flags))
1262 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1263 LOP_EXCLUSIVE, file, line, NULL);
1264 tid = (uintptr_t)curthread;
1265 if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1266 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1271 return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1275 lockmgr_unlock(struct lock *lk)
1284 _lockmgr_assert(lk, KA_LOCKED, file, line);
1285 x = lockmgr_read_value(lk);
1286 if (__predict_true((x & LK_SHARE) != 0)) {
1287 lockmgr_note_shared_release(lk, file, line);
1288 if (lockmgr_sunlock_try(lk, &x)) {
1289 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1291 return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1294 tid = (uintptr_t)curthread;
1295 lockmgr_note_exclusive_release(lk, file, line);
1296 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1297 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
1299 return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1306 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
1307 const char *wmesg, int pri, int timo, const char *file, int line)
1310 struct lockmgr_wait lwa;
1311 struct lock_class *class;
1313 uintptr_t tid, v, x;
1314 u_int op, realexslp;
1315 int error, ipri, itimo, queue, wakeup_swapper;
1316 #ifdef LOCK_PROFILING
1317 uint64_t waittime = 0;
1321 if (SCHEDULER_STOPPED())
1325 tid = (uintptr_t)curthread;
1326 op = (flags & LK_TYPE_MASK);
1327 iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
1328 ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
1329 itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
1331 lwa.iwmesg = iwmesg;
1335 MPASS((flags & ~LK_TOTAL_MASK) == 0);
1336 KASSERT((op & (op - 1)) == 0,
1337 ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
1338 KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
1339 (op != LK_DOWNGRADE && op != LK_RELEASE),
1340 ("%s: Invalid flags in regard of the operation desired @ %s:%d",
1341 __func__, file, line));
1342 KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
1343 ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
1344 __func__, file, line));
1345 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
1346 ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
1347 lk->lock_object.lo_name, file, line));
1349 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
1351 if (lk->lock_object.lo_flags & LK_NOSHARE) {
1359 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
1361 if (flags & LK_INTERLOCK)
1362 class->lc_unlock(ilk);
1370 return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
1374 return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
1377 return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1380 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1381 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
1384 * Panic if the lock is recursed.
1386 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1387 if (flags & LK_INTERLOCK)
1388 class->lc_unlock(ilk);
1389 panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
1390 __func__, iwmesg, file, line);
1392 TD_SLOCKS_INC(curthread);
1395 * In order to preserve waiters flags, just spin.
1398 x = lockmgr_read_value(lk);
1399 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1400 x &= LK_ALL_WAITERS;
1401 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1402 LK_SHARERS_LOCK(1) | x))
1406 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
1407 LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1410 _lockmgr_assert(lk, KA_LOCKED, file, line);
1411 x = lockmgr_read_value(lk);
1413 if (__predict_true((x & LK_SHARE) != 0)) {
1414 lockmgr_note_shared_release(lk, file, line);
1415 return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1417 lockmgr_note_exclusive_release(lk, file, line);
1418 return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1422 if (LK_CAN_WITNESS(flags))
1423 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1424 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1428 * Trying to drain a lock we already own will result in a deadlock.
1431 if (lockmgr_xlocked(lk)) {
1432 if (flags & LK_INTERLOCK)
1433 class->lc_unlock(ilk);
1434 panic("%s: draining %s with the lock held @ %s:%d\n",
1435 __func__, iwmesg, file, line);
1439 if (lk->lk_lock == LK_UNLOCKED &&
1440 atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1444 PMC_SOFT_CALL( , , lock, failed);
1446 lock_profile_obtain_lock_failed(&lk->lock_object, false,
1447 &contested, &waittime);
1450 * If the caller does not expect to sleep, just give up
1453 if (LK_TRYOP(flags)) {
1454 LOCK_LOG2(lk, "%s: %p fails the try operation",
1461 * Acquire the sleepqueue chain lock because we
1462 * probably will need to manipulate waiters flags.
1464 sleepq_lock(&lk->lock_object);
1465 x = lockmgr_read_value(lk);
1468 * if the lock has been released while we spun on
1469 * the sleepqueue chain lock just try again.
1471 if (x == LK_UNLOCKED) {
1472 sleepq_release(&lk->lock_object);
1476 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1477 if ((x & ~v) == LK_UNLOCKED) {
1478 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1481 * If interruptible sleeps left the exclusive
1482 * queue empty, avoid starving the threads
1483 * sleeping on the shared queue by giving them
1484 * precedence and clearing the exclusive
1485 * waiters bit anyway.
1486 * Note that lk_exslpfail may overstate the
1487 * real number of waiters holding the
1488 * LK_SLEEPFAIL flag, because such waiters may
1489 * also be using interruptible sleeps; treat
1490 * lk_exslpfail as an upper bound, including
1491 * in the edge cases.
1494 if (v & LK_EXCLUSIVE_WAITERS) {
1495 queue = SQ_EXCLUSIVE_QUEUE;
1496 v &= ~LK_EXCLUSIVE_WAITERS;
1499 * Exclusive waiters sleeping with
1500 * LK_SLEEPFAIL on and using
1501 * interruptible sleeps/timeout may
1502 * have left spurious lk_exslpfail
1503 * counts on, so clean it up anyway.
1505 MPASS(v & LK_SHARED_WAITERS);
1506 lk->lk_exslpfail = 0;
1507 queue = SQ_SHARED_QUEUE;
1508 v &= ~LK_SHARED_WAITERS;
1510 if (queue == SQ_EXCLUSIVE_QUEUE) {
1512 sleepq_sleepcnt(&lk->lock_object,
1513 SQ_EXCLUSIVE_QUEUE);
1514 if (lk->lk_exslpfail >= realexslp) {
1515 lk->lk_exslpfail = 0;
1516 queue = SQ_SHARED_QUEUE;
1517 v &= ~LK_SHARED_WAITERS;
1518 if (realexslp != 0) {
1520 "%s: %p has only LK_SLEEPFAIL sleepers",
1523 "%s: %p waking up threads on the exclusive queue",
1529 SQ_EXCLUSIVE_QUEUE);
1532 lk->lk_exslpfail = 0;
1534 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1535 sleepq_release(&lk->lock_object);
1539 "%s: %p waking up all threads on the %s queue",
1540 __func__, lk, queue == SQ_SHARED_QUEUE ?
1541 "shared" : "exclusive");
1542 wakeup_swapper |= sleepq_broadcast(
1543 &lk->lock_object, SLEEPQ_LK, 0, queue);
1546 * If shared waiters have been woken up, we need
1547 * to wait for one of them to acquire the lock
1548 * before setting the exclusive waiters bit, in
1549 * order to avoid a deadlock.
1551 if (queue == SQ_SHARED_QUEUE) {
1552 for (v = lk->lk_lock;
1553 (v & LK_SHARE) && !LK_SHARERS(v);
1560 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
1561 * fail, loop back and retry.
1563 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1564 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1565 x | LK_EXCLUSIVE_WAITERS)) {
1566 sleepq_release(&lk->lock_object);
1569 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1574 * Since we have been unable to acquire the
1575 * exclusive lock and the exclusive waiters flag
1576 * is set, we will sleep.
1578 if (flags & LK_INTERLOCK) {
1579 class->lc_unlock(ilk);
1580 flags &= ~LK_INTERLOCK;
1583 sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1584 SQ_EXCLUSIVE_QUEUE);
1585 sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1587 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1592 lock_profile_obtain_lock_success(&lk->lock_object,
1593 false, contested, waittime, file, line);
1594 LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1595 lk->lk_recurse, file, line);
1596 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1597 LK_TRYWIT(flags), file, line);
1598 TD_LOCKS_INC(curthread);
1603 if (flags & LK_INTERLOCK)
1604 class->lc_unlock(ilk);
1605 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1608 if (flags & LK_INTERLOCK)
1609 class->lc_unlock(ilk);
1617 _lockmgr_disown(struct lock *lk, const char *file, int line)
1621 if (SCHEDULER_STOPPED())
1624 tid = (uintptr_t)curthread;
1625 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1628 * Panic if the lock is recursed.
1630 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1631 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1632 __func__, file, line);
1635 * If the owner is already LK_KERNPROC just skip the whole operation.
1637 if (LK_HOLDER(lk->lk_lock) != tid)
1639 lock_profile_release_lock(&lk->lock_object, false);
1640 LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1641 LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1642 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1643 TD_LOCKS_DEC(curthread);
1647 * In order to preserve waiters flags, just spin.
1650 x = lockmgr_read_value(lk);
1651 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1652 x &= LK_ALL_WAITERS;
1653 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1661 lockmgr_printinfo(const struct lock *lk)
1666 if (lk->lk_lock == LK_UNLOCKED)
1667 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1668 else if (lk->lk_lock & LK_SHARE)
1669 printf("lock type %s: SHARED (count %ju)\n",
1670 lk->lock_object.lo_name,
1671 (uintmax_t)LK_SHARERS(lk->lk_lock));
1673 td = lockmgr_xholder(lk);
1674 if (td == (struct thread *)LK_KERNPROC)
1675 printf("lock type %s: EXCL by KERNPROC\n",
1676 lk->lock_object.lo_name);
1678 printf("lock type %s: EXCL by thread %p "
1679 "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1680 td, td->td_proc->p_pid, td->td_proc->p_comm,
1685 if (x & LK_EXCLUSIVE_WAITERS)
1686 printf(" with exclusive waiters pending\n");
1687 if (x & LK_SHARED_WAITERS)
1688 printf(" with shared waiters pending\n");
1689 if (x & LK_EXCLUSIVE_SPINNERS)
1690 printf(" with exclusive spinners pending\n");
1696 lockstatus(const struct lock *lk)
1702 x = lockmgr_read_value(lk);
1705 if ((x & LK_SHARE) == 0) {
1706 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1710 } else if (x == LK_UNLOCKED)
1716 #ifdef INVARIANT_SUPPORT
1718 FEATURE(invariant_support,
1719 "Support for modules compiled with INVARIANTS option");
1722 #undef _lockmgr_assert
1726 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1730 if (SCHEDULER_STOPPED())
1734 case KA_SLOCKED | KA_NOTRECURSED:
1735 case KA_SLOCKED | KA_RECURSED:
1738 case KA_LOCKED | KA_NOTRECURSED:
1739 case KA_LOCKED | KA_RECURSED:
1743 * We cannot trust WITNESS if the lock is held in exclusive
1744 * mode and a call to lockmgr_disown() happened.
1745 * Work around this by skipping the check if the lock is held
1746 * in exclusive mode, even for the KA_LOCKED case.
1748 if (slocked || (lk->lk_lock & LK_SHARE)) {
1749 witness_assert(&lk->lock_object, what, file, line);
1753 if (lk->lk_lock == LK_UNLOCKED ||
1754 ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1755 (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1756 panic("Lock %s not %slocked @ %s:%d\n",
1757 lk->lock_object.lo_name, slocked ? "share" : "",
1760 if ((lk->lk_lock & LK_SHARE) == 0) {
1761 if (lockmgr_recursed(lk)) {
1762 if (what & KA_NOTRECURSED)
1763 panic("Lock %s recursed @ %s:%d\n",
1764 lk->lock_object.lo_name, file,
1766 } else if (what & KA_RECURSED)
1767 panic("Lock %s not recursed @ %s:%d\n",
1768 lk->lock_object.lo_name, file, line);
1772 case KA_XLOCKED | KA_NOTRECURSED:
1773 case KA_XLOCKED | KA_RECURSED:
1774 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1775 panic("Lock %s not exclusively locked @ %s:%d\n",
1776 lk->lock_object.lo_name, file, line);
1777 if (lockmgr_recursed(lk)) {
1778 if (what & KA_NOTRECURSED)
1779 panic("Lock %s recursed @ %s:%d\n",
1780 lk->lock_object.lo_name, file, line);
1781 } else if (what & KA_RECURSED)
1782 panic("Lock %s not recursed @ %s:%d\n",
1783 lk->lock_object.lo_name, file, line);
1786 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1787 panic("Lock %s exclusively locked @ %s:%d\n",
1788 lk->lock_object.lo_name, file, line);
1791 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1799 lockmgr_chain(struct thread *td, struct thread **ownerp)
1801 const struct lock *lk;
1805 if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1807 db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1808 if (lk->lk_lock & LK_SHARE)
1809 db_printf("SHARED (count %ju)\n",
1810 (uintmax_t)LK_SHARERS(lk->lk_lock));
1812 db_printf("EXCL\n");
1813 *ownerp = lockmgr_xholder(lk);
1819 db_show_lockmgr(const struct lock_object *lock)
1822 const struct lock *lk;
1824 lk = (const struct lock *)lock;
1826 db_printf(" state: ");
1827 if (lk->lk_lock == LK_UNLOCKED)
1828 db_printf("UNLOCKED\n");
1829 else if (lk->lk_lock & LK_SHARE)
1830 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1832 td = lockmgr_xholder(lk);
1833 if (td == (struct thread *)LK_KERNPROC)
1834 db_printf("XLOCK: LK_KERNPROC\n");
1836 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1837 td->td_tid, td->td_proc->p_pid,
1838 td->td_proc->p_comm);
1839 if (lockmgr_recursed(lk))
1840 db_printf(" recursed: %d\n", lk->lk_recurse);
1842 db_printf(" waiters: ");
1843 switch (lk->lk_lock & LK_ALL_WAITERS) {
1844 case LK_SHARED_WAITERS:
1845 db_printf("shared\n");
1847 case LK_EXCLUSIVE_WAITERS:
1848 db_printf("exclusive\n");
1850 case LK_ALL_WAITERS:
1851 db_printf("shared and exclusive\n");
1854 db_printf("none\n");
1856 db_printf(" spinners: ");
1857 if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1858 db_printf("exclusive\n");
1860 db_printf("none\n");