* Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* 1. Redistributions of source code must retain the above copyright
*    notice(s), this list of conditions and the following disclaimer as
*    the first lines of this file unmodified other than the possible
*    addition of one or more copyright notices.
* 2. Redistributions in binary form must reproduce the above copyright
*    notice(s), this list of conditions and the following disclaimer in the
*    documentation and/or other materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
#include "opt_hwpmc_hooks.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
(LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
#define SQ_EXCLUSIVE_QUEUE 0
#define SQ_SHARED_QUEUE 1
#define _lockmgr_assert(lk, what, file, line)
#define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++)
#define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--)
#define STACK_PRINT(lk)
#define STACK_SAVE(lk)
#define STACK_ZERO(lk)
#define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
#define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
#define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
#define LOCK_LOG2(lk, string, arg1, arg2) \
if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
#define GIANT_DECLARE \
WITNESS_SAVE_DECL(Giant)
#define GIANT_RESTORE() do { \
WITNESS_RESTORE(&Giant.lock_object, Giant); \
#define GIANT_SAVE() do { \
if (mtx_owned(&Giant)) { \
WITNESS_SAVE(&Giant.lock_object, Giant); \
while (mtx_owned(&Giant)) { \
mtx_unlock(&Giant); \
#define LK_CAN_SHARE(x, flags) \
(((x) & LK_SHARE) && \
(((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 || \
(curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) || \
(curthread->td_pflags & TDP_DEADLKTREAT)))
#define LK_TRYOP(x) \
#define LK_CAN_WITNESS(x) \
(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define LK_TRYWIT(x) \
(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
#define LK_CAN_ADAPT(lk, f) \
(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 && \
((f) & LK_SLEEPFAIL) == 0)
#define lockmgr_disowned(lk) \
(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
#define lockmgr_xlocked(lk) \
(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
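/*
 * Illustrative note, not from the original file: judging from the macros
 * above, lk_lock packs the whole lock state into a single word.  The low
 * bits are flags (LK_SHARE, the waiters bits and LK_EXCLUSIVE_SPINNERS);
 * when LK_SHARE is set the remaining bits hold the count of shared
 * holders, otherwise they hold the owning thread pointer, or LK_KERNPROC
 * for a disowned lock.  A hypothetical decoder could look like:
 *
 *	uintptr_t x = lk->lk_lock;
 *	if (x & LK_SHARE)
 *		printf("shared, %ju holders\n", (uintmax_t)LK_SHARERS(x));
 *	else
 *		printf("owned by %p\n", (void *)LK_HOLDER(x));
 */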
static void assert_lockmgr(const struct lock_object *lock, int how);
static void db_show_lockmgr(const struct lock_object *lock);
static void lock_lockmgr(struct lock_object *lock, uintptr_t how);
static int owner_lockmgr(const struct lock_object *lock,
struct thread **owner);
static uintptr_t unlock_lockmgr(struct lock_object *lock);
struct lock_class lock_class_lockmgr = {
.lc_name = "lockmgr",
.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
.lc_assert = assert_lockmgr,
.lc_ddb_show = db_show_lockmgr,
.lc_lock = lock_lockmgr,
.lc_unlock = unlock_lockmgr,
.lc_owner = owner_lockmgr,
static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t x);
lockmgr_note_shared_acquire(struct lock *lk, int contested,
uint64_t waittime, const char *file, int line, int flags)
lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime,
LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
TD_LOCKS_INC(curthread);
TD_SLOCKS_INC(curthread);
lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
lock_profile_release_lock(&lk->lock_object);
WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
TD_LOCKS_DEC(curthread);
TD_SLOCKS_DEC(curthread);
lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
uint64_t waittime, const char *file, int line, int flags)
lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime,
LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
TD_LOCKS_INC(curthread);
lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
lock_profile_release_lock(&lk->lock_object);
LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
TD_LOCKS_DEC(curthread);
lockmgr_note_exclusive_upgrade(struct lock *lk, const char *file, int line,
LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
LK_TRYWIT(flags), file, line);
TD_SLOCKS_DEC(curthread);
static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
* It assumes the sleepq_lock is held on entry and returns with it released.
* It also assumes the generic interlock is sane and has been checked previously.
* If LK_INTERLOCK is specified, the interlock is not reacquired after the
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
const char *wmesg, int pri, int timo, int queue)
struct lock_class *class;
class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
catch = pri & PCATCH;
LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
(queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
SLEEPQ_INTERRUPTIBLE : 0), queue);
if ((flags & LK_TIMELOCK) && timo)
sleepq_set_timeout(&lk->lock_object, timo);
* Decide which sleep primitive to use.
if ((flags & LK_TIMELOCK) && timo && catch)
error = sleepq_timedwait_sig(&lk->lock_object, pri);
else if ((flags & LK_TIMELOCK) && timo)
error = sleepq_timedwait(&lk->lock_object, pri);
error = sleepq_wait_sig(&lk->lock_object, pri);
sleepq_wait(&lk->lock_object, pri);
if ((flags & LK_SLEEPFAIL) && error == 0)
wakeupshlk(struct lock *lk, const char *file, int line)
int queue, wakeup_swapper;
if (lockmgr_sunlock_try(lk, x))
* We should have a sharer with waiters, so enter the hard
* path in order to handle wakeups correctly.
sleepq_lock(&lk->lock_object);
x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
* If the lock has exclusive waiters, give them preference in
* order to avoid a deadlock with shared runners-up.
* If interruptible sleeps left the exclusive queue empty,
* avoid starvation of the threads sleeping on the shared
* queue by giving them precedence and clearing the
* exclusive waiters bit anyway.
* Note that the lk_exslpfail count may overstate the real
* number of waiters with the LK_SLEEPFAIL flag set, because
* such waiters may also be using interruptible sleeps, so
* lk_exslpfail should be treated as an upper bound, including
* the edge cases.
realexslp = sleepq_sleepcnt(&lk->lock_object,
if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
if (lk->lk_exslpfail < realexslp) {
lk->lk_exslpfail = 0;
queue = SQ_EXCLUSIVE_QUEUE;
v |= (x & LK_SHARED_WAITERS);
lk->lk_exslpfail = 0;
"%s: %p has only LK_SLEEPFAIL sleepers",
"%s: %p waking up threads on the exclusive queue",
sleepq_broadcast(&lk->lock_object,
SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
queue = SQ_SHARED_QUEUE;
* Exclusive waiters sleeping with LK_SLEEPFAIL on
* and using interruptible sleeps/timeout may have
* left spurious lk_exslpfail counts on, so clean
lk->lk_exslpfail = 0;
queue = SQ_SHARED_QUEUE;
if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
sleepq_release(&lk->lock_object);
LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
__func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
sleepq_release(&lk->lock_object);
lockmgr_note_shared_release(lk, file, line);
return (wakeup_swapper);
assert_lockmgr(const struct lock_object *lock, int what)
panic("lockmgr locks do not support assertions");
lock_lockmgr(struct lock_object *lock, uintptr_t how)
panic("lockmgr locks do not support sleep interlocking");
unlock_lockmgr(struct lock_object *lock)
panic("lockmgr locks do not support sleep interlocking");
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
panic("lockmgr locks do not support owner inquiring");
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
MPASS((flags & ~LK_INIT_MASK) == 0);
ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
iflags = LO_SLEEPABLE | LO_UPGRADABLE;
if (flags & LK_CANRECURSE)
iflags |= LO_RECURSABLE;
if ((flags & LK_NODUP) == 0)
if (flags & LK_NOPROFILE)
iflags |= LO_NOPROFILE;
if ((flags & LK_NOWITNESS) == 0)
iflags |= LO_WITNESS;
if (flags & LK_QUIET)
if (flags & LK_IS_VNODE)
iflags |= LO_IS_VNODE;
iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
lk->lk_lock = LK_UNLOCKED;
lk->lk_exslpfail = 0;
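/*
 * Illustrative sketch, not part of the original file: typical consumer
 * usage of the lockinit()/lockmgr()/lockdestroy() API.  The lock name
 * "example" and the chosen priority and flags are assumptions made for
 * the sake of the example.
 *
 *	struct lock example_lock;
 *
 *	lockinit(&example_lock, PVFS, "example", 0, LK_CANRECURSE);
 *	lockmgr(&example_lock, LK_EXCLUSIVE, NULL);
 *	... exclusive critical section ...
 *	lockmgr(&example_lock, LK_RELEASE, NULL);
 *	lockdestroy(&example_lock);
 */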
* XXX: Gross hacks to manipulate external lock flags after
* initialization. Used for certain vnode and buf locks.
lockallowshare(struct lock *lk)
lockmgr_assert(lk, KA_XLOCKED);
lk->lock_object.lo_flags &= ~LK_NOSHARE;
lockdisableshare(struct lock *lk)
lockmgr_assert(lk, KA_XLOCKED);
lk->lock_object.lo_flags |= LK_NOSHARE;
lockallowrecurse(struct lock *lk)
lockmgr_assert(lk, KA_XLOCKED);
lk->lock_object.lo_flags |= LO_RECURSABLE;
lockdisablerecurse(struct lock *lk)
lockmgr_assert(lk, KA_XLOCKED);
lk->lock_object.lo_flags &= ~LO_RECURSABLE;
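/*
 * Illustrative sketch (assumption, not from the original file): the
 * toggles above assert KA_XLOCKED, so a caller would typically wrap
 * them with an exclusive acquisition of the hypothetical example_lock:
 *
 *	lockmgr(&example_lock, LK_EXCLUSIVE, NULL);
 *	lockdisableshare(&example_lock);
 *	lockmgr(&example_lock, LK_RELEASE, NULL);
 */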
lockdestroy(struct lock *lk)
KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
lock_destroy(&lk->lock_object);
static bool __always_inline
lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags)
* If no other thread has an exclusive lock, or
* no exclusive waiter is present, bump the count of
* sharers. Since we have to preserve the state of
* waiters, if we fail to acquire the shared lock,
* loop back and retry.
while (LK_CAN_SHARE(*xp, flags)) {
if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
*xp + LK_ONE_SHARER)) {
static bool __always_inline
lockmgr_sunlock_try(struct lock *lk, uintptr_t x)
* If there is more than one shared lock held, just drop one
if (LK_SHARERS(x) > 1) {
if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &x,
* If there are no waiters on the exclusive queue, drop the
if ((x & LK_ALL_WAITERS) == 0) {
MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &x,
lockmgr_lock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk,
const char *file, int line)
struct lock_class *class;
op = flags & LK_TYPE_MASK;
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
file, line, flags & LK_INTERLOCK ? ilk : NULL);
if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
if (lockmgr_slock_try(lk, &x, flags)) {
lockmgr_note_shared_acquire(lk, 0, 0,
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
tid = (uintptr_t)curthread;
if (lk->lk_lock == LK_UNLOCKED &&
atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
_lockmgr_assert(lk, KA_SLOCKED, file, line);
tid = (uintptr_t)curthread;
x = v & LK_ALL_WAITERS;
v &= LK_EXCLUSIVE_SPINNERS;
if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
lockmgr_note_exclusive_upgrade(lk, file, line, flags);
if (__predict_true(locked)) {
if (__predict_false(flags & LK_INTERLOCK)) {
class = LOCK_CLASS(ilk);
class->lc_unlock(ilk);
return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk)
struct lock_class *class;
_lockmgr_assert(lk, KA_LOCKED, file, line);
if (__predict_true((x & LK_SHARE) != 0)) {
if (lockmgr_sunlock_try(lk, x)) {
lockmgr_note_shared_release(lk, file, line);
tid = (uintptr_t)curthread;
if (!lockmgr_recursed(lk) &&
atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
lockmgr_note_exclusive_release(lk, file, line);
if (__predict_true(unlocked)) {
if (__predict_false(flags & LK_INTERLOCK)) {
class = LOCK_CLASS(ilk);
class->lc_unlock(ilk);
return (__lockmgr_args(lk, flags | LK_RELEASE, ilk, LK_WMESG_DEFAULT,
LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE));
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
const char *wmesg, int pri, int timo, const char *file, int line)
struct lock_class *class;
int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
uint64_t waittime = 0;
tid = (uintptr_t)curthread;
op = (flags & LK_TYPE_MASK);
iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
MPASS((flags & ~LK_TOTAL_MASK) == 0);
KASSERT((op & (op - 1)) == 0,
("%s: Invalid requested operation @ %s:%d", __func__, file, line));
KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
(op != LK_DOWNGRADE && op != LK_RELEASE),
("%s: Invalid flags in regard of the operation desired @ %s:%d",
__func__, file, line));
KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
__func__, file, line));
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
lk->lock_object.lo_name, file, line));
class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
if (panicstr != NULL) {
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
if (lk->lock_object.lo_flags & LK_NOSHARE) {
_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
file, line, flags & LK_INTERLOCK ? ilk : NULL);
if (lockmgr_slock_try(lk, &x, flags))
PMC_SOFT_CALL( , , lock, failed);
lock_profile_obtain_lock_failed(&lk->lock_object,
&contested, &waittime);
* If the lock is already held by curthread in
* exclusive mode, avoid a deadlock.
if (LK_HOLDER(x) == tid) {
"%s: %p already held in exclusive mode",
* If the operation is expected not to sleep, just give up
if (LK_TRYOP(flags)) {
LOCK_LOG2(lk, "%s: %p fails the try operation",
* Acquire the sleepqueue chain lock because we
* probably will need to manipulate waiters flags.
sleepq_lock(&lk->lock_object);
* If the lock can be acquired in shared mode, try
if (LK_CAN_SHARE(x, flags)) {
sleepq_release(&lk->lock_object);
* Try to set the LK_SHARED_WAITERS flag. If we fail,
* loop back and retry.
if ((x & LK_SHARED_WAITERS) == 0) {
if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
x | LK_SHARED_WAITERS)) {
sleepq_release(&lk->lock_object);
LOCK_LOG2(lk, "%s: %p set shared waiters flag",
* Since we have been unable to acquire the
* shared lock and the shared waiters flag is set,
error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
flags &= ~LK_INTERLOCK;
"%s: interrupted sleep for %p with %d",
__func__, lk, error);
LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
#ifdef LOCK_PROFILING
lockmgr_note_shared_acquire(lk, contested, waittime,
lockmgr_note_shared_acquire(lk, 0, 0, file, line,
_lockmgr_assert(lk, KA_SLOCKED, file, line);
x = v & LK_ALL_WAITERS;
v &= LK_EXCLUSIVE_SPINNERS;
* Try to switch from one shared lock to an exclusive one.
* We need to preserve waiters flags during the operation.
if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
LK_TRYWIT(flags), file, line);
TD_SLOCKS_DEC(curthread);
* In LK_TRYUPGRADE mode, do not drop the lock,
* returning EBUSY instead.
if (op == LK_TRYUPGRADE) {
LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
* We have been unable to succeed in upgrading, so just
* give up the shared lock.
wakeup_swapper |= wakeupshlk(lk, file, line);
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
* If curthread already holds the lock and this one is
* allowed to recurse, simply recurse on it.
if (lockmgr_xlocked(lk)) {
if ((flags & LK_CANRECURSE) == 0 &&
(lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
* If this is a try operation, just give up and
* return instead of panicking.
if (LK_TRYOP(flags)) {
"%s: %p fails the try operation",
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
__func__, iwmesg, file, line);
LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
lk->lk_recurse, file, line);
WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
LK_TRYWIT(flags), file, line);
TD_LOCKS_INC(curthread);
if (lk->lk_lock == LK_UNLOCKED &&
atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
PMC_SOFT_CALL( , , lock, failed);
lock_profile_obtain_lock_failed(&lk->lock_object,
&contested, &waittime);
* If the operation is expected not to sleep, just give up
if (LK_TRYOP(flags)) {
LOCK_LOG2(lk, "%s: %p fails the try operation",
* Acquire the sleepqueue chain lock because we
* probably will need to manipulate waiters flags.
sleepq_lock(&lk->lock_object);
* If the lock has been released while we spun on
* the sleepqueue chain lock, just try again.
if (x == LK_UNLOCKED) {
sleepq_release(&lk->lock_object);
* The lock can be in the state where there is a
* pending queue of waiters, but still no owner.
* This happens when the lock is contested and an
* owner is going to claim the lock.
* If curthread is the one successfully acquiring it,
* claim lock ownership and return, preserving waiters
v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
if ((x & ~v) == LK_UNLOCKED) {
v &= ~LK_EXCLUSIVE_SPINNERS;
if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
sleepq_release(&lk->lock_object);
"%s: %p claimed by a new writer",
sleepq_release(&lk->lock_object);
* Try to set the LK_EXCLUSIVE_WAITERS flag. If we
* fail, loop back and retry.
if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
if (!atomic_cmpset_ptr(&lk->lk_lock, x,
x | LK_EXCLUSIVE_WAITERS)) {
sleepq_release(&lk->lock_object);
LOCK_LOG2(lk, "%s: %p set excl waiters flag",
* Since we have been unable to acquire the
* exclusive lock and the exclusive waiters flag
* is set, we will sleep.
error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
flags &= ~LK_INTERLOCK;
"%s: interrupted sleep for %p with %d",
__func__, lk, error);
LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
#ifdef LOCK_PROFILING
lockmgr_note_exclusive_acquire(lk, contested, waittime,
lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
_lockmgr_assert(lk, KA_XLOCKED, file, line);
LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
* Panic if the lock is recursed.
if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
__func__, iwmesg, file, line);
TD_SLOCKS_INC(curthread);
* In order to preserve waiters flags, just spin.
MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
LK_SHARERS_LOCK(1) | x))
_lockmgr_assert(lk, KA_LOCKED, file, line);
if ((x & LK_SHARE) == 0) {
* As first option, treat the lock as if it has not
* Fix-up the tid var if the lock has been disowned.
if (LK_HOLDER(x) == LK_KERNPROC)
WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
TD_LOCKS_DEC(curthread);
LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
lk->lk_recurse, file, line);
* The lock is held in exclusive mode.
* If the lock is also recursed, then unrecurse it.
if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
if (tid != LK_KERNPROC)
lock_profile_release_lock(&lk->lock_object);
if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
sleepq_lock(&lk->lock_object);
* If the lock has exclusive waiters, give them
* preference in order to avoid a deadlock with
* shared runners-up.
* If interruptible sleeps left the exclusive queue
* empty, avoid starvation of the threads sleeping
* on the shared queue by giving them precedence
* and clearing the exclusive waiters bit anyway.
* Note that the lk_exslpfail count may overstate
* the real number of waiters with the LK_SLEEPFAIL
* flag set, because such waiters may also be using
* interruptible sleeps, so lk_exslpfail should be
* treated as an upper bound, including the edge
* cases.
MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
realexslp = sleepq_sleepcnt(&lk->lock_object,
SQ_EXCLUSIVE_QUEUE);
if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
if (lk->lk_exslpfail < realexslp) {
lk->lk_exslpfail = 0;
queue = SQ_EXCLUSIVE_QUEUE;
v |= (x & LK_SHARED_WAITERS);
lk->lk_exslpfail = 0;
"%s: %p has only LK_SLEEPFAIL sleepers",
"%s: %p waking up threads on the exclusive queue",
sleepq_broadcast(&lk->lock_object,
SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
queue = SQ_SHARED_QUEUE;
* Exclusive waiters sleeping with LK_SLEEPFAIL
* on and using interruptible sleeps/timeout
* may have left spurious lk_exslpfail counts
* on, so clean it up anyway.
lk->lk_exslpfail = 0;
queue = SQ_SHARED_QUEUE;
"%s: %p waking up threads on the %s queue",
__func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
atomic_store_rel_ptr(&lk->lk_lock, v);
wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
SLEEPQ_LK, 0, queue);
sleepq_release(&lk->lock_object);
wakeup_swapper = wakeupshlk(lk, file, line);
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
* Trying to drain a lock we already own will result in a
if (lockmgr_xlocked(lk)) {
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
panic("%s: draining %s with the lock held @ %s:%d\n",
__func__, iwmesg, file, line);
if (lk->lk_lock == LK_UNLOCKED &&
atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
PMC_SOFT_CALL( , , lock, failed);
lock_profile_obtain_lock_failed(&lk->lock_object,
&contested, &waittime);
* If the operation is expected not to sleep, just give up
if (LK_TRYOP(flags)) {
LOCK_LOG2(lk, "%s: %p fails the try operation",
* Acquire the sleepqueue chain lock because we
* probably will need to manipulate waiters flags.
sleepq_lock(&lk->lock_object);
* If the lock has been released while we spun on
* the sleepqueue chain lock, just try again.
if (x == LK_UNLOCKED) {
sleepq_release(&lk->lock_object);
v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
if ((x & ~v) == LK_UNLOCKED) {
v = (x & ~LK_EXCLUSIVE_SPINNERS);
* If interruptible sleeps left the exclusive
* queue empty, avoid starvation of the
* threads sleeping on the shared queue by
* giving them precedence and clearing the
* exclusive waiters bit anyway.
* Note that the lk_exslpfail count may
* overstate the real number of waiters with
* the LK_SLEEPFAIL flag set, because they may
* also be using interruptible sleeps, so
* lk_exslpfail should be treated as an upper
* bound, including the edge
if (v & LK_EXCLUSIVE_WAITERS) {
queue = SQ_EXCLUSIVE_QUEUE;
v &= ~LK_EXCLUSIVE_WAITERS;
* Exclusive waiters sleeping with
* LK_SLEEPFAIL on and using
* interruptible sleeps/timeout may
* have left spurious lk_exslpfail
* counts on, so clean it up anyway.
MPASS(v & LK_SHARED_WAITERS);
lk->lk_exslpfail = 0;
queue = SQ_SHARED_QUEUE;
v &= ~LK_SHARED_WAITERS;
if (queue == SQ_EXCLUSIVE_QUEUE) {
sleepq_sleepcnt(&lk->lock_object,
SQ_EXCLUSIVE_QUEUE);
if (lk->lk_exslpfail >= realexslp) {
lk->lk_exslpfail = 0;
queue = SQ_SHARED_QUEUE;
v &= ~LK_SHARED_WAITERS;
if (realexslp != 0) {
"%s: %p has only LK_SLEEPFAIL sleepers",
"%s: %p waking up threads on the exclusive queue",
SQ_EXCLUSIVE_QUEUE);
lk->lk_exslpfail = 0;
if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
sleepq_release(&lk->lock_object);
"%s: %p waking up all threads on the %s queue",
__func__, lk, queue == SQ_SHARED_QUEUE ?
"shared" : "exclusive");
wakeup_swapper |= sleepq_broadcast(
&lk->lock_object, SLEEPQ_LK, 0, queue);
* If shared waiters have been woken up, we need
* to wait for one of them to acquire the lock
* before setting the exclusive waiters flag in
* order to avoid a deadlock.
if (queue == SQ_SHARED_QUEUE) {
for (v = lk->lk_lock;
(v & LK_SHARE) && !LK_SHARERS(v);
* Try to set the LK_EXCLUSIVE_WAITERS flag. If we
* fail, loop back and retry.
if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
if (!atomic_cmpset_ptr(&lk->lk_lock, x,
x | LK_EXCLUSIVE_WAITERS)) {
sleepq_release(&lk->lock_object);
LOCK_LOG2(lk, "%s: %p set drain waiters flag",
* Since we have been unable to acquire the
* exclusive lock and the exclusive waiters flag
* is set, we will sleep.
if (flags & LK_INTERLOCK) {
class->lc_unlock(ilk);
flags &= ~LK_INTERLOCK;
sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
SQ_EXCLUSIVE_QUEUE);
sleepq_wait(&lk->lock_object, ipri & PRIMASK);
LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
lock_profile_obtain_lock_success(&lk->lock_object,
contested, waittime, file, line);
LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
lk->lk_recurse, file, line);
WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
LK_TRYWIT(flags), file, line);
TD_LOCKS_INC(curthread);
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
_lockmgr_disown(struct lock *lk, const char *file, int line)
if (SCHEDULER_STOPPED())
tid = (uintptr_t)curthread;
_lockmgr_assert(lk, KA_XLOCKED, file, line);
* Panic if the lock is recursed.
if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
panic("%s: disown a recursed lockmgr @ %s:%d\n",
__func__, file, line);
* If the owner is already LK_KERNPROC just skip the whole operation.
if (LK_HOLDER(lk->lk_lock) != tid)
lock_profile_release_lock(&lk->lock_object);
LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
TD_LOCKS_DEC(curthread);
* In order to preserve waiters flags, just spin.
MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
x &= LK_ALL_WAITERS;
if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
lockmgr_printinfo(const struct lock *lk)
if (lk->lk_lock == LK_UNLOCKED)
printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
else if (lk->lk_lock & LK_SHARE)
printf("lock type %s: SHARED (count %ju)\n",
lk->lock_object.lo_name,
(uintmax_t)LK_SHARERS(lk->lk_lock));
td = lockmgr_xholder(lk);
if (td == (struct thread *)LK_KERNPROC)
printf("lock type %s: EXCL by KERNPROC\n",
lk->lock_object.lo_name);
printf("lock type %s: EXCL by thread %p "
"(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
td, td->td_proc->p_pid, td->td_proc->p_comm,
if (x & LK_EXCLUSIVE_WAITERS)
printf(" with exclusive waiters pending\n");
if (x & LK_SHARED_WAITERS)
printf(" with shared waiters pending\n");
if (x & LK_EXCLUSIVE_SPINNERS)
printf(" with exclusive spinners pending\n");
lockstatus(const struct lock *lk)
if ((x & LK_SHARE) == 0) {
if (v == (uintptr_t)curthread || v == LK_KERNPROC)
} else if (x == LK_UNLOCKED)
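/*
 * Illustrative sketch (assumption, not from the original file): callers
 * usually compare the value returned by lockstatus() against the
 * ownership states it can report: LK_EXCLUSIVE (held exclusively by
 * curthread or disowned to LK_KERNPROC), LK_EXCLOTHER (held exclusively
 * by another thread), LK_SHARED, or 0 when unlocked.  For example, with
 * the hypothetical example_lock from above:
 *
 *	if (lockstatus(&example_lock) == LK_EXCLUSIVE)
 *		... we hold it exclusively ...
 */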
#ifdef INVARIANT_SUPPORT
FEATURE(invariant_support,
"Support for modules compiled with INVARIANTS option");
#undef _lockmgr_assert
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
if (panicstr != NULL)
case KA_SLOCKED | KA_NOTRECURSED:
case KA_SLOCKED | KA_RECURSED:
case KA_LOCKED | KA_NOTRECURSED:
case KA_LOCKED | KA_RECURSED:
* We cannot trust WITNESS if the lock is held in exclusive
* mode and a call to lockmgr_disown() happened.
* Work around this by skipping the check if the lock is held
* in exclusive mode, even for the KA_LOCKED case.
if (slocked || (lk->lk_lock & LK_SHARE)) {
witness_assert(&lk->lock_object, what, file, line);
if (lk->lk_lock == LK_UNLOCKED ||
((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
(!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
panic("Lock %s not %slocked @ %s:%d\n",
lk->lock_object.lo_name, slocked ? "share" : "",
if ((lk->lk_lock & LK_SHARE) == 0) {
if (lockmgr_recursed(lk)) {
if (what & KA_NOTRECURSED)
panic("Lock %s recursed @ %s:%d\n",
lk->lock_object.lo_name, file,
} else if (what & KA_RECURSED)
panic("Lock %s not recursed @ %s:%d\n",
lk->lock_object.lo_name, file, line);
case KA_XLOCKED | KA_NOTRECURSED:
case KA_XLOCKED | KA_RECURSED:
if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
panic("Lock %s not exclusively locked @ %s:%d\n",
lk->lock_object.lo_name, file, line);
if (lockmgr_recursed(lk)) {
if (what & KA_NOTRECURSED)
panic("Lock %s recursed @ %s:%d\n",
lk->lock_object.lo_name, file, line);
} else if (what & KA_RECURSED)
panic("Lock %s not recursed @ %s:%d\n",
lk->lock_object.lo_name, file, line);
if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
panic("Lock %s exclusively locked @ %s:%d\n",
lk->lock_object.lo_name, file, line);
panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
lockmgr_chain(struct thread *td, struct thread **ownerp)
if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
if (lk->lk_lock & LK_SHARE)
db_printf("SHARED (count %ju)\n",
(uintmax_t)LK_SHARERS(lk->lk_lock));
db_printf("EXCL\n");
*ownerp = lockmgr_xholder(lk);
db_show_lockmgr(const struct lock_object *lock)
const struct lock *lk;
lk = (const struct lock *)lock;
db_printf(" state: ");
if (lk->lk_lock == LK_UNLOCKED)
db_printf("UNLOCKED\n");
else if (lk->lk_lock & LK_SHARE)
db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
td = lockmgr_xholder(lk);
if (td == (struct thread *)LK_KERNPROC)
db_printf("XLOCK: LK_KERNPROC\n");
db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
td->td_tid, td->td_proc->p_pid,
td->td_proc->p_comm);
if (lockmgr_recursed(lk))
db_printf(" recursed: %d\n", lk->lk_recurse);
db_printf(" waiters: ");
switch (lk->lk_lock & LK_ALL_WAITERS) {
case LK_SHARED_WAITERS:
db_printf("shared\n");
case LK_EXCLUSIVE_WAITERS:
db_printf("exclusive\n");
case LK_ALL_WAITERS:
db_printf("shared and exclusive\n");
db_printf("none\n");
db_printf(" spinners: ");
if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
db_printf("exclusive\n");
db_printf("none\n");