/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_hwpmc_hooks.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif
CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define SQ_EXCLUSIVE_QUEUE 0
#define SQ_SHARED_QUEUE 1
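
/*
 * Threads blocked on a lockmgr lock sleep on one of two sleepqueue
 * sub-queues: SQ_EXCLUSIVE_QUEUE for would-be exclusive owners and
 * SQ_SHARED_QUEUE for would-be sharers.  The wakeup paths below decide
 * which queue to drain based on the waiter bits kept in the lock word.
 */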
#ifndef INVARIANTS
#define _lockmgr_assert(lk, what, file, line)
#endif

#define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++)
#define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--)
#ifndef DEBUG_LOCKS
#define STACK_PRINT(lk)
#define STACK_SAVE(lk)
#define STACK_ZERO(lk)
#else
#define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
#define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
#define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
#endif
#define LOCK_LOG2(lk, string, arg1, arg2) \
    if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
        CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
    if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
        CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
#define GIANT_DECLARE \
    int _i = 0; \
    WITNESS_SAVE_DECL(Giant)
#define GIANT_RESTORE() do { \
    if (_i > 0) { \
        while (_i--) \
            mtx_lock(&Giant); \
        WITNESS_RESTORE(&Giant.lock_object, Giant); \
    } \
} while (0)
#define GIANT_SAVE() do { \
    if (mtx_owned(&Giant)) { \
        WITNESS_SAVE(&Giant.lock_object, Giant); \
        while (mtx_owned(&Giant)) { \
            _i++; \
            mtx_unlock(&Giant); \
        } \
    } \
} while (0)
#define LK_CAN_SHARE(x, flags) \
    (((x) & LK_SHARE) && \
    (((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 || \
    (curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) || \
    (curthread->td_pflags & TDP_DEADLKTREAT)))
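
/*
 * A shared request can proceed only while the lock word is in shared
 * mode and, normally, no exclusive waiter or spinner is pending.  The
 * last two disjuncts above let a thread that already holds shared locks
 * (or has TDP_DEADLKTREAT set) bypass pending exclusive waiters, trading
 * some writer fairness for deadlock avoidance.
 */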
#define LK_TRYOP(x) \
    ((x) & LK_NOWAIT)

#define LK_CAN_WITNESS(x) \
    (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define LK_TRYWIT(x) \
    (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
#define LK_CAN_ADAPT(lk, f) \
    (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 && \
    ((f) & LK_SLEEPFAIL) == 0)

#define lockmgr_disowned(lk) \
    (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define lockmgr_xlocked(lk) \
    (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
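
/*
 * The two predicates above exploit the lock word encoding: it either
 * holds a sharer count (when LK_SHARE is set) or the owning thread
 * pointer, with the low flag bits masked off via LK_FLAGMASK.  A
 * disowned lock stores the fictitious owner LK_KERNPROC in place of a
 * real thread pointer.
 */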
static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_lockmgr(struct lock_object *lock);
struct lock_class lock_class_lockmgr = {
    .lc_name = "lockmgr",
    .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
    .lc_assert = assert_lockmgr,
#ifdef DDB
    .lc_ddb_show = db_show_lockmgr,
#endif
    .lc_lock = lock_lockmgr,
    .lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
    .lc_owner = owner_lockmgr,
#endif
};
static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
    int flags);
static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t x);
static void
lockmgr_note_shared_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

    lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime,
        file, line);
    LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
    WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
    TD_LOCKS_INC(curthread);
    TD_SLOCKS_INC(curthread);
    STACK_SAVE(lk);
}
static void
lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
{

    lock_profile_release_lock(&lk->lock_object);
    WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
    LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
    TD_LOCKS_DEC(curthread);
    TD_SLOCKS_DEC(curthread);
}
static void
lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

    lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime,
        file, line);
    LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
    WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
        line);
    TD_LOCKS_INC(curthread);
    STACK_SAVE(lk);
}
static void
lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
{

    lock_profile_release_lock(&lk->lock_object);
    LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
        line);
    WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
    TD_LOCKS_DEC(curthread);
}
static void
lockmgr_note_exclusive_upgrade(struct lock *lk, const char *file, int line,
    int flags)
{

    LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
        line);
    WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
        LK_TRYWIT(flags), file, line);
    TD_SLOCKS_DEC(curthread);
}
static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
    uintptr_t x;

    x = lk->lk_lock;
    return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}
/*
 * It assumes the sleepqueue lock is held on entry and returns with it
 * released.
 * It also assumes the generic interlock is sane and previously checked.
 * If LK_INTERLOCK is specified the interlock is not reacquired after the
 * sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
    GIANT_DECLARE;
    struct lock_class *class;
    int catch, error;

    class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
    catch = pri & PCATCH;
    pri &= PRIMASK;
    error = 0;
    LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
        (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

    if (flags & LK_INTERLOCK)
        class->lc_unlock(ilk);
    if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
        lk->lk_exslpfail++;
    GIANT_SAVE();
    sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
        SLEEPQ_INTERRUPTIBLE : 0), queue);
    if ((flags & LK_TIMELOCK) && timo)
        sleepq_set_timeout(&lk->lock_object, timo);
    /*
     * Choose the sleep primitive to use, honoring LK_TIMELOCK and
     * PCATCH.
     */
    if ((flags & LK_TIMELOCK) && timo && catch)
        error = sleepq_timedwait_sig(&lk->lock_object, pri);
    else if ((flags & LK_TIMELOCK) && timo)
        error = sleepq_timedwait(&lk->lock_object, pri);
    else if (catch)
        error = sleepq_wait_sig(&lk->lock_object, pri);
    else
        sleepq_wait(&lk->lock_object, pri);
    GIANT_RESTORE();
    if ((flags & LK_SLEEPFAIL) && error == 0)
        error = ENOLCK;

    return (error);
}
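
/*
 * For example (an illustrative flag combination, not taken from a real
 * caller), a request like lockmgr_args(lk, LK_SHARED | LK_TIMELOCK,
 * NULL, "lkwait", PCATCH | PVFS, hz) would reach
 * sleepq_timedwait_sig() above and can thus fail with EINTR, ERESTART
 * or EWOULDBLOCK, while LK_SLEEPFAIL turns even a clean wakeup into
 * ENOLCK so that the caller retries the whole operation from scratch.
 */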
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
    uintptr_t v, x;
    u_int realexslp;
    int queue, wakeup_swapper;

    wakeup_swapper = 0;
    for (;;) {
        x = lk->lk_lock;
        if (lockmgr_sunlock_try(lk, x))
            break;

        /*
         * We should have a sharer with waiters, so enter the hard
         * path in order to handle wakeups correctly.
         */
        sleepq_lock(&lk->lock_object);
        x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
        v = LK_UNLOCKED;
        /*
         * If the lock has exclusive waiters, give them preference in
         * order to avoid deadlock with shared runners-up.
         * If interruptible sleeps left the exclusive queue empty,
         * avoid starvation of the threads sleeping on the shared
         * queue by giving them precedence and cleaning up the
         * exclusive waiters bit anyway.
         * Note that the lk_exslpfail count may overstate the real
         * number of waiters with the LK_SLEEPFAIL flag set, because
         * such waiters may also use interruptible sleeps; treat
         * lk_exslpfail as an upper bound, including in the edge
         * cases.
         */
        realexslp = sleepq_sleepcnt(&lk->lock_object,
            SQ_EXCLUSIVE_QUEUE);
        if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
            if (lk->lk_exslpfail < realexslp) {
                lk->lk_exslpfail = 0;
                queue = SQ_EXCLUSIVE_QUEUE;
                v |= (x & LK_SHARED_WAITERS);
            } else {
                lk->lk_exslpfail = 0;
                LOCK_LOG2(lk,
                    "%s: %p has only LK_SLEEPFAIL sleepers",
                    __func__, lk);
                LOCK_LOG2(lk,
                    "%s: %p waking up threads on the exclusive queue",
                    __func__, lk);
                wakeup_swapper =
                    sleepq_broadcast(&lk->lock_object,
                    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
                queue = SQ_SHARED_QUEUE;
            }
        } else {

            /*
             * Exclusive waiters sleeping with LK_SLEEPFAIL on
             * and using interruptible sleeps/timeout may have
             * left spurious lk_exslpfail counts on, so clean
             * it up anyway.
             */
            lk->lk_exslpfail = 0;
            queue = SQ_SHARED_QUEUE;
        }
        if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
            v)) {
            sleepq_release(&lk->lock_object);
            continue;
        }
        LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
            __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
            "exclusive");
        wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
            0, queue);
        sleepq_release(&lk->lock_object);
        break;
    }

    lockmgr_note_shared_release(lk, file, line);
    return (wakeup_swapper);
}
static void
assert_lockmgr(const struct lock_object *lock, int what)
{

    panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, uintptr_t how)
{

    panic("lockmgr locks do not support sleep interlocking");
}

static uintptr_t
unlock_lockmgr(struct lock_object *lock)
{

    panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

    panic("lockmgr locks do not support owner inquiring");
}
#endif
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
    int iflags;

    MPASS((flags & ~LK_INIT_MASK) == 0);
    ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
        ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
        &lk->lk_lock));

    iflags = LO_SLEEPABLE | LO_UPGRADABLE;
    if (flags & LK_CANRECURSE)
        iflags |= LO_RECURSABLE;
    if ((flags & LK_NODUP) == 0)
        iflags |= LO_DUPOK;
    if (flags & LK_NOPROFILE)
        iflags |= LO_NOPROFILE;
    if ((flags & LK_NOWITNESS) == 0)
        iflags |= LO_WITNESS;
    if (flags & LK_QUIET)
        iflags |= LO_QUIET;
    if (flags & LK_IS_VNODE)
        iflags |= LO_IS_VNODE;
    iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

    lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
    lk->lk_lock = LK_UNLOCKED;
    lk->lk_recurse = 0;
    lk->lk_exslpfail = 0;
    lk->lk_timo = timo;
    lk->lk_pri = pri;
    STACK_ZERO(lk);
}
/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

    lockmgr_assert(lk, KA_XLOCKED);
    lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockdisableshare(struct lock *lk)
{

    lockmgr_assert(lk, KA_XLOCKED);
    lk->lock_object.lo_flags |= LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

    lockmgr_assert(lk, KA_XLOCKED);
    lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

    lockmgr_assert(lk, KA_XLOCKED);
    lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}
void
lockdestroy(struct lock *lk)
{

    KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
    KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
    KASSERT(lk->lk_exslpfail == 0, ("lockmgr still has exclusive waiters"));
    lock_destroy(&lk->lock_object);
}
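
/*
 * Typical consumer usage (an illustrative sketch, not code from this
 * file): a lock is set up once, cycled through lockmgr(9) operations
 * and torn down only when unlocked, or the KASSERTs above will fire:
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, LK_CANRECURSE);
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 */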
static bool __always_inline
lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags)
{

    /*
     * If no other thread has an exclusive lock, or
     * no exclusive waiter is present, bump the count of
     * sharers.  Since we have to preserve the state of
     * waiters, if we fail to acquire the shared lock
     * loop back and retry.
     */
    *xp = lk->lk_lock;
    while (LK_CAN_SHARE(*xp, flags)) {
        if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
            *xp + LK_ONE_SHARER)) {
            return (true);
        }
    }
    return (false);
}
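
/*
 * The fcmpset loop above is the usual lock-word idiom: on failure the
 * primitive reloads *xp with the current lock value, so LK_CAN_SHARE()
 * is re-evaluated against fresh state until the increment succeeds or
 * sharing becomes inadmissible.
 */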
static bool __always_inline
lockmgr_sunlock_try(struct lock *lk, uintptr_t x)
{

    for (;;) {
        /*
         * If there is more than one shared lock held, just drop one
         * and return.
         */
        if (LK_SHARERS(x) > 1) {
            if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &x,
                x - LK_ONE_SHARER))
                return (true);
            continue;
        }

        /*
         * If there are no waiters on the exclusive queue, drop the
         * lock quickly.
         */
        if ((x & LK_ALL_WAITERS) == 0) {
            MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
                LK_SHARERS_LOCK(1));
            if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &x,
                LK_UNLOCKED))
                return (true);
            continue;
        }
        break;
    }
    return (false);
}
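
/*
 * Both fast release cases above deliberately fail over to the slow path
 * when waiter bits are set and we are the last sharer: simply clearing
 * the word would strand the threads queued on the sleepqueues, so the
 * caller must take the wakeup path instead.
 */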
int
lockmgr_lock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
    struct lock_class *class;
    uintptr_t x, v, tid;
    u_int op;
    bool locked;

    op = flags & LK_TYPE_MASK;
    locked = false;
    switch (op) {
    case LK_SHARED:
        if (LK_CAN_WITNESS(flags))
            WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
                file, line, flags & LK_INTERLOCK ? ilk : NULL);
        if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
            break;
        if (lockmgr_slock_try(lk, &x, flags)) {
            lockmgr_note_shared_acquire(lk, 0, 0,
                file, line, flags);
            locked = true;
        }
        break;
    case LK_EXCLUSIVE:
        if (LK_CAN_WITNESS(flags))
            WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
                ilk : NULL);
        tid = (uintptr_t)curthread;
        if (lk->lk_lock == LK_UNLOCKED &&
            atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
            lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
                flags);
            locked = true;
        }
        break;
    case LK_UPGRADE:
    case LK_TRYUPGRADE:
        _lockmgr_assert(lk, KA_SLOCKED, file, line);
        tid = (uintptr_t)curthread;
        v = lk->lk_lock;
        x = v & LK_ALL_WAITERS;
        v &= LK_EXCLUSIVE_SPINNERS;
        if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
            tid | x)) {
            lockmgr_note_exclusive_upgrade(lk, file, line, flags);
            locked = true;
        }
        break;
    default:
        break;
    }
    if (__predict_true(locked)) {
        if (__predict_false(flags & LK_INTERLOCK)) {
            class = LOCK_CLASS(ilk);
            class->lc_unlock(ilk);
        }
        return (0);
    } else {
        return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
            LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
    }
}
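
/*
 * The fast path above only attempts a single uncontested transition per
 * operation; anything more complicated (pending waiters, recursion,
 * LK_NOSHARE rewriting, actually sleeping) is delegated wholesale to
 * __lockmgr_args().
 */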
int
lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk)
{
    struct lock_class *class;
    uintptr_t x, tid;
    const char *file;
    int line;
    bool unlocked;

    file = __FILE__;
    line = __LINE__;

    _lockmgr_assert(lk, KA_LOCKED, file, line);
    unlocked = false;
    x = lk->lk_lock;
    if (__predict_true((x & LK_SHARE) != 0)) {
        if (lockmgr_sunlock_try(lk, x)) {
            lockmgr_note_shared_release(lk, file, line);
            unlocked = true;
        }
    } else {
        tid = (uintptr_t)curthread;
        if (!lockmgr_recursed(lk) &&
            atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
            lockmgr_note_exclusive_release(lk, file, line);
            unlocked = true;
        }
    }
    if (__predict_true(unlocked)) {
        if (__predict_false(flags & LK_INTERLOCK)) {
            class = LOCK_CLASS(ilk);
            class->lc_unlock(ilk);
        }
        return (0);
    } else {
        return (__lockmgr_args(lk, flags | LK_RELEASE, ilk,
            LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
            LOCK_FILE, LOCK_LINE));
    }
}
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
    GIANT_DECLARE;
    struct lock_class *class;
    const char *iwmesg;
    uintptr_t tid, v, x;
    u_int op, realexslp;
    int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
    uint64_t waittime = 0;
    int contested = 0;
#endif

    error = 0;
    tid = (uintptr_t)curthread;
    op = (flags & LK_TYPE_MASK);
    iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
    ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
    itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
    MPASS((flags & ~LK_TOTAL_MASK) == 0);
    KASSERT((op & (op - 1)) == 0,
        ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
    KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
        (op != LK_DOWNGRADE && op != LK_RELEASE),
        ("%s: Invalid flags for the requested operation @ %s:%d",
        __func__, file, line));
    KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
        ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
        __func__, file, line));
    KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
        ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
        lk->lock_object.lo_name, file, line));
    class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
    if (panicstr != NULL) {
        if (flags & LK_INTERLOCK)
            class->lc_unlock(ilk);
        return (0);
    }

    if (lk->lock_object.lo_flags & LK_NOSHARE) {
        switch (op) {
        case LK_SHARED:
            op = LK_EXCLUSIVE;
            break;
        case LK_UPGRADE:
        case LK_TRYUPGRADE:
        case LK_DOWNGRADE:
            _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
                file, line);
            if (flags & LK_INTERLOCK)
                class->lc_unlock(ilk);
            return (0);
        }
    }
    wakeup_swapper = 0;
    switch (op) {
    case LK_SHARED:
        if (LK_CAN_WITNESS(flags))
            WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
                file, line, flags & LK_INTERLOCK ? ilk : NULL);
        for (;;) {
            if (lockmgr_slock_try(lk, &x, flags))
                break;
#ifdef HWPMC_HOOKS
            PMC_SOFT_CALL( , , lock, failed);
#endif
            lock_profile_obtain_lock_failed(&lk->lock_object,
                &contested, &waittime);
            /*
             * If the lock is already held by curthread in
             * exclusive mode, avoid a deadlock.
             */
            if (LK_HOLDER(x) == tid) {
                LOCK_LOG2(lk,
                    "%s: %p already held in exclusive mode",
                    __func__, lk);
                error = EDEADLK;
                break;
            }

            /*
             * If the lock is not expected to sleep, just give up
             * and return.
             */
            if (LK_TRYOP(flags)) {
                LOCK_LOG2(lk, "%s: %p fails the try operation",
                    __func__, lk);
                error = EBUSY;
                break;
            }
            /*
             * Acquire the sleepqueue chain lock because we
             * probably will need to manipulate waiters flags.
             */
            sleepq_lock(&lk->lock_object);
            x = lk->lk_lock;

            /*
             * If the lock can be acquired in shared mode, try
             * again.
             */
            if (LK_CAN_SHARE(x, flags)) {
                sleepq_release(&lk->lock_object);
                continue;
            }
            /*
             * Try to set the LK_SHARED_WAITERS flag.  If we fail,
             * loop back and retry.
             */
            if ((x & LK_SHARED_WAITERS) == 0) {
                if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
                    x | LK_SHARED_WAITERS)) {
                    sleepq_release(&lk->lock_object);
                    continue;
                }
                LOCK_LOG2(lk, "%s: %p set shared waiters flag",
                    __func__, lk);
            }
            /*
             * Since we have been unable to acquire the shared
             * lock and the shared waiters flag is set, we will
             * sleep.
             */
            error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
                SQ_SHARED_QUEUE);
            flags &= ~LK_INTERLOCK;
            if (error) {
                LOCK_LOG3(lk,
                    "%s: interrupted sleep for %p with %d",
                    __func__, lk, error);
                break;
            }
            LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
                __func__, lk);
        }
        if (error == 0) {
#ifdef LOCK_PROFILING
            lockmgr_note_shared_acquire(lk, contested, waittime,
                file, line, flags);
#else
            lockmgr_note_shared_acquire(lk, 0, 0, file, line,
                flags);
#endif
        }
        break;
    case LK_UPGRADE:
    case LK_TRYUPGRADE:
        _lockmgr_assert(lk, KA_SLOCKED, file, line);
        v = lk->lk_lock;
        x = v & LK_ALL_WAITERS;
        v &= LK_EXCLUSIVE_SPINNERS;

        /*
         * Try to switch from one shared lock to an exclusive one.
         * We need to preserve waiters flags during the operation.
         */
        if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
            tid | x)) {
            LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
                line);
            WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
                LK_TRYWIT(flags), file, line);
            TD_SLOCKS_DEC(curthread);
            break;
        }

        /*
         * In LK_TRYUPGRADE mode, do not drop the lock,
         * returning EBUSY instead.
         */
        if (op == LK_TRYUPGRADE) {
            LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
                __func__, lk);
            error = EBUSY;
            break;
        }

        /*
         * We have been unable to succeed in upgrading, so just
         * give up the shared lock.
         */
        wakeup_swapper |= wakeupshlk(lk, file, line);

        /* FALLTHROUGH */
    case LK_EXCLUSIVE:
        if (LK_CAN_WITNESS(flags))
            WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
                ilk : NULL);

        /*
         * If curthread already holds the lock and this one is
         * allowed to recurse, simply recurse on it.
         */
        if (lockmgr_xlocked(lk)) {
            if ((flags & LK_CANRECURSE) == 0 &&
                (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

                /*
                 * If the lock is not expected to panic,
                 * just give up and return.
                 */
                if (LK_TRYOP(flags)) {
                    LOCK_LOG2(lk,
                        "%s: %p fails the try operation",
                        __func__, lk);
                    error = EBUSY;
                    break;
                }
                if (flags & LK_INTERLOCK)
                    class->lc_unlock(ilk);
                panic("%s: recursing on non-recursive lockmgr %s @ %s:%d\n",
                    __func__, iwmesg, file, line);
            }
            lk->lk_recurse++;
            LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
            LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
                lk->lk_recurse, file, line);
            WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
                LK_TRYWIT(flags), file, line);
            TD_LOCKS_INC(curthread);
            break;
        }
        for (;;) {
            if (lk->lk_lock == LK_UNLOCKED &&
                atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
                tid))
                break;
#ifdef HWPMC_HOOKS
            PMC_SOFT_CALL( , , lock, failed);
#endif
            lock_profile_obtain_lock_failed(&lk->lock_object,
                &contested, &waittime);

            /*
             * If the lock is not expected to sleep, just give up
             * and return.
             */
            if (LK_TRYOP(flags)) {
                LOCK_LOG2(lk, "%s: %p fails the try operation",
                    __func__, lk);
                error = EBUSY;
                break;
            }

            /*
             * Acquire the sleepqueue chain lock because we
             * probably will need to manipulate waiters flags.
             */
            sleepq_lock(&lk->lock_object);
            x = lk->lk_lock;
            /*
             * If the lock has been released while we spun on
             * the sleepqueue chain lock, just try again.
             */
            if (x == LK_UNLOCKED) {
                sleepq_release(&lk->lock_object);
                continue;
            }

            /*
             * The lock can be in the state where there is a
             * pending queue of waiters, but still no owner.
             * This happens when the lock is contested and an
             * owner is going to claim the lock.
             * If curthread is the one successfully acquiring it,
             * claim lock ownership and return, preserving waiters
             * flags.
             */
            v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
            if ((x & ~v) == LK_UNLOCKED) {
                v &= ~LK_EXCLUSIVE_SPINNERS;
                if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
                    tid | v)) {
                    sleepq_release(&lk->lock_object);
                    LOCK_LOG2(lk,
                        "%s: %p claimed by a new writer",
                        __func__, lk);
                    break;
                }
                sleepq_release(&lk->lock_object);
                continue;
            }
            /*
             * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
             * fail, loop back and retry.
             */
            if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
                if (!atomic_cmpset_ptr(&lk->lk_lock, x,
                    x | LK_EXCLUSIVE_WAITERS)) {
                    sleepq_release(&lk->lock_object);
                    continue;
                }
                LOCK_LOG2(lk, "%s: %p set excl waiters flag",
                    __func__, lk);
            }
            /*
             * Since we have been unable to acquire the
             * exclusive lock and the exclusive waiters flag
             * is set, we will sleep.
             */
            error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
                SQ_EXCLUSIVE_QUEUE);
            flags &= ~LK_INTERLOCK;
            if (error) {
                LOCK_LOG3(lk,
                    "%s: interrupted sleep for %p with %d",
                    __func__, lk, error);
                break;
            }
            LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
                __func__, lk);
        }
        if (error == 0) {
#ifdef LOCK_PROFILING
            lockmgr_note_exclusive_acquire(lk, contested, waittime,
                file, line, flags);
#else
            lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
                flags);
#endif
        }
        break;
    case LK_DOWNGRADE:
        _lockmgr_assert(lk, KA_XLOCKED, file, line);
        LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
        WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

        /*
         * Panic if the lock is recursed.
         */
        if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
            if (flags & LK_INTERLOCK)
                class->lc_unlock(ilk);
            panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
                __func__, iwmesg, file, line);
        }
        TD_SLOCKS_INC(curthread);

        /*
         * In order to preserve waiters flags, just spin.
         */
        for (;;) {
            x = lk->lk_lock;
            MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
            x &= LK_ALL_WAITERS;
            if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
                LK_SHARERS_LOCK(1) | x))
                break;
            cpu_spinwait();
        }
        break;
    case LK_RELEASE:
        _lockmgr_assert(lk, KA_LOCKED, file, line);
        x = lk->lk_lock;

        if ((x & LK_SHARE) == 0) {

            /*
             * As a first option, treat the lock as if it had no
             * waiters.
             * Fix up the tid var if the lock has been disowned.
             */
            if (LK_HOLDER(x) == LK_KERNPROC)
                tid = LK_KERNPROC;
            else {
                WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
                    file, line);
                TD_LOCKS_DEC(curthread);
            }
            LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
                lk->lk_recurse, file, line);

            /*
             * The lock is held in exclusive mode.
             * If the lock is recursed also, then unrecurse it.
             */
            if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
                LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
                    lk);
                lk->lk_recurse--;
                break;
            }
            if (tid != LK_KERNPROC)
                lock_profile_release_lock(&lk->lock_object);

            if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
                LK_UNLOCKED))
                break;

            sleepq_lock(&lk->lock_object);
            x = lk->lk_lock;
            v = LK_UNLOCKED;
            /*
             * If the lock has exclusive waiters, give them
             * preference in order to avoid deadlock with
             * shared runners-up.
             * If interruptible sleeps left the exclusive queue
             * empty, avoid starvation of the threads sleeping
             * on the shared queue by giving them precedence
             * and cleaning up the exclusive waiters bit anyway.
             * Note that the lk_exslpfail count may overstate
             * the real number of waiters with the LK_SLEEPFAIL
             * flag set, because such waiters may also use
             * interruptible sleeps; treat lk_exslpfail as an
             * upper bound, including in the edge cases.
             */
            MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
            realexslp = sleepq_sleepcnt(&lk->lock_object,
                SQ_EXCLUSIVE_QUEUE);
            if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
                if (lk->lk_exslpfail < realexslp) {
                    lk->lk_exslpfail = 0;
                    queue = SQ_EXCLUSIVE_QUEUE;
                    v |= (x & LK_SHARED_WAITERS);
                } else {
                    lk->lk_exslpfail = 0;
                    LOCK_LOG2(lk,
                        "%s: %p has only LK_SLEEPFAIL sleepers",
                        __func__, lk);
                    LOCK_LOG2(lk,
                        "%s: %p waking up threads on the exclusive queue",
                        __func__, lk);
                    wakeup_swapper =
                        sleepq_broadcast(&lk->lock_object,
                        SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
                    queue = SQ_SHARED_QUEUE;
                }
            } else {

                /*
                 * Exclusive waiters sleeping with LK_SLEEPFAIL
                 * on and using interruptible sleeps/timeout
                 * may have left spurious lk_exslpfail counts
                 * on, so clean it up anyway.
                 */
                lk->lk_exslpfail = 0;
                queue = SQ_SHARED_QUEUE;
            }
            LOCK_LOG3(lk,
                "%s: %p waking up threads on the %s queue",
                __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
                "exclusive");
            atomic_store_rel_ptr(&lk->lk_lock, v);
            wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
                SLEEPQ_LK, 0, queue);
            sleepq_release(&lk->lock_object);
            break;
        } else
            wakeup_swapper = wakeupshlk(lk, file, line);
        break;
    case LK_DRAIN:
        if (LK_CAN_WITNESS(flags))
            WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
                ilk : NULL);

        /*
         * Trying to drain a lock we already own will result in a
         * deadlock.
         */
        if (lockmgr_xlocked(lk)) {
            if (flags & LK_INTERLOCK)
                class->lc_unlock(ilk);
            panic("%s: draining %s with the lock held @ %s:%d\n",
                __func__, iwmesg, file, line);
        }
        for (;;) {
            if (lk->lk_lock == LK_UNLOCKED &&
                atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
                tid))
                break;

#ifdef HWPMC_HOOKS
            PMC_SOFT_CALL( , , lock, failed);
#endif
            lock_profile_obtain_lock_failed(&lk->lock_object,
                &contested, &waittime);

            /*
             * If the lock is not expected to sleep, just give up
             * and return.
             */
            if (LK_TRYOP(flags)) {
                LOCK_LOG2(lk, "%s: %p fails the try operation",
                    __func__, lk);
                error = EBUSY;
                break;
            }

            /*
             * Acquire the sleepqueue chain lock because we
             * probably will need to manipulate waiters flags.
             */
            sleepq_lock(&lk->lock_object);
            x = lk->lk_lock;
            /*
             * If the lock has been released while we spun on
             * the sleepqueue chain lock, just try again.
             */
            if (x == LK_UNLOCKED) {
                sleepq_release(&lk->lock_object);
                continue;
            }

            v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
            if ((x & ~v) == LK_UNLOCKED) {
                v = (x & ~LK_EXCLUSIVE_SPINNERS);
                /*
                 * If interruptible sleeps left the exclusive
                 * queue empty, avoid starvation of the
                 * threads sleeping on the shared queue by
                 * giving them precedence and cleaning up the
                 * exclusive waiters bit anyway.
                 * Note that the lk_exslpfail count may
                 * overstate the real number of waiters with
                 * the LK_SLEEPFAIL flag set, because such
                 * waiters may also use interruptible sleeps;
                 * treat lk_exslpfail as an upper bound,
                 * including in the edge cases.
                 */
                if (v & LK_EXCLUSIVE_WAITERS) {
                    queue = SQ_EXCLUSIVE_QUEUE;
                    v &= ~LK_EXCLUSIVE_WAITERS;
                } else {

                    /*
                     * Exclusive waiters sleeping with
                     * LK_SLEEPFAIL on and using
                     * interruptible sleeps/timeout may
                     * have left spurious lk_exslpfail
                     * counts on, so clean it up anyway.
                     */
                    MPASS(v & LK_SHARED_WAITERS);
                    lk->lk_exslpfail = 0;
                    queue = SQ_SHARED_QUEUE;
                    v &= ~LK_SHARED_WAITERS;
                }
                if (queue == SQ_EXCLUSIVE_QUEUE) {
                    realexslp =
                        sleepq_sleepcnt(&lk->lock_object,
                        SQ_EXCLUSIVE_QUEUE);
                    if (lk->lk_exslpfail >= realexslp) {
                        lk->lk_exslpfail = 0;
                        queue = SQ_SHARED_QUEUE;
                        v &= ~LK_SHARED_WAITERS;
                        if (realexslp != 0) {
                            LOCK_LOG2(lk,
                                "%s: %p has only LK_SLEEPFAIL sleepers",
                                __func__, lk);
                            LOCK_LOG2(lk,
                                "%s: %p waking up threads on the exclusive queue",
                                __func__, lk);
                            wakeup_swapper =
                                sleepq_broadcast(
                                &lk->lock_object,
                                SLEEPQ_LK, 0,
                                SQ_EXCLUSIVE_QUEUE);
                        }
                    } else
                        lk->lk_exslpfail = 0;
                }
                if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
                    sleepq_release(&lk->lock_object);
                    continue;
                }
                LOCK_LOG3(lk,
                    "%s: %p waking up all threads on the %s queue",
                    __func__, lk, queue == SQ_SHARED_QUEUE ?
                    "shared" : "exclusive");
                wakeup_swapper |= sleepq_broadcast(
                    &lk->lock_object, SLEEPQ_LK, 0, queue);

                /*
                 * If shared waiters have been woken up, we
                 * need to wait for one of them to acquire the
                 * lock before setting the exclusive waiters
                 * bit, in order to avoid a deadlock.
                 */
                if (queue == SQ_SHARED_QUEUE) {
                    for (v = lk->lk_lock;
                        (v & LK_SHARE) && !LK_SHARERS(v);
                        v = lk->lk_lock)
                        cpu_spinwait();
                }
            }
            /*
             * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
             * fail, loop back and retry.
             */
            if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
                if (!atomic_cmpset_ptr(&lk->lk_lock, x,
                    x | LK_EXCLUSIVE_WAITERS)) {
                    sleepq_release(&lk->lock_object);
                    continue;
                }
                LOCK_LOG2(lk, "%s: %p set drain waiters flag",
                    __func__, lk);
            }
            /*
             * Since we have been unable to acquire the
             * exclusive lock and the exclusive waiters flag
             * is set, we will sleep.
             */
            if (flags & LK_INTERLOCK) {
                class->lc_unlock(ilk);
                flags &= ~LK_INTERLOCK;
            }
            GIANT_SAVE();
            sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
                SQ_EXCLUSIVE_QUEUE);
            sleepq_wait(&lk->lock_object, ipri & PRIMASK);
            GIANT_RESTORE();
            LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
                __func__, lk);
        }
        if (error == 0) {
            lock_profile_obtain_lock_success(&lk->lock_object,
                contested, waittime, file, line);
            LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
                lk->lk_recurse, file, line);
            WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
                LK_TRYWIT(flags), file, line);
            TD_LOCKS_INC(curthread);
            STACK_SAVE(lk);
        }
        break;
    default:
        if (flags & LK_INTERLOCK)
            class->lc_unlock(ilk);
        panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
    }

    if (flags & LK_INTERLOCK)
        class->lc_unlock(ilk);
    if (wakeup_swapper)
        kick_proc0();

    return (error);
}
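
/*
 * An interlocked acquisition (an illustrative sketch, assuming a caller
 * that guards its state with a mutex "m") relies on the LK_INTERLOCK
 * contract implemented above, whereby the interlock is always dropped
 * no matter the outcome:
 *
 *	mtx_lock(&m);
 *	... examine state protected by m ...
 *	error = lockmgr(&lk, LK_EXCLUSIVE | LK_INTERLOCK, &m.lock_object);
 *	... m is no longer held here, whether or not error is 0 ...
 */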
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
    uintptr_t tid, x;

    if (SCHEDULER_STOPPED())
        return;

    tid = (uintptr_t)curthread;
    _lockmgr_assert(lk, KA_XLOCKED, file, line);

    /*
     * Panic if the lock is recursed.
     */
    if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
        panic("%s: disown a recursed lockmgr @ %s:%d\n",
            __func__, file, line);

    /*
     * If the owner is already LK_KERNPROC just skip the whole operation.
     */
    if (LK_HOLDER(lk->lk_lock) != tid)
        return;
    lock_profile_release_lock(&lk->lock_object);
    LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
    WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
    TD_LOCKS_DEC(curthread);
    STACK_SAVE(lk);

    /*
     * In order to preserve waiters flags, just spin.
     */
    for (;;) {
        x = lk->lk_lock;
        MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
        x &= LK_ALL_WAITERS;
        if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
            LK_KERNPROC | x))
            break;
        cpu_spinwait();
    }
}
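
/*
 * Disowning hands a held exclusive lock over to the fictitious owner
 * LK_KERNPROC, e.g. for a buffer lock that an async writer will not be
 * around to unlock itself (the buffer cache's BUF_KERNPROC() is the
 * classic consumer).  Afterwards any thread may perform the eventual
 * LK_RELEASE, and lockmgr_xholder() reports LK_KERNPROC as the owner.
 */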
void
lockmgr_printinfo(const struct lock *lk)
{
    const struct thread *td;
    uintptr_t x;

    if (lk->lk_lock == LK_UNLOCKED)
        printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
    else if (lk->lk_lock & LK_SHARE)
        printf("lock type %s: SHARED (count %ju)\n",
            lk->lock_object.lo_name,
            (uintmax_t)LK_SHARERS(lk->lk_lock));
    else {
        td = lockmgr_xholder(lk);
        if (td == (struct thread *)LK_KERNPROC)
            printf("lock type %s: EXCL by KERNPROC\n",
                lk->lock_object.lo_name);
        else
            printf("lock type %s: EXCL by thread %p "
                "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
                td, td->td_proc->p_pid, td->td_proc->p_comm,
                td->td_tid);
    }

    x = lk->lk_lock;
    if (x & LK_EXCLUSIVE_WAITERS)
        printf(" with exclusive waiters pending\n");
    if (x & LK_SHARED_WAITERS)
        printf(" with shared waiters pending\n");
    if (x & LK_EXCLUSIVE_SPINNERS)
        printf(" with exclusive spinners pending\n");

    STACK_PRINT(lk);
}
int
lockstatus(const struct lock *lk)
{
    uintptr_t v, x;
    int ret;

    ret = LK_SHARED;
    x = lk->lk_lock;
    v = LK_HOLDER(x);

    if ((x & LK_SHARE) == 0) {
        if (v == (uintptr_t)curthread || v == LK_KERNPROC)
            ret = LK_EXCLUSIVE;
        else
            ret = LK_EXCLOTHER;
    } else if (x == LK_UNLOCKED)
        ret = 0;

    return (ret);
}
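
/*
 * Code that must distinguish its own exclusive hold from somebody
 * else's could do something like this (an illustrative sketch):
 *
 *	switch (lockstatus(&lk)) {
 *	case LK_EXCLUSIVE:	... we (or KERNPROC) hold it exclusively ...
 *	case LK_EXCLOTHER:	... another thread holds it exclusively ...
 *	case LK_SHARED:		... held in shared mode ...
 *	case 0:			... unlocked ...
 *	}
 */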
#ifdef INVARIANT_SUPPORT

FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");

#ifndef INVARIANTS
#undef _lockmgr_assert
#endif
void
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
{
    int slocked = 0;

    if (panicstr != NULL)
        return;
    switch (what) {
    case KA_SLOCKED:
    case KA_SLOCKED | KA_NOTRECURSED:
    case KA_SLOCKED | KA_RECURSED:
        slocked = 1;
    case KA_LOCKED:
    case KA_LOCKED | KA_NOTRECURSED:
    case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

        /*
         * We cannot trust WITNESS if the lock is held in exclusive
         * mode and a call to lockmgr_disown() happened.
         * Work around this by skipping the check if the lock is
         * held in exclusive mode, even for the KA_LOCKED case.
         */
        if (slocked || (lk->lk_lock & LK_SHARE)) {
            witness_assert(&lk->lock_object, what, file, line);
            break;
        }
#endif
        if (lk->lk_lock == LK_UNLOCKED ||
            ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
            (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
            panic("Lock %s not %slocked @ %s:%d\n",
                lk->lock_object.lo_name, slocked ? "share" : "",
                file, line);

        if ((lk->lk_lock & LK_SHARE) == 0) {
            if (lockmgr_recursed(lk)) {
                if (what & KA_NOTRECURSED)
                    panic("Lock %s recursed @ %s:%d\n",
                        lk->lock_object.lo_name, file,
                        line);
            } else if (what & KA_RECURSED)
                panic("Lock %s not recursed @ %s:%d\n",
                    lk->lock_object.lo_name, file, line);
        }
        break;
    case KA_XLOCKED:
    case KA_XLOCKED | KA_NOTRECURSED:
    case KA_XLOCKED | KA_RECURSED:
        if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
            panic("Lock %s not exclusively locked @ %s:%d\n",
                lk->lock_object.lo_name, file, line);
        if (lockmgr_recursed(lk)) {
            if (what & KA_NOTRECURSED)
                panic("Lock %s recursed @ %s:%d\n",
                    lk->lock_object.lo_name, file, line);
        } else if (what & KA_RECURSED)
            panic("Lock %s not recursed @ %s:%d\n",
                lk->lock_object.lo_name, file, line);
        break;
    case KA_UNLOCKED:
        if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
            panic("Lock %s exclusively locked @ %s:%d\n",
                lk->lock_object.lo_name, file, line);
        break;
    default:
        panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
            line);
    }
}
#endif
#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
    struct lock *lk;

    lk = td->td_wchan;

    if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
        return (0);
    db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
    if (lk->lk_lock & LK_SHARE)
        db_printf("SHARED (count %ju)\n",
            (uintmax_t)LK_SHARERS(lk->lk_lock));
    else
        db_printf("EXCL\n");
    *ownerp = lockmgr_xholder(lk);

    return (1);
}
static void
db_show_lockmgr(const struct lock_object *lock)
{
    struct thread *td;
    const struct lock *lk;

    lk = (const struct lock *)lock;

    db_printf(" state: ");
    if (lk->lk_lock == LK_UNLOCKED)
        db_printf("UNLOCKED\n");
    else if (lk->lk_lock & LK_SHARE)
        db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
    else {
        td = lockmgr_xholder(lk);
        if (td == (struct thread *)LK_KERNPROC)
            db_printf("XLOCK: LK_KERNPROC\n");
        else
            db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                td->td_tid, td->td_proc->p_pid,
                td->td_proc->p_comm);
        if (lockmgr_recursed(lk))
            db_printf(" recursed: %d\n", lk->lk_recurse);
    }
    db_printf(" waiters: ");
    switch (lk->lk_lock & LK_ALL_WAITERS) {
    case LK_SHARED_WAITERS:
        db_printf("shared\n");
        break;
    case LK_EXCLUSIVE_WAITERS:
        db_printf("exclusive\n");
        break;
    case LK_ALL_WAITERS:
        db_printf("shared and exclusive\n");
        break;
    default:
        db_printf("none\n");
    }
    db_printf(" spinners: ");
    if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
        db_printf("exclusive\n");
    else
        db_printf("none\n");
}
#endif