 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
#include "opt_adaptive_lockmgrs.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>
CTASSERT(((LK_ADAPTIVE | LK_EXSLPFAIL | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_EXSLPFAIL | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define SQ_EXCLUSIVE_QUEUE	0
#define SQ_SHARED_QUEUE		1

#ifdef ADAPTIVE_LOCKMGRS
#define ALK_RETRIES		10
#define ALK_LOOPS		10000
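
/*
 * Descriptive note: ALK_RETRIES bounds how many times the adaptive
 * shared-spinning path below may be retried before the thread falls back
 * to sleeping, while ALK_LOOPS bounds the number of iterations of each
 * individual spin attempt.
 */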
#define _lockmgr_assert(lk, what, file, line)
#define TD_LOCKS_INC(td)
#define TD_LOCKS_DEC(td)

#define TD_LOCKS_INC(td)	((td)->td_locks++)
#define TD_LOCKS_DEC(td)	((td)->td_locks--)

#define TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#define STACK_PRINT(lk)
#define STACK_SAVE(lk)
#define STACK_ZERO(lk)

#define STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#define LOCK_LOG2(lk, string, arg1, arg2) \
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define GIANT_DECLARE \
	WITNESS_SAVE_DECL(Giant)
#define GIANT_RESTORE() do { \
	WITNESS_RESTORE(&Giant.lock_object, Giant); \
#define GIANT_SAVE() do { \
	if (mtx_owned(&Giant)) { \
		WITNESS_SAVE(&Giant.lock_object, Giant); \
		while (mtx_owned(&Giant)) { \
			mtx_unlock(&Giant); \
#define LK_CAN_SHARE(x) \
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 || \
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 || \
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
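
/*
 * Reading LK_CAN_SHARE() literally: a shared request can be granted when
 * LK_SHARE is set in the lock word (i.e. the lock is not owned exclusively)
 * and, in addition, either the exclusive waiters bit or the exclusive
 * spinners bit is clear, or the calling thread already holds shared lockmgr
 * locks, or it has deadlock treatment (TDP_DEADLKTREAT) enabled.
 */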
#define LK_TRYOP(x) \

#define LK_CAN_WITNESS(x) \
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define LK_TRYWIT(x) \
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define LK_CAN_ADAPT(lk, f) \
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 && \
	((f) & LK_SLEEPFAIL) == 0)

#define lockmgr_disowned(lk) \
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define lockmgr_xlocked(lk) \
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
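
/*
 * lockmgr_disowned() and lockmgr_xlocked() mask every flag bit except
 * LK_SHARE out of the lock word and compare the remainder against
 * LK_KERNPROC and curthread respectively, so both are only meaningful
 * while the lock is held in exclusive mode.
 */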
static void	assert_lockmgr(struct lock_object *lock, int how);
static void	db_show_lockmgr(struct lock_object *lock);
static void	lock_lockmgr(struct lock_object *lock, int how);
static int	owner_lockmgr(struct lock_object *lock, struct thread **owner);
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
	.lc_ddb_show = db_show_lockmgr,
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
	.lc_owner = owner_lockmgr,
static __inline struct thread *
lockmgr_xholder(struct lock *lk)
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));

 * It assumes the sleepqueue lock is held and returns with it unheld.
 * It also assumes the generic interlock is sane and previously checked.
 * If LK_INTERLOCK is specified, the interlock is not reacquired after the
 * sleep.
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)

	struct lock_class *class;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);

	 * LK_EXSLPFAIL is not invariant during the lock pattern, but it is
	 * always protected by the sleepqueue spinlock, so it is safe to
	 * handle it within lo_flags.
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lock_object.lo_flags |= LK_EXSLPFAIL;

	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	 * Decide how to actually sleep.
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
		error = sleepq_wait_sig(&lk->lock_object, pri);
		sleepq_wait(&lk->lock_object, pri);

	if ((flags & LK_SLEEPFAIL) && error == 0)
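
/*
 * Summary of the sleep selection above: with both a timeout (LK_TIMELOCK
 * and a non-zero timo) and PCATCH the thread performs an interruptible
 * timed sleep, with only a timeout a plain timed sleep, with only PCATCH
 * an interruptible sleep, and otherwise an uninterruptible sleepq_wait().
 * When LK_SLEEPFAIL is set, a sleep that completes without error is still
 * reported to the caller as a failed lock attempt.
 */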
wakeupshlk(struct lock *lk, const char *file, int line)

	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	 * If there is more than one shared lock held, just drop one.
	if (LK_SHARERS(x) > 1) {
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,

	 * If there are no waiters on the exclusive queue, drop the lock.
	if ((x & LK_ALL_WAITERS) == 0) {
		MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))

	 * We should have a sharer with waiters, so enter the hard
	 * path in order to handle wakeups correctly.
	sleepq_lock(&lk->lock_object);
	x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);

	 * If the lock has exclusive waiters, give them preference in
	 * order to avoid deadlock with shared runners up.
	 * If interruptible sleeps left the exclusive queue empty,
	 * avoid starvation of the threads sleeping on the shared
	 * queue by giving them precedence and cleaning up the
	 * exclusive waiters bit anyway.
	 * Please note that the LK_EXSLPFAIL flag may be lying about
	 * the real presence of waiters with the LK_SLEEPFAIL flag on
	 * because they may be used in conjunction with interruptible
	 * sleeps.
	realexslp = sleepq_sleepcnt(&lk->lock_object,
	if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
		if ((lk->lock_object.lo_flags & LK_EXSLPFAIL) == 0) {
			lk->lock_object.lo_flags &= ~LK_EXSLPFAIL;
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
			lk->lock_object.lo_flags &= ~LK_EXSLPFAIL;
			    "%s: %p has only LK_SLEEPFAIL sleepers",
			    "%s: %p waking up threads on the exclusive queue",
			sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
			queue = SQ_SHARED_QUEUE;

		 * Exclusive waiters sleeping with LK_SLEEPFAIL on
		 * and using interruptible sleeps/timeout may have
		 * left a spurious LK_EXSLPFAIL flag on, so clean
		 * it up anyway.
		lk->lock_object.lo_flags &= ~LK_EXSLPFAIL;
		queue = SQ_SHARED_QUEUE;

	if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		sleepq_release(&lk->lock_object);

	LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
	    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
	wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
	sleepq_release(&lk->lock_object);

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
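
/*
 * wakeupshlk() returns non-zero when one of the sleepq_broadcast() calls
 * above reported that the swapper needs to be woken up; callers are
 * expected to propagate this value so the swapper can be kicked after the
 * lock operation completes.
 */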
assert_lockmgr(struct lock_object *lock, int what)
	panic("lockmgr locks do not support assertions");

lock_lockmgr(struct lock_object *lock, int how)
	panic("lockmgr locks do not support sleep interlocking");

unlock_lockmgr(struct lock_object *lock)
	panic("lockmgr locks do not support sleep interlocking");

owner_lockmgr(struct lock_object *lock, struct thread **owner)
	panic("lockmgr locks do not support owner inquiring");
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);

lockdestroy(struct lock *lk)

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT((lk->lock_object.lo_flags & LK_EXSLPFAIL) == 0,
	    ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
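
/*
 * Example usage (an illustrative sketch, not part of this file): a typical
 * consumer embeds a struct lock in its own structure, initializes it with
 * lockinit() and acquires/releases it through the lockmgr() wrapper macro
 * from <sys/lockmgr.h>.  The "foo" structure, the "foolk" wait message and
 * the PRIBIO priority below are arbitrary choices for illustration only.
 *
 *	struct foo {
 *		struct lock	f_lock;
 *		int		f_data;
 *	};
 *
 *	static void
 *	foo_init(struct foo *fp)
 *	{
 *
 *		lockinit(&fp->f_lock, PRIBIO, "foolk", 0, 0);
 *	}
 *
 *	static void
 *	foo_set(struct foo *fp, int v)
 *	{
 *
 *		lockmgr(&fp->f_lock, LK_EXCLUSIVE, NULL);
 *		fp->f_data = v;
 *		lockmgr(&fp->f_lock, LK_RELEASE, NULL);
 *	}
 *
 *	static void
 *	foo_destroy(struct foo *fp)
 *	{
 *
 *		lockdestroy(&fp->f_lock);
 *	}
 */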
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)

	struct lock_class *class;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;

	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,

		 * If no other thread has an exclusive lock, or
		 * no exclusive waiter is present, bump the count of
		 * sharers. Since we have to preserve the state of
		 * waiters, if we fail to acquire the shared lock
		 * loop back and retry.
		if (LK_CAN_SHARE(x)) {
			if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,

		lock_profile_obtain_lock_failed(&lk->lock_object,
		    &contested, &waittime);

		 * If the lock is already held by curthread in
		 * exclusive mode, avoid a deadlock.
		if (LK_HOLDER(x) == tid) {
			    "%s: %p already held in exclusive mode",

		 * If the operation is not allowed to sleep, just
		 * give up and return.
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",

#ifdef ADAPTIVE_LOCKMGRS
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes. We need to handle both possible states here
		 * because, for a failed acquisition, the lock can be
		 * either held in exclusive mode or in shared mode
		 * (for the writer starvation avoidance technique).
		if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
		    LK_HOLDER(x) != LK_KERNPROC) {
			owner = (struct thread *)LK_HOLDER(x);
			if (LOCK_LOG_TEST(&lk->lock_object, 0))
				    "%s: spinning on %p held by %p",
				    __func__, lk, owner);

			 * If we also hold an interlock, drop it in
			 * order to avoid a deadlock if the lockmgr
			 * owner is adaptively spinning on the
			 * interlock itself.
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			while (LK_HOLDER(lk->lk_lock) ==
			    (uintptr_t)owner && TD_IS_RUNNING(owner))
		} else if (LK_CAN_ADAPT(lk, flags) &&
		    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
		    spintries < ALK_RETRIES) {
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			for (i = 0; i < ALK_LOOPS; i++) {
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					    "%s: shared spinning on %p with %u and %u",
					    __func__, lk, spintries, i);
				if ((x & LK_SHARE) == 0 ||
				    LK_CAN_SHARE(x) != 0)

		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		sleepq_lock(&lk->lock_object);

		 * If the lock can be acquired in shared mode, try
		 * again.
		if (LK_CAN_SHARE(x)) {
			sleepq_release(&lk->lock_object);

#ifdef ADAPTIVE_LOCKMGRS
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owner) while we were waiting on the sleepqueue
		 * chain lock. If so, drop the sleepqueue lock and try
		 * again.
		if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
		    LK_HOLDER(x) != LK_KERNPROC) {
			owner = (struct thread *)LK_HOLDER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&lk->lock_object);

		 * Try to set the LK_SHARED_WAITERS flag. If we fail,
		 * loop back and retry.
		if ((x & LK_SHARED_WAITERS) == 0) {
			if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
			    x | LK_SHARED_WAITERS)) {
				sleepq_release(&lk->lock_object);
			LOCK_LOG2(lk, "%s: %p set shared waiters flag",

		 * Since we have been unable to acquire the shared
		 * lock and the shared waiters flag is set, we will
		 * sleep.
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		flags &= ~LK_INTERLOCK;
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",

		lock_profile_obtain_lock_success(&lk->lock_object,
		    contested, waittime, file, line);
		LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
		WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
		TD_LOCKS_INC(curthread);
		TD_SLOCKS_INC(curthread);
	_lockmgr_assert(lk, KA_SLOCKED, file, line);
	x = v & LK_ALL_WAITERS;
	v &= LK_EXCLUSIVE_SPINNERS;

	 * Try to switch from one shared lock to an exclusive one.
	 * We need to preserve waiters flags during the operation.
	if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
		WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
		    LK_TRYWIT(flags), file, line);
		TD_SLOCKS_DEC(curthread);

	 * We have been unable to succeed in upgrading, so just
	 * give up the shared lock.
	wakeup_swapper |= wakeupshlk(lk, file, line);
	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
		    LOP_EXCLUSIVE, file, line, ilk);

	 * If curthread already holds the lock and this one is
	 * allowed to recurse, simply recurse on it.
	if (lockmgr_xlocked(lk)) {
		if ((flags & LK_CANRECURSE) == 0 &&
		    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

			 * If the operation is not expected to panic,
			 * just give up and return.
			if (LK_TRYOP(flags)) {
				    "%s: %p fails the try operation",
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);

		LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
		LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
		    lk->lk_recurse, file, line);
		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
		    LK_TRYWIT(flags), file, line);
		TD_LOCKS_INC(curthread);
	while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		lock_profile_obtain_lock_failed(&lk->lock_object,
		    &contested, &waittime);

		 * If the operation is not allowed to sleep, just
		 * give up and return.
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",

#ifdef ADAPTIVE_LOCKMGRS
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
		    LK_HOLDER(x) != LK_KERNPROC) {
			owner = (struct thread *)LK_HOLDER(x);
			if (LOCK_LOG_TEST(&lk->lock_object, 0))
				    "%s: spinning on %p held by %p",
				    __func__, lk, owner);

			 * If we also hold an interlock, drop it in
			 * order to avoid a deadlock if the lockmgr
			 * owner is adaptively spinning on the
			 * interlock itself.
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			while (LK_HOLDER(lk->lk_lock) ==
			    (uintptr_t)owner && TD_IS_RUNNING(owner))
		} else if (LK_CAN_ADAPT(lk, flags) &&
		    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
		    spintries < ALK_RETRIES) {
			if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
			    !atomic_cmpset_ptr(&lk->lk_lock, x,
			    x | LK_EXCLUSIVE_SPINNERS))
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			for (i = 0; i < ALK_LOOPS; i++) {
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					    "%s: shared spinning on %p with %u and %u",
					    __func__, lk, spintries, i);
				    LK_EXCLUSIVE_SPINNERS) == 0)

		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		sleepq_lock(&lk->lock_object);

		 * If the lock has been released while we spun on
		 * the sleepqueue chain lock, just try again.
		if (x == LK_UNLOCKED) {
			sleepq_release(&lk->lock_object);

#ifdef ADAPTIVE_LOCKMGRS
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owner) while we were waiting on the sleepqueue
		 * chain lock. If so, drop the sleepqueue lock and try
		 * again.
		if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
		    LK_HOLDER(x) != LK_KERNPROC) {
			owner = (struct thread *)LK_HOLDER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&lk->lock_object);

		 * The lock can be in the state where there is a
		 * pending queue of waiters, but still no owner.
		 * This happens when the lock is contested and an
		 * owner is going to claim the lock.
		 * If curthread is the one successfully acquiring it,
		 * claim lock ownership and return, preserving waiters
		 * flags.
		v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		if ((x & ~v) == LK_UNLOCKED) {
			v &= ~LK_EXCLUSIVE_SPINNERS;
			if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				sleepq_release(&lk->lock_object);
				    "%s: %p claimed by a new writer",
			sleepq_release(&lk->lock_object);

		 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
		 * fail, loop back and retry.
		if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
			if (!atomic_cmpset_ptr(&lk->lk_lock, x,
			    x | LK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&lk->lock_object);
			LOCK_LOG2(lk, "%s: %p set excl waiters flag",

		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we will
		 * sleep.
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		flags &= ~LK_INTERLOCK;
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",

	lock_profile_obtain_lock_success(&lk->lock_object,
	    contested, waittime, file, line);
	LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
	    lk->lk_recurse, file, line);
	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
	    LK_TRYWIT(flags), file, line);
	TD_LOCKS_INC(curthread);
	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
	LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
	WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
	TD_SLOCKS_INC(curthread);

	 * In order to preserve waiters flags, just spin.
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_SHARERS_LOCK(1) | x))
	_lockmgr_assert(lk, KA_LOCKED, file, line);

	if ((x & LK_SHARE) == 0) {

		 * As a first option, treat the lock as if it has no
		 * recursion.
		 * Fix-up the tid var if the lock has been disowned.
		if (LK_HOLDER(x) == LK_KERNPROC)
			WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
		TD_LOCKS_DEC(curthread);
		LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
		    lk->lk_recurse, file, line);

		 * The lock is held in exclusive mode.
		 * If the lock is also recursed, then unrecurse it.
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			LOCK_LOG2(lk, "%s: %p unrecursing", __func__,

		if (tid != LK_KERNPROC)
			lock_profile_release_lock(&lk->lock_object);

		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,

		sleepq_lock(&lk->lock_object);

		 * If the lock has exclusive waiters, give them
		 * preference in order to avoid deadlock with
		 * shared runners up.
		 * If interruptible sleeps left the exclusive queue
		 * empty, avoid starvation of the threads sleeping
		 * on the shared queue by giving them precedence
		 * and cleaning up the exclusive waiters bit anyway.
		 * Please note that the LK_EXSLPFAIL flag may be lying
		 * about the real presence of waiters with the
		 * LK_SLEEPFAIL flag on because they may be used in
		 * conjunction with interruptible sleeps.
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if ((lk->lock_object.lo_flags &
			    LK_EXSLPFAIL) == 0) {
				lk->lock_object.lo_flags &=
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
				lk->lock_object.lo_flags &=
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    "%s: %p waking up threads on the exclusive queue",
				sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;

			 * Exclusive waiters sleeping with LK_SLEEPFAIL
			 * on and using interruptible sleeps/timeout
			 * may have left a spurious LK_EXSLPFAIL flag
			 * on, so clean it up anyway.
			lk->lock_object.lo_flags &= ~LK_EXSLPFAIL;
			queue = SQ_SHARED_QUEUE;

		    "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		atomic_store_rel_ptr(&lk->lk_lock, v);
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
		    SLEEPQ_LK, 0, queue);
		sleepq_release(&lk->lock_object);

		wakeup_swapper = wakeupshlk(lk, file, line);
	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
		    LOP_EXCLUSIVE, file, line, ilk);

	 * Trying to drain a lock we already own will result in a
	 * deadlock.
	if (lockmgr_xlocked(lk)) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: draining %s with the lock held @ %s:%d\n",
		    __func__, iwmesg, file, line);

	while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
		lock_profile_obtain_lock_failed(&lk->lock_object,
		    &contested, &waittime);

		 * If the operation is not allowed to sleep, just
		 * give up and return.
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",

		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		sleepq_lock(&lk->lock_object);

		 * If the lock has been released while we spun on
		 * the sleepqueue chain lock, just try again.
		if (x == LK_UNLOCKED) {
			sleepq_release(&lk->lock_object);

		v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		if ((x & ~v) == LK_UNLOCKED) {
			v = (x & ~LK_EXCLUSIVE_SPINNERS);

			 * If interruptible sleeps left the exclusive
			 * queue empty, avoid starvation of the
			 * threads sleeping on the shared queue by
			 * giving them precedence and cleaning up the
			 * exclusive waiters bit anyway.
			 * Please note that the LK_EXSLPFAIL flag may
			 * be lying about the real presence of waiters
			 * with the LK_SLEEPFAIL flag on because they
			 * may be used in conjunction with interruptible
			 * sleeps.
			if (v & LK_EXCLUSIVE_WAITERS) {
				queue = SQ_EXCLUSIVE_QUEUE;
				v &= ~LK_EXCLUSIVE_WAITERS;

				 * Exclusive waiters sleeping with
				 * LK_SLEEPFAIL on and using
				 * interruptible sleeps/timeout may
				 * have left a spurious LK_EXSLPFAIL
				 * flag on, so clean it up anyway.
				MPASS(v & LK_SHARED_WAITERS);
				lk->lock_object.lo_flags &=
				queue = SQ_SHARED_QUEUE;
				v &= ~LK_SHARED_WAITERS;
			if (queue == SQ_EXCLUSIVE_QUEUE) {
				    sleepq_sleepcnt(&lk->lock_object,
				    SQ_EXCLUSIVE_QUEUE);
				if ((lk->lock_object.lo_flags &
				    LK_EXSLPFAIL) == 0) {
					lk->lock_object.lo_flags &=
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
					if (realexslp != 0) {
						    "%s: %p has only LK_SLEEPFAIL sleepers",
						    "%s: %p waking up threads on the exclusive queue",
						    SQ_EXCLUSIVE_QUEUE);
					lk->lock_object.lo_flags &=

			if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
				sleepq_release(&lk->lock_object);
			    "%s: %p waking up all threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ?
			    "shared" : "exclusive");
			wakeup_swapper |= sleepq_broadcast(
			    &lk->lock_object, SLEEPQ_LK, 0, queue);

			 * If shared waiters have been woken up we need
			 * to wait for one of them to acquire the lock
			 * before setting the exclusive waiters flag in
			 * order to avoid a deadlock.
			if (queue == SQ_SHARED_QUEUE) {
				for (v = lk->lk_lock;
				    (v & LK_SHARE) && !LK_SHARERS(v);

		 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
		 * fail, loop back and retry.
		if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
			if (!atomic_cmpset_ptr(&lk->lk_lock, x,
			    x | LK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&lk->lock_object);
			LOCK_LOG2(lk, "%s: %p set drain waiters flag",

		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we will
		 * sleep.
		if (flags & LK_INTERLOCK) {
			class->lc_unlock(ilk);
			flags &= ~LK_INTERLOCK;
		sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
		    SQ_EXCLUSIVE_QUEUE);
		sleepq_wait(&lk->lock_object, ipri & PRIMASK);
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",

	lock_profile_obtain_lock_success(&lk->lock_object,
	    contested, waittime, file, line);
	LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
	    lk->lk_recurse, file, line);
	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
	    LK_TRYWIT(flags), file, line);
	TD_LOCKS_INC(curthread);

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	panic("%s: unknown lockmgr request 0x%x\n", __func__, op);

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
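
/*
 * Note on LK_INTERLOCK, as implemented above: when the flag is passed, the
 * caller-supplied interlock "ilk" is held on entry and is always released
 * through its lock class lc_unlock method, either before sleeping/spinning
 * or on the way out; the interlock is never reacquired on behalf of the
 * caller.
 */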
_lockmgr_disown(struct lock *lk, const char *file, int line)

	if (SCHEDULER_STOPPED())

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

	 * If the owner is already LK_KERNPROC just skip the whole operation.
	if (LK_HOLDER(lk->lk_lock) != tid)

	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);

	 * In order to preserve waiters flags, just spin.
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
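
/*
 * After a successful disown the exclusive owner recorded in the lock word
 * is LK_KERNPROC, so the lock may later be released by a context other than
 * the thread that originally acquired it (the buffer cache, for instance,
 * relies on this for locks released from I/O completion paths).
 */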
lockmgr_printinfo(struct lock *lk)

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
		td = lockmgr_xholder(lk);
		printf("lock type %s: EXCL by thread %p (pid %d)\n",
		    lk->lock_object.lo_name, td, td->td_proc->p_pid);

	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");
lockstatus(struct lock *lk)

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
	} else if (x == LK_UNLOCKED)
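
/*
 * lockstatus() is typically used in assertions; a minimal sketch, assuming
 * a struct lock *lk initialized elsewhere:
 *
 *	KASSERT(lockstatus(lk) == LK_EXCLUSIVE,
 *	    ("caller does not hold lk exclusively"));
 *
 * LK_EXCLUSIVE is returned when curthread (or LK_KERNPROC, for a disowned
 * lock) is the exclusive owner and LK_SHARED when the lock is share-locked.
 */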
#ifdef INVARIANT_SUPPORT

#undef _lockmgr_assert

_lockmgr_assert(struct lock *lk, int what, const char *file, int line)

	if (panicstr != NULL)

	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:

		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is held
		 * in exclusive mode, even for the KA_LOCKED case.
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);

	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);

		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);

		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
lockmgr_chain(struct thread *td, struct thread **ownerp)

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);
db_show_lockmgr(struct lock_object *lock)

	lk = (struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
	if (lockmgr_recursed(lk))
		db_printf(" recursed: %d\n", lk->lk_recurse);

	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		db_printf("none\n");

	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
		db_printf("none\n");