 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH

#include "opt_adaptive_lockmgrs.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1
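
/*
 * lockmgr keeps its sleepers on two distinct sleepqueue queues, one for
 * exclusive waiters and one for shared waiters, selected by the constants
 * above.
 */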
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)

#define	LOCK_LOG2(lk, string, arg1, arg2) \
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3) \
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE \
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do { \
		WITNESS_RESTORE(&Giant.lock_object, Giant); \
#define	GIANT_SAVE() do { \
	if (mtx_owned(&Giant)) { \
		WITNESS_SAVE(&Giant.lock_object, Giant); \
		while (mtx_owned(&Giant)) { \
			mtx_unlock(&Giant); \

#define	LK_CAN_SHARE(x) \
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 || \
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 || \
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
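
/*
 * LK_CAN_SHARE() is the admission test for shared requests: the lock must
 * already be in shared mode and, in addition, either the exclusive-waiters
 * bit or the exclusive-spinners bit must still be clear, unless the
 * requesting thread already holds shared lockmgr locks or runs with
 * TDP_DEADLKTREAT set, in which case it is admitted anyway.
 */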
#define	LK_TRYOP(x) \

#define	LK_CAN_WITNESS(x) \
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x) \
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f) \
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 && \
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk) \
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk) \
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
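
/*
 * Both owner tests strip every flag bit except LK_SHARE from lk_lock and
 * compare the remaining owner field against LK_KERNPROC (disowned) or
 * curthread (exclusively owned by the caller).
 */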
static void	assert_lockmgr(const struct lock_object *lock, int how);
static void	db_show_lockmgr(const struct lock_object *lock);
static void	lock_lockmgr(struct lock_object *lock, int how);
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
	.lc_ddb_show = db_show_lockmgr,
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
	.lc_owner = owner_lockmgr,

#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
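
/*
 * The two knobs above bound the adaptive-spinning fallback: alk_retries
 * limits how many times a thread restarts the shared-owner spin loop and
 * alk_loops limits the length of each pass.  Both are exported read-write
 * under the debug.lockmgr sysctl node declared above.
 */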
static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));

 * It assumes the sleepqueue chain lock is held and returns with it released.
 * It also assumes the generic interlock is sane and previously checked.
 * If LK_INTERLOCK is specified the interlock is not reacquired after the
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
	struct lock_class *class;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	 * Decide which sleep primitive to use.
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
		error = sleepq_wait_sig(&lk->lock_object, pri);
		sleepq_wait(&lk->lock_object, pri);
	if ((flags & LK_SLEEPFAIL) && error == 0)

wakeupshlk(struct lock *lk, const char *file, int line)
	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

		 * If there is more than one shared lock held, just drop one
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,

		 * If there are no waiters on the exclusive queue, drop the
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))

		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);

		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Please note that the lk_exslpfail count may be lying about
		 * the real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may be used in conjunction with interruptible
		 * sleeps, so lk_exslpfail might be considered an upper-limit
		 * bound, including the edge cases.
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
				lk->lk_exslpfail = 0;
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    "%s: %p waking up threads on the exclusive queue",
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;

			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
			sleepq_release(&lk->lock_object);
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		sleepq_release(&lk->lock_object);

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
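
/*
 * The value wakeupshlk() returns (and that __lockmgr_args() below keeps
 * accumulating in wakeup_swapper) is the OR of the sleepq_broadcast()
 * results; a nonzero value tells the caller that the swapper needs to be
 * woken up once the sleepqueue locks have been dropped.
 */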
assert_lockmgr(const struct lock_object *lock, int what)
	panic("lockmgr locks do not support assertions");

lock_lockmgr(struct lock_object *lock, int how)
	panic("lockmgr locks do not support sleep interlocking");

unlock_lockmgr(struct lock_object *lock)
	panic("lockmgr locks do not support sleep interlocking");

owner_lockmgr(const struct lock_object *lock, struct thread **owner)
	panic("lockmgr locks do not support owner inquiring");

lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_exslpfail = 0;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);

/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
lockallowshare(struct lock *lk)
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;

lockallowrecurse(struct lock *lk)
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;

lockdisablerecurse(struct lock *lk)
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;

lockdestroy(struct lock *lk)
	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
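
/*
 * Typical lifecycle of a lockmgr lock as seen from the lockmgr(9)
 * interface (illustrative sketch only; the lock name, wait message and
 * priority below are made up):
 *
 *	struct lock ex_lock;
 *
 *	lockinit(&ex_lock, PVFS, "exlck", 0, LK_CANRECURSE);
 *	...
 *	lockmgr(&ex_lock, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&ex_lock, LK_RELEASE, NULL);
 *	...
 *	lockdestroy(&ex_lock);
 */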
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
	struct lock_class *class;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;

	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(!TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
	if (lk->lock_object.lo_flags & LK_NOSHARE) {
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
			PMC_SOFT_CALL( , , lock, failed);
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			if (LK_HOLDER(x) == tid) {
				    "%s: %p already held in exclusive mode",
			 * If this is a try operation that must not sleep,
			 * just give up.
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
#ifdef ADAPTIVE_LOCKMGRS
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  We need a double-state handle here
			 * because for a failed acquisition the lock can be
			 * either held in exclusive mode or shared mode
			 * (for the writer starvation avoidance technique).
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);
				 * If we are also holding an interlock, drop it
				 * in order to avoid a deadlock if the lockmgr
				 * owner is adaptively spinning on the
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x) != 0)

			 * Acquire the sleepqueue chain lock because we will
			 * probably need to manipulate the waiters flags.
			sleepq_lock(&lk->lock_object);

			 * If the lock can be acquired in shared mode, try
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
#ifdef ADAPTIVE_LOCKMGRS
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and try
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);

			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",

			 * Since we have been unable to acquire the
			 * shared lock and the shared waiters flag is set,
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			flags &= ~LK_INTERLOCK;
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);

		 * We have failed to upgrade, so just give up the
		 * shared lock.
		wakeup_swapper |= wakeupshlk(lk, file, line);

		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
				 * If this is a try operation, just give up
				 * and return instead of panicking.
				if (LK_TRYOP(flags)) {
					    "%s: %p fails the try operation",
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
				panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
			PMC_SOFT_CALL( , , lock, failed);
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);
			 * If this is a try operation that must not sleep,
			 * just give up.
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
#ifdef ADAPTIVE_LOCKMGRS
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);
				 * If we are also holding an interlock, drop it
				 * in order to avoid a deadlock if the lockmgr
				 * owner is adaptively spinning on the
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					    LK_EXCLUSIVE_SPINNERS) == 0)

			 * Acquire the sleepqueue chain lock because we will
			 * probably need to manipulate the waiters flags.
			sleepq_lock(&lk->lock_object);

			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
#ifdef ADAPTIVE_LOCKMGRS
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and try
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);

			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it,
			 * claim lock ownership and return, preserving waiters
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
					sleepq_release(&lk->lock_object);
					    "%s: %p claimed by a new writer",
				sleepq_release(&lk->lock_object);

			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",

			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			flags &= ~LK_INTERLOCK;
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

		 * Panic if the lock is recursed.
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		TD_SLOCKS_INC(curthread);

		 * In order to preserve waiters flags, just spin.
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))

		_lockmgr_assert(lk, KA_LOCKED, file, line);
		if ((x & LK_SHARE) == 0) {
			 * As a first option, treat the lock as if it has not
			 * Fix-up the tid var if the lock has been disowned.
			if (LK_HOLDER(x) == LK_KERNPROC)
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				TD_LOCKS_DEC(curthread);
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			 * The lock is held in exclusive mode.
			 * If it is also recursed, then unrecurse it.
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			sleepq_lock(&lk->lock_object);

			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starvation of the threads sleeping
			 * on the shared queue by giving them precedence
			 * and cleaning up the exclusive waiters bit anyway.
			 * Please note that the lk_exslpfail count may be
			 * lying about the real number of waiters with the
			 * LK_SLEEPFAIL flag on because they may be used in
			 * conjunction with interruptible sleeps, so
			 * lk_exslpfail might be considered an upper-limit
			 * bound, including the edge cases.
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					    "%s: %p has only LK_SLEEPFAIL sleepers",
					    "%s: %p waking up threads on the exclusive queue",
					    sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;

				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeout
				 * may have left spurious lk_exslpfail counts
				 * on, so clean it up anyway.
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;

			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			wakeup_swapper = wakeupshlk(lk, file, line);
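
		/*
		 * LK_DRAIN, handled below, waits until the lock is unlocked
		 * and no thread is sleeping on it any longer, then claims it
		 * exclusively; it is typically the last operation performed
		 * on a lock before lockdestroy().
		 */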
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		 * Trying to drain a lock we already own will result in a
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			PMC_SOFT_CALL( , , lock, failed);
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);
			 * If this is a try operation that must not sleep,
			 * just give up.
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",

			 * Acquire the sleepqueue chain lock because we will
			 * probably need to manipulate the waiters flags.
			sleepq_lock(&lk->lock_object);

			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starvation of the
				 * threads sleeping on the shared queue by
				 * giving them precedence and cleaning up the
				 * exclusive waiters bit anyway.
				 * Please note that the lk_exslpfail count may
				 * be lying about the real number of waiters
				 * with the LK_SLEEPFAIL flag on because they
				 * may be used in conjunction with
				 * interruptible sleeps, so lk_exslpfail might
				 * be considered an upper-limit bound,
				 * including the edge
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;

					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							    "%s: %p has only LK_SLEEPFAIL sleepers",
							    "%s: %p waking up threads on the exclusive queue",
							    SQ_EXCLUSIVE_QUEUE);
						lk->lk_exslpfail = 0;
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
				    "%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				 * If shared waiters have been woken up, we
				 * need to wait for one of them to acquire the
				 * lock before setting the exclusive waiters
				 * flag, in order to avoid a deadlock.
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);

			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",

			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",

			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);

		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
_lockmgr_disown(struct lock *lk, const char *file, int line)
	if (SCHEDULER_STOPPED())
	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);

	 * Panic if the lock is recursed.
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__, file, line);

	 * If the owner is already LK_KERNPROC just skip the whole operation.
	if (LK_HOLDER(lk->lk_lock) != tid)
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);

	 * In order to preserve waiters flags, just spin.
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,

lockmgr_printinfo(const struct lock *lk)
	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
		td = lockmgr_xholder(lk);
		printf("lock type %s: EXCL by thread %p "
		    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);

	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

lockstatus(const struct lock *lk)
	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
	} else if (x == LK_UNLOCKED)

#ifdef INVARIANT_SUPPORT

FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");

#undef	_lockmgr_assert

_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
	if (panicstr != NULL)
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is held
		 * in exclusive mode, even for the KA_LOCKED case.
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,

lockmgr_chain(struct thread *td, struct thread **ownerp)
	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
		db_printf("EXCL\n");
		*ownerp = lockmgr_xholder(lk);

db_show_lockmgr(const struct lock_object *lock)
	const struct lock *lk;

	lk = (const struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
	if (lockmgr_recursed(lk))
		db_printf(" recursed: %d\n", lk->lk_recurse);
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		db_printf("none\n");
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
		db_printf("none\n");