 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
29 #include "opt_adaptive_lockmgrs.h"
31 #include "opt_hwpmc_hooks.h"
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <sys/param.h>
40 #include <sys/lock_profile.h>
41 #include <sys/lockmgr.h>
42 #include <sys/mutex.h>
44 #include <sys/sleepqueue.h>
46 #include <sys/stack.h>
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
51 #include <machine/cpu.h>
58 #include <sys/pmckern.h>
59 PMC_SOFT_DECLARE( , , lock, failed);
62 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
63 (LK_ADAPTIVE | LK_NOSHARE));
64 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
65 ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
67 #define SQ_EXCLUSIVE_QUEUE 0
68 #define SQ_SHARED_QUEUE 1
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			mtx_unlock(&Giant);				\
#define	LK_CAN_SHARE(x, flags)						\
	(((x) & LK_SHARE) &&						\
	(((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 ||	\
	(curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||	\
	(curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
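
/*
 * Illustrative note (not part of the implementation): the lk_lock word
 * either encodes the owning thread pointer (exclusive mode) or a sharer
 * count together with LK_SHARE, with the waiter/spinner bits kept among
 * the LK_FLAGMASK bits.  A hedged sketch of how the word is typically
 * decoded, mirroring lockmgr_xholder() below:
 *
 *	uintptr_t x = lk->lk_lock;
 *
 *	if ((x & LK_SHARE) != 0)
 *		... LK_SHARERS(x) threads hold the lock in shared mode ...
 *	else if (LK_HOLDER(x) != LK_KERNPROC)
 *		... (struct thread *)LK_HOLDER(x) holds it exclusively ...
 */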
static void	assert_lockmgr(const struct lock_object *lock, int how);
static void	db_show_lockmgr(const struct lock_object *lock);
static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
static uintptr_t unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
	.lc_ddb_show = db_show_lockmgr,
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
	.lc_owner = owner_lockmgr,

#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));

 * It assumes the sleepq_lock is held and returns with it released.
 * It also assumes the generic interlock is sane and previously checked.
 * If LK_INTERLOCK is specified the interlock is not reacquired after the
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
	struct lock_class *class;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	 * Decide which sleep primitive to use.
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
		error = sleepq_wait_sig(&lk->lock_object, pri);
		sleepq_wait(&lk->lock_object, pri);

	if ((flags & LK_SLEEPFAIL) && error == 0)
wakeupshlk(struct lock *lk, const char *file, int line)
	int queue, wakeup_swapper;

	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

		 * If there is more than one shared lock held, just drop one
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,

		 * If there are no waiters on the exclusive queue, drop the
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))

		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);

		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty
		 * avoid a starvation for the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Please note that the lk_exslpfail count may be lying about
		 * the real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may be used in conjunction with interruptible
		 * sleeps, so lk_exslpfail might be considered an 'upper limit'
		 * bound, including the edge cases.
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
				lk->lk_exslpfail = 0;
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    "%s: %p waking up threads on the exclusive queue",
				sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;

			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
			sleepq_release(&lk->lock_object);
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		sleepq_release(&lk->lock_object);

	lock_profile_release_lock(&lk->lock_object);
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	return (wakeup_swapper);
assert_lockmgr(const struct lock_object *lock, int what)
	panic("lockmgr locks do not support assertions");

lock_lockmgr(struct lock_object *lock, uintptr_t how)
	panic("lockmgr locks do not support sleep interlocking");

unlock_lockmgr(struct lock_object *lock)
	panic("lockmgr locks do not support sleep interlocking");

owner_lockmgr(const struct lock_object *lock, struct thread **owner)
	panic("lockmgr locks do not support owner inquiring");

lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
	if (flags & LK_IS_VNODE)
		iflags |= LO_IS_VNODE;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	lk->lk_lock = LK_UNLOCKED;
	lk->lk_exslpfail = 0;
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization. Used for certain vnode and buf locks.
lockallowshare(struct lock *lk)
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;

lockdisableshare(struct lock *lk)
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LK_NOSHARE;

lockallowrecurse(struct lock *lk)
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;

lockdisablerecurse(struct lock *lk)
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;

lockdestroy(struct lock *lk)
	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
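
/*
 * Illustrative sketch (not part of the lockmgr implementation): a typical
 * consumer initializes the lock once, acquires and releases it around the
 * protected data, and destroys it when done.  The priority (PVFS) and the
 * "examplelk" name below are placeholders chosen for this example.
 *
 *	struct lock examplelk;
 *
 *	lockinit(&examplelk, PVFS, "examplelk", 0, 0);
 *	if (lockmgr(&examplelk, LK_EXCLUSIVE, NULL) == 0) {
 *		... modify the protected data ...
 *		lockmgr(&examplelk, LK_RELEASE, NULL);
 *	}
 *	lockdestroy(&examplelk);
 */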
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
	struct lock_class *class;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;

	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
	if (lk->lock_object.lo_flags & LK_NOSHARE) {
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);

		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, flags & LK_INTERLOCK ? ilk : NULL);

			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers. Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			if (LK_CAN_SHARE(x, flags)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
			PMC_SOFT_CALL( , , lock, failed);
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			if (LK_HOLDER(x) == tid) {
				    "%s: %p already held in exclusive mode",

			 * If the lock is expected to not sleep just give up
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",

#ifdef ADAPTIVE_LOCKMGRS
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes. We need a double-state handle here
			 * because for a failed acquisition the lock can be
			 * either held in exclusive mode or shared mode
			 * (for the writer starvation avoidance technique).
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(td), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);

				 * If we are also holding an interlock, drop it
				 * in order to avoid a deadlock if the lockmgr
				 * owner is adaptively spinning on the
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(td), "running");
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(td), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x, flags) != 0)
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(td), "running");

		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		sleepq_lock(&lk->lock_object);

		 * if the lock can be acquired in shared mode, try
		if (LK_CAN_SHARE(x, flags)) {
			sleepq_release(&lk->lock_object);
#ifdef ADAPTIVE_LOCKMGRS
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owner) while we were waiting on the sleepqueue
		 * chain lock. If so, drop the sleepqueue lock and try
		if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
		    LK_HOLDER(x) != LK_KERNPROC) {
			owner = (struct thread *)LK_HOLDER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&lk->lock_object);

		 * Try to set the LK_SHARED_WAITERS flag. If we fail,
		 * loop back and retry.
		if ((x & LK_SHARED_WAITERS) == 0) {
			if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
			    x | LK_SHARED_WAITERS)) {
				sleepq_release(&lk->lock_object);
			LOCK_LOG2(lk, "%s: %p set shared waiters flag",

		 * Since we have been unable to acquire the
		 * shared lock and the shared waiters flag is set,
		 * we will sleep.
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		flags &= ~LK_INTERLOCK;
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",

		lock_profile_obtain_lock_success(&lk->lock_object,
		    contested, waittime, file, line);
		LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
		WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
		TD_LOCKS_INC(curthread);
		TD_SLOCKS_INC(curthread);
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);

		 * In LK_TRYUPGRADE mode, do not drop the lock,
		 * returning EBUSY instead.
		if (op == LK_TRYUPGRADE) {
			LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",

		 * We have been unable to upgrade, so just
		 * give up the shared lock.
		wakeup_swapper |= wakeupshlk(lk, file, line);
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?

		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				 * If this is a try operation, just give up
				 * and return instead of panicking.
				if (LK_TRYOP(flags)) {
					    "%s: %p fails the try operation",
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
				panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
			PMC_SOFT_CALL( , , lock, failed);
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			 * If the lock is expected to not sleep just give up
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
#ifdef ADAPTIVE_LOCKMGRS
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(td), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);

				 * If we are also holding an interlock, drop it
				 * in order to avoid a deadlock if the lockmgr
				 * owner is adaptively spinning on the
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(td), "running");

			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(td), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					    LK_EXCLUSIVE_SPINNERS) == 0)
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(td), "running");
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			sleepq_lock(&lk->lock_object);

			 * if the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);

#ifdef ADAPTIVE_LOCKMGRS
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock. If so, drop the sleepqueue lock and try
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);

			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it,
			 * claim lock ownership and return, preserving waiters
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
					sleepq_release(&lk->lock_object);
					    "%s: %p claimed by a new writer",
				sleepq_release(&lk->lock_object);
			 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
			 * fail, loop back and retry.
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",

			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			flags &= ~LK_INTERLOCK;
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",

		lock_profile_obtain_lock_success(&lk->lock_object,
		    contested, waittime, file, line);
		LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
		    lk->lk_recurse, file, line);
		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
		    LK_TRYWIT(flags), file, line);
		TD_LOCKS_INC(curthread);
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

		 * Panic if the lock is recursed.
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		TD_SLOCKS_INC(curthread);

		 * In order to preserve waiters flags, just spin.
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))

		_lockmgr_assert(lk, KA_LOCKED, file, line);
		if ((x & LK_SHARE) == 0) {

			 * As a first option, treat the lock as if it has not
			 * Fix-up the tid var if the lock has been disowned.
			if (LK_HOLDER(x) == LK_KERNPROC)
			WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
			TD_LOCKS_DEC(curthread);
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			 * The lock is held in exclusive mode.
			 * If the lock is also recursed, then unrecurse it.
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			sleepq_lock(&lk->lock_object);

			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 * If interruptible sleeps left the exclusive queue
			 * empty avoid a starvation for the threads sleeping
			 * on the shared queue by giving them precedence
			 * and cleaning up the exclusive waiters bit anyway.
			 * Please note that the lk_exslpfail count may be lying
			 * about the real number of waiters with the
			 * LK_SLEEPFAIL flag on because they may be used in
			 * conjunction with interruptible sleeps so
			 * lk_exslpfail might be considered an 'upper limit'
			 * bound, including the edge cases.
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					    "%s: %p has only LK_SLEEPFAIL sleepers",
					    "%s: %p waking up threads on the exclusive queue",
					sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;

				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeout
				 * may have left spurious lk_exslpfail counts
				 * on, so clean it up anyway.
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;

			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);

		wakeup_swapper = wakeupshlk(lk, file, line);
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?

		 * Trying to drain a lock we already own will result in a
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			PMC_SOFT_CALL( , , lock, failed);
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			 * If the lock is expected to not sleep just give up
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",

			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			sleepq_lock(&lk->lock_object);

			 * if the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				 * If interruptible sleeps left the exclusive
				 * queue empty avoid a starvation for the
				 * threads sleeping on the shared queue by
				 * giving them precedence and cleaning up the
				 * exclusive waiters bit anyway.
				 * Please note that the lk_exslpfail count may
				 * be lying about the real number of waiters
				 * with the LK_SLEEPFAIL flag on because they
				 * may be used in conjunction with
				 * interruptible sleeps so lk_exslpfail might
				 * be considered an 'upper limit' bound,
				 * including the edge
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;

					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							    "%s: %p has only LK_SLEEPFAIL sleepers",
							    "%s: %p waking up threads on the exclusive queue",
							    SQ_EXCLUSIVE_QUEUE);
					lk->lk_exslpfail = 0;
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
				    "%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				 * If shared waiters have been woken up we need
				 * to wait for one of them to acquire the lock
				 * before setting the exclusive waiters flag in
				 * order to avoid a deadlock.
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
			 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
			 * fail, loop back and retry.
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",

			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",

		lock_profile_obtain_lock_success(&lk->lock_object,
		    contested, waittime, file, line);
		LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
		    lk->lk_recurse, file, line);
		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
		    LK_TRYWIT(flags), file, line);
		TD_LOCKS_INC(curthread);

		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
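
/*
 * Illustrative sketch (not part of the implementation): how a caller can
 * override the per-lock defaults recorded by lockinit() through the
 * lockmgr_args() wrapper, and how a try request reports failure.  The
 * names "examplelk" and "lkwait" are placeholders for this example.
 *
 *	int error;
 *
 *	error = lockmgr(&examplelk, LK_EXCLUSIVE | LK_NOWAIT, NULL);
 *	if (error == EBUSY) {
 *		... the lock was contested and LK_NOWAIT forbade sleeping ...
 *	}
 *
 *	error = lockmgr_args(&examplelk, LK_SHARED | LK_TIMELOCK, NULL,
 *	    "lkwait", PVFS, hz);
 *	if (error == 0)
 *		lockmgr(&examplelk, LK_RELEASE, NULL);
 */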
_lockmgr_disown(struct lock *lk, const char *file, int line)
	if (SCHEDULER_STOPPED())

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);

	 * Panic if the lock is recursed.
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__, file, line);

	 * If the owner is already LK_KERNPROC just skip the whole operation.
	if (LK_HOLDER(lk->lk_lock) != tid)
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);

	 * In order to preserve waiters flags, just spin.
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
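
/*
 * Illustrative sketch (not part of the implementation): disowning hands an
 * exclusively held lock over to LK_KERNPROC, so that a different thread or
 * a completion path may release it later; this is the pattern the buffer
 * cache relies on.  "examplelk" is a placeholder name.
 *
 *	lockmgr(&examplelk, LK_EXCLUSIVE, NULL);
 *	lockmgr_disown(&examplelk);
 *	...
 *	lockmgr(&examplelk, LK_RELEASE, NULL);	(done by another thread)
 */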
lockmgr_printinfo(const struct lock *lk)
	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			printf("lock type %s: EXCL by KERNPROC\n",
			    lk->lock_object.lo_name);
			printf("lock type %s: EXCL by thread %p "
			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
			    td, td->td_proc->p_pid, td->td_proc->p_comm,

	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

lockstatus(const struct lock *lk)
	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
	} else if (x == LK_UNLOCKED)
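
/*
 * Illustrative sketch (not part of the implementation): lockstatus() lets a
 * caller distinguish how a lock is currently held.  "examplelk" is a
 * placeholder name.
 *
 *	int how = lockstatus(&examplelk);
 *
 *	if (how == LK_EXCLUSIVE)
 *		printf("owned exclusively by curthread or LK_KERNPROC\n");
 *	else if (how == LK_EXCLOTHER)
 *		printf("owned exclusively by some other thread\n");
 *	else if (how == LK_SHARED)
 *		printf("held in shared mode\n");
 *	else
 *		printf("unlocked\n");
 */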
#ifdef INVARIANT_SUPPORT

FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");

#undef	_lockmgr_assert

_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
	if (panicstr != NULL)

	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:

		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is held
		 * in exclusive mode even for the KA_LOCKED case.
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
lockmgr_chain(struct thread *td, struct thread **ownerp)
	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

db_show_lockmgr(const struct lock_object *lock)
	const struct lock *lk;

	lk = (const struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);

	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		db_printf("none\n");

	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
		db_printf("none\n");