/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif
CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1
#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#endif

#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif
#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)
#define	LK_CAN_SHARE(x, flags)						\
	(((x) & LK_SHARE) &&						\
	(((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 ||	\
	(curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||	\
	(curthread->td_pflags & TDP_DEADLKTREAT)))
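/*
 * Editor's gloss on LK_CAN_SHARE (an added note, not original code): a new
 * shared request is granted only while the lock is already in shared mode
 * and either no exclusive waiter or spinner is queued, or the requesting
 * thread is allowed to jump the exclusive queue because it already holds
 * shared lockmgr locks (and LK_NODDLKTREAT was not passed) or runs with
 * TDP_DEADLKTREAT set.  The queue-jump exception trades writer fairness
 * for deadlock avoidance on recursive shared acquisitions.
 */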
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
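/*
 * Editor's sketch of the lk_lock encoding relied upon above (added note):
 * an exclusively held lock stores the owner thread pointer in lk_lock with
 * per-lock state flags in the low bits.  Masking with
 * ~(LK_FLAGMASK & ~LK_SHARE) clears every flag except LK_SHARE, leaving
 * either the owner tid (lockmgr_xlocked) or LK_KERNPROC (lockmgr_disowned).
 * For example, a hypothetical lk_lock equal to
 * ((uintptr_t)curthread | LK_EXCLUSIVE_WAITERS) still satisfies
 * lockmgr_xlocked() for that thread.
 */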
static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};
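/*
 * Editor's note (added): registering a lock_class lets generic kernel
 * facilities such as WITNESS and DDB's "show lock" dispatch on any lock
 * object by class.  The sleep-interlock entry points below simply panic
 * because lockmgr locks are never handed to the sleep subsystem as
 * interlocks.
 */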
#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif
static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}
/*
 * Internal sleep helper for lock requests that must block.  It expects the
 * sleepqueue chain lock to be held on entry and returns with it released.
 * It also assumes the generic interlock is sane and has already been
 * checked by the caller.  If LK_INTERLOCK is specified, the interlock is
 * not reacquired after the sleep.
 */
static int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Pick the sleep primitive matching the timeout and signal-catching
	 * requirements.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
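/*
 * Editor's note (added): the ENOLCK conversion above is what implements
 * LK_SLEEPFAIL: an acquisition that had to sleep is reported as a failure
 * even when the wakeup would have granted the lock, forcing the caller to
 * revalidate the protected state.  lk_exslpfail, bumped before sleeping,
 * tracks how many exclusive-queue sleepers may bail out this way.
 */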
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Note that the lk_exslpfail count may overstate the real
		 * number of waiters with the LK_SLEEPFAIL flag on, because
		 * the flag may be used in conjunction with interruptible
		 * sleeps, so lk_exslpfail should be treated as an upper
		 * bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
				    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}
		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	return (wakeup_swapper);
}
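/*
 * Editor's note (added): wakeup_swapper aggregates what sleepq_broadcast()
 * reports, namely whether a swapped-out thread was made runnable.  The
 * flag is returned instead of being acted upon here because the caller
 * must kick the swapper (kick_proc0()) only after all sleepqueue locks
 * have been dropped.
 */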
static void
assert_lockmgr(const struct lock_object *lock, int what)
{
	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, uintptr_t how)
{
	panic("lockmgr locks do not support sleep interlocking");
}

static uintptr_t
unlock_lockmgr(struct lock_object *lock)
{
	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{
	panic("lockmgr locks do not support owner inquiries");
}
#endif
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	if (flags & LK_IS_VNODE)
		iflags |= LO_IS_VNODE;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	STACK_ZERO(lk);
}
/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockdisableshare(struct lock *lk)
{
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}
void
lockdestroy(struct lock *lk)
{
	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}
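/*
 * Illustrative usage sketch (editor's addition; "ex_lock" and the flag
 * choices are hypothetical, following the lockinit() signature above):
 *
 *	struct lock ex_lock;
 *
 *	lockinit(&ex_lock, PVFS, "exlock", 0, LK_CANRECURSE);
 *	lockmgr(&ex_lock, LK_EXCLUSIVE, NULL);
 *	...exclusive section...
 *	lockmgr(&ex_lock, LK_RELEASE, NULL);
 *	lockdestroy(&ex_lock);
 */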
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags for the requested operation @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_TRYUPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x, flags)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the caller must not sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}
#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  We need to handle two states here because
			 * for a failed acquisition the lock can be either
			 * held in exclusive mode or shared mode (for the
			 * writer starvation avoidance technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(td), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);

				/*
				 * If we also hold an interlock, drop it in
				 * order to avoid a deadlock if the lockmgr
				 * owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(td), "running");
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(td), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
						    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x, flags) != 0)
						break;
					cpu_spinwait();
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(td), "running");
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif
			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x, flags)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the turnstile
			 * chain lock.  If so, drop the turnstile lock and try
			 * again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
	case LK_TRYUPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * In LK_TRYUPGRADE mode, do not drop the lock,
		 * returning EBUSY instead.
		 */
		if (op == LK_TRYUPGRADE) {
			LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * We have been unable to succeed in upgrading, so just
		 * give up the shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);
		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If the operation must not panic, just
				 * give up and return.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
				panic("%s: recursing on non-recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}
		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the caller must not sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(td), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);

				/*
				 * If we also hold an interlock, drop it in
				 * order to avoid a deadlock if the lockmgr
				 * owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(td), "running");
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(td), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
						    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(td), "running");
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif
			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the turnstile
			 * chain lock.  If so, drop the turnstile lock and try
			 * again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it,
			 * claim lock ownership and return, preserving waiters
			 * flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}
			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

		/*
		 * Panic if the lock is recursed.
		 */
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * First, treat the lock as if it had no waiters.
			 * Fix up the tid variable if the lock has been
			 * disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock & LK_ALL_WAITERS;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starvation of the threads sleeping
			 * on the shared queue by giving them precedence
			 * and cleaning up the exclusive waiters bit anyway.
			 * Note that the lk_exslpfail count may overstate
			 * the real number of waiters with the LK_SLEEPFAIL
			 * flag on, because the flag may be used in
			 * conjunction with interruptible sleeps, so
			 * lk_exslpfail should be treated as an upper bound,
			 * including the edge cases.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
				} else {
					lk->lk_exslpfail = 0;
					LOCK_LOG2(lk,
					    "%s: %p has only LK_SLEEPFAIL sleepers",
					    __func__, lk);
					LOCK_LOG2(lk,
					    "%s: %p waking up threads on the exclusive queue",
					    __func__, lk);
					wakeup_swapper =
					    sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;
				}
			} else {

				/*
				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeout
				 * may have left spurious lk_exslpfail counts
				 * on, so clean it up anyway.
				 */
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}
		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the caller must not sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starvation of the
				 * threads sleeping on the shared queue by
				 * giving them precedence and cleaning up the
				 * exclusive waiters bit anyway.
				 * Note that the lk_exslpfail count may
				 * overstate the real number of waiters with
				 * the LK_SLEEPFAIL flag on, because the flag
				 * may be used in conjunction with
				 * interruptible sleeps, so lk_exslpfail
				 * should be treated as an upper bound,
				 * including the edge cases.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {

					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
							    "%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
							    "%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							wakeup_swapper =
							    sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				    "%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up, we
				 * need to wait for one of them to acquire
				 * the lock before setting the exclusive
				 * waiters flag in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}
			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);

	/*
	 * Panic if the lock is recursed.
	 */
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			break;
		cpu_spinwait();
	}
}
void
lockmgr_printinfo(const struct lock *lk)
{
	const struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			printf("lock type %s: EXCL by KERNPROC\n",
			    lk->lock_object.lo_name);
		else
			printf("lock type %s: EXCL by thread %p "
			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
			    td, td->td_proc->p_pid, td->td_proc->p_comm,
			    td->td_tid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}
int
lockstatus(const struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}
#ifdef INVARIANT_SUPPORT

FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");

#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif
void
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is
		 * held in exclusive mode, even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif
#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}
static void
db_show_lockmgr(const struct lock_object *lock)
{
	struct thread *td;
	const struct lock *lk;

	lk = (const struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
	else
		db_printf("none\n");
}
#endif