2 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice(s), this list of conditions and the following disclaimer as
10 * the first lines of this file unmodified other than the possible
11 * addition of one or more copyright notices.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice(s), this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
29 #include "opt_adaptive_lockmgrs.h"
31 #include "opt_hwpmc_hooks.h"
32 #include "opt_kdtrace.h"
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 #include <sys/param.h>
41 #include <sys/lock_profile.h>
42 #include <sys/lockmgr.h>
43 #include <sys/mutex.h>
45 #include <sys/sleepqueue.h>
47 #include <sys/stack.h>
49 #include <sys/sysctl.h>
50 #include <sys/systm.h>
52 #include <machine/cpu.h>
59 #include <sys/pmckern.h>
60 PMC_SOFT_DECLARE( , , lock, failed);
63 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
64 (LK_ADAPTIVE | LK_NOSHARE));
65 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
66 ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
68 #define SQ_EXCLUSIVE_QUEUE 0
69 #define SQ_SHARED_QUEUE 1
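/*
 * Blocked threads are parked on two sleepqueue sub-queues: one for would-be
 * exclusive owners and one for shared requests.  The wakeup paths below
 * drain the exclusive queue preferentially.
 */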
72 #define _lockmgr_assert(lk, what, file, line)
73 #define TD_LOCKS_INC(td)
74 #define TD_LOCKS_DEC(td)
76 #define TD_LOCKS_INC(td) ((td)->td_locks++)
77 #define TD_LOCKS_DEC(td) ((td)->td_locks--)
79 #define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++)
80 #define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--)
83 #define STACK_PRINT(lk)
84 #define STACK_SAVE(lk)
85 #define STACK_ZERO(lk)
87 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
88 #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
89 #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
92 #define LOCK_LOG2(lk, string, arg1, arg2) \
93 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
94 CTR2(KTR_LOCK, (string), (arg1), (arg2))
95 #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
96 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
97 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
99 #define GIANT_DECLARE \
101 WITNESS_SAVE_DECL(Giant)
102 #define GIANT_RESTORE() do { \
106 WITNESS_RESTORE(&Giant.lock_object, Giant); \
109 #define GIANT_SAVE() do { \
110 if (mtx_owned(&Giant)) { \
111 WITNESS_SAVE(&Giant.lock_object, Giant); \
112 while (mtx_owned(&Giant)) { \
114 mtx_unlock(&Giant); \
119 #define LK_CAN_SHARE(x, flags) \
120 (((x) & LK_SHARE) && \
121 (((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 || \
122 (curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) || \
123 (curthread->td_pflags & TDP_DEADLKTREAT)))
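/*
 * A note on LK_CAN_SHARE() above: a shared request is granted only while
 * LK_SHARE is set and either no exclusive waiters/spinners are queued, or
 * the requesting thread already holds shared lockmgr locks (and the caller
 * did not pass LK_NODDLKTREAT), or the thread has TDP_DEADLKTREAT set.  The
 * latter two cases deliberately let the request jump ahead of exclusive
 * waiters to avoid deadlocking against locks the thread already shares.
 */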
124 #define LK_TRYOP(x) \
127 #define LK_CAN_WITNESS(x) \
128 (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
129 #define LK_TRYWIT(x) \
130 (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
132 #define LK_CAN_ADAPT(lk, f) \
133 (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 && \
134 ((f) & LK_SLEEPFAIL) == 0)
136 #define lockmgr_disowned(lk) \
137 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
139 #define lockmgr_xlocked(lk) \
140 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
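/*
 * lk_lock holds either a sharer count (when LK_SHARE is set) or the owning
 * thread pointer, with the low bits reserved for waiter/spinner flags;
 * lockmgr_xlocked() and lockmgr_disowned() above simply mask those flag
 * bits off and compare the remainder against curthread or LK_KERNPROC.
 */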
142 static void assert_lockmgr(const struct lock_object *lock, int how);
144 static void db_show_lockmgr(const struct lock_object *lock);
146 static void lock_lockmgr(struct lock_object *lock, uintptr_t how);
148 static int owner_lockmgr(const struct lock_object *lock,
149 struct thread **owner);
151 static uintptr_t unlock_lockmgr(struct lock_object *lock);
153 struct lock_class lock_class_lockmgr = {
154 .lc_name = "lockmgr",
155 .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
156 .lc_assert = assert_lockmgr,
158 .lc_ddb_show = db_show_lockmgr,
160 .lc_lock = lock_lockmgr,
161 .lc_unlock = unlock_lockmgr,
163 .lc_owner = owner_lockmgr,
167 #ifdef ADAPTIVE_LOCKMGRS
168 static u_int alk_retries = 10;
169 static u_int alk_loops = 10000;
170 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
171 "lockmgr debugging");
172 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
173 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
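/*
 * alk_retries bounds how many rounds of adaptive spinning an acquiring
 * thread attempts before it queues, and alk_loops bounds the iterations of
 * each round; both may be tuned at runtime via the debug.lockmgr.* sysctls.
 */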
176 static __inline struct thread *
177 lockmgr_xholder(const struct lock *lk)
182 return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
186 * Assumes the sleepqueue chain lock is held on entry and returns with it released.
187 * It also assumes the generic interlock is sane and was previously checked.
188 * If LK_INTERLOCK is specified the interlock is not reacquired after the
192 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
193 const char *wmesg, int pri, int timo, int queue)
196 struct lock_class *class;
199 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
200 catch = pri & PCATCH;
204 LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
205 (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
207 if (flags & LK_INTERLOCK)
208 class->lc_unlock(ilk);
209 if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
212 sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
213 SLEEPQ_INTERRUPTIBLE : 0), queue);
214 if ((flags & LK_TIMELOCK) && timo)
215 sleepq_set_timeout(&lk->lock_object, timo);
218 * Decide which sleepqueue wait variant to use (timed and/or interruptible).
220 if ((flags & LK_TIMELOCK) && timo && catch)
221 error = sleepq_timedwait_sig(&lk->lock_object, pri);
222 else if ((flags & LK_TIMELOCK) && timo)
223 error = sleepq_timedwait(&lk->lock_object, pri);
225 error = sleepq_wait_sig(&lk->lock_object, pri);
227 sleepq_wait(&lk->lock_object, pri);
229 if ((flags & LK_SLEEPFAIL) && error == 0)
236 wakeupshlk(struct lock *lk, const char *file, int line)
240 int queue, wakeup_swapper;
242 WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
243 LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
250 * If there is more than one shared lock held, just drop one
253 if (LK_SHARERS(x) > 1) {
254 if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
261 * If there are no waiters on the exclusive queue, drop the
264 if ((x & LK_ALL_WAITERS) == 0) {
265 MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
267 if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
273 * We should have a sharer with waiters, so enter the hard
274 * path in order to handle wakeups correctly.
276 sleepq_lock(&lk->lock_object);
277 x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
281 * If the lock has exclusive waiters, give them preference in
282 * order to avoid deadlock with shared runners up.
283 * If interruptible sleeps left the exclusive queue empty
284 * avoid starvation of the threads sleeping on the shared
285 * queue by giving them precedence and cleaning up the
286 * exclusive waiters bit anyway.
287 * Please note that the lk_exslpfail count may overstate
288 * the real number of waiters with the LK_SLEEPFAIL flag on
289 * because they may be used in conjunction with interruptible
290 * sleeps, so lk_exslpfail should be considered an 'upper limit'
291 * bound, including the edge cases.
293 realexslp = sleepq_sleepcnt(&lk->lock_object,
295 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
296 if (lk->lk_exslpfail < realexslp) {
297 lk->lk_exslpfail = 0;
298 queue = SQ_EXCLUSIVE_QUEUE;
299 v |= (x & LK_SHARED_WAITERS);
301 lk->lk_exslpfail = 0;
303 "%s: %p has only LK_SLEEPFAIL sleepers",
306 "%s: %p waking up threads on the exclusive queue",
309 sleepq_broadcast(&lk->lock_object,
310 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
311 queue = SQ_SHARED_QUEUE;
317 * Exclusive waiters sleeping with LK_SLEEPFAIL on
318 * and using interruptible sleeps/timeout may have
319 * left spurious lk_exslpfail counts on, so clean
322 lk->lk_exslpfail = 0;
323 queue = SQ_SHARED_QUEUE;
326 if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
328 sleepq_release(&lk->lock_object);
331 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
332 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
334 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
336 sleepq_release(&lk->lock_object);
340 lock_profile_release_lock(&lk->lock_object);
341 TD_LOCKS_DEC(curthread);
342 TD_SLOCKS_DEC(curthread);
343 return (wakeup_swapper);
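/*
 * The wakeup_swapper value returned above follows the usual sleepqueue
 * convention: a non-zero result tells the caller that a swapped-out thread
 * was made runnable, so the swapper should be woken once it is safe to do so.
 */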
347 assert_lockmgr(const struct lock_object *lock, int what)
350 panic("lockmgr locks do not support assertions");
354 lock_lockmgr(struct lock_object *lock, uintptr_t how)
357 panic("lockmgr locks do not support sleep interlocking");
361 unlock_lockmgr(struct lock_object *lock)
364 panic("lockmgr locks do not support sleep interlocking");
369 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
372 panic("lockmgr locks do not support owner inquiring");
377 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
381 MPASS((flags & ~LK_INIT_MASK) == 0);
382 ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
383 ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
386 iflags = LO_SLEEPABLE | LO_UPGRADABLE;
387 if (flags & LK_CANRECURSE)
388 iflags |= LO_RECURSABLE;
389 if ((flags & LK_NODUP) == 0)
391 if (flags & LK_NOPROFILE)
392 iflags |= LO_NOPROFILE;
393 if ((flags & LK_NOWITNESS) == 0)
394 iflags |= LO_WITNESS;
395 if (flags & LK_QUIET)
397 if (flags & LK_IS_VNODE)
398 iflags |= LO_IS_VNODE;
399 iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
401 lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
402 lk->lk_lock = LK_UNLOCKED;
404 lk->lk_exslpfail = 0;
411 * XXX: Gross hacks to manipulate external lock flags after
412 * initialization. Used for certain vnode and buf locks.
415 lockallowshare(struct lock *lk)
418 lockmgr_assert(lk, KA_XLOCKED);
419 lk->lock_object.lo_flags &= ~LK_NOSHARE;
423 lockdisableshare(struct lock *lk)
426 lockmgr_assert(lk, KA_XLOCKED);
427 lk->lock_object.lo_flags |= LK_NOSHARE;
431 lockallowrecurse(struct lock *lk)
434 lockmgr_assert(lk, KA_XLOCKED);
435 lk->lock_object.lo_flags |= LO_RECURSABLE;
439 lockdisablerecurse(struct lock *lk)
442 lockmgr_assert(lk, KA_XLOCKED);
443 lk->lock_object.lo_flags &= ~LO_RECURSABLE;
447 lockdestroy(struct lock *lk)
450 KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
451 KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
452 KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
453 lock_destroy(&lk->lock_object);
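/*
 * Illustrative usage sketch (not part of the original file): a consumer
 * normally pairs lockinit()/lockdestroy() with the lockmgr() convenience
 * macro from <sys/lockmgr.h>; the priority, wait message and flags below
 * are arbitrary examples.
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, LK_CANRECURSE);
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	... operate on the protected object ...
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 */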
457 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
458 const char *wmesg, int pri, int timo, const char *file, int line)
461 struct lock_class *class;
465 int error, ipri, itimo, queue, wakeup_swapper;
466 #ifdef LOCK_PROFILING
467 uint64_t waittime = 0;
470 #ifdef ADAPTIVE_LOCKMGRS
471 volatile struct thread *owner;
472 u_int i, spintries = 0;
476 tid = (uintptr_t)curthread;
477 op = (flags & LK_TYPE_MASK);
478 iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
479 ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
480 itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
482 MPASS((flags & ~LK_TOTAL_MASK) == 0);
483 KASSERT((op & (op - 1)) == 0,
484 ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
485 KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
486 (op != LK_DOWNGRADE && op != LK_RELEASE),
487 ("%s: Invalid flags in regard of the operation desired @ %s:%d",
488 __func__, file, line));
489 KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
490 ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
491 __func__, file, line));
492 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
493 ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
494 lk->lock_object.lo_name, file, line));
496 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
497 if (panicstr != NULL) {
498 if (flags & LK_INTERLOCK)
499 class->lc_unlock(ilk);
503 if (lk->lock_object.lo_flags & LK_NOSHARE) {
511 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
513 if (flags & LK_INTERLOCK)
514 class->lc_unlock(ilk);
522 if (LK_CAN_WITNESS(flags))
523 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
524 file, line, flags & LK_INTERLOCK ? ilk : NULL);
529 * If no other thread has an exclusive lock, or
530 * no exclusive waiter is present, bump the count of
531 * sharers. Since we have to preserve the state of
532 * waiters, if we fail to acquire the shared lock
533 * loop back and retry.
535 if (LK_CAN_SHARE(x, flags)) {
536 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
542 PMC_SOFT_CALL( , , lock, failed);
544 lock_profile_obtain_lock_failed(&lk->lock_object,
545 &contested, &waittime);
548 * If the lock is already held by curthread in
549 * exclusive way avoid a deadlock.
551 if (LK_HOLDER(x) == tid) {
553 "%s: %p already held in exclusive mode",
560 * If the operation is not allowed to sleep, just give up
563 if (LK_TRYOP(flags)) {
564 LOCK_LOG2(lk, "%s: %p fails the try operation",
570 #ifdef ADAPTIVE_LOCKMGRS
572 * If the owner is running on another CPU, spin until
573 * the owner stops running or the state of the lock
574 * changes. We need a double-state handle here
575 * because for a failed acquisition the lock can be
576 * either held in exclusive mode or shared mode
577 * (for the writer starvation avoidance technique).
579 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
580 LK_HOLDER(x) != LK_KERNPROC) {
581 owner = (struct thread *)LK_HOLDER(x);
582 if (LOCK_LOG_TEST(&lk->lock_object, 0))
584 "%s: spinning on %p held by %p",
585 __func__, lk, owner);
588 * If we are also holding an interlock, drop it
589 * in order to avoid a deadlock if the lockmgr
590 * owner is adaptively spinning on the
593 if (flags & LK_INTERLOCK) {
594 class->lc_unlock(ilk);
595 flags &= ~LK_INTERLOCK;
598 while (LK_HOLDER(lk->lk_lock) ==
599 (uintptr_t)owner && TD_IS_RUNNING(owner))
603 } else if (LK_CAN_ADAPT(lk, flags) &&
604 (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
605 spintries < alk_retries) {
606 if (flags & LK_INTERLOCK) {
607 class->lc_unlock(ilk);
608 flags &= ~LK_INTERLOCK;
612 for (i = 0; i < alk_loops; i++) {
613 if (LOCK_LOG_TEST(&lk->lock_object, 0))
615 "%s: shared spinning on %p with %u and %u",
616 __func__, lk, spintries, i);
618 if ((x & LK_SHARE) == 0 ||
619 LK_CAN_SHARE(x, flags) != 0)
630 * Acquire the sleepqueue chain lock because we
631 * probably will need to manipulate the waiters flags.
633 sleepq_lock(&lk->lock_object);
637 * if the lock can be acquired in shared mode, try
640 if (LK_CAN_SHARE(x, flags)) {
641 sleepq_release(&lk->lock_object);
645 #ifdef ADAPTIVE_LOCKMGRS
647 * The current lock owner might have started executing
648 * on another CPU (or the lock could have changed
649 * owner) while we were waiting on the turnstile
650 * chain lock. If so, drop the turnstile lock and try
653 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
654 LK_HOLDER(x) != LK_KERNPROC) {
655 owner = (struct thread *)LK_HOLDER(x);
656 if (TD_IS_RUNNING(owner)) {
657 sleepq_release(&lk->lock_object);
664 * Try to set the LK_SHARED_WAITERS flag. If we fail,
665 * loop back and retry.
667 if ((x & LK_SHARED_WAITERS) == 0) {
668 if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
669 x | LK_SHARED_WAITERS)) {
670 sleepq_release(&lk->lock_object);
673 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
678 * Since we have been unable to acquire the
679 * shared lock and the shared waiters flag is set,
682 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
684 flags &= ~LK_INTERLOCK;
687 "%s: interrupted sleep for %p with %d",
688 __func__, lk, error);
691 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
695 lock_profile_obtain_lock_success(&lk->lock_object,
696 contested, waittime, file, line);
697 LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
699 WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
701 TD_LOCKS_INC(curthread);
702 TD_SLOCKS_INC(curthread);
708 _lockmgr_assert(lk, KA_SLOCKED, file, line);
710 x = v & LK_ALL_WAITERS;
711 v &= LK_EXCLUSIVE_SPINNERS;
714 * Try to switch from one shared lock to an exclusive one.
715 * We need to preserve waiters flags during the operation.
717 if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
719 LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
721 WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
722 LK_TRYWIT(flags), file, line);
723 TD_SLOCKS_DEC(curthread);
728 * In LK_TRYUPGRADE mode, do not drop the lock,
729 * returning EBUSY instead.
731 if (op == LK_TRYUPGRADE) {
732 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
739 * We have been unable to succeed in upgrading, so just
740 * give up the shared lock.
742 wakeup_swapper |= wakeupshlk(lk, file, line);
746 if (LK_CAN_WITNESS(flags))
747 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
748 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
752 * If curthread already holds the lock and this one is
753 * allowed to recurse, simply recurse on it.
755 if (lockmgr_xlocked(lk)) {
756 if ((flags & LK_CANRECURSE) == 0 &&
757 (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
760 * If this is a try operation, just
761 * give up and return instead of panicking.
763 if (LK_TRYOP(flags)) {
765 "%s: %p fails the try operation",
770 if (flags & LK_INTERLOCK)
771 class->lc_unlock(ilk);
772 panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
773 __func__, iwmesg, file, line);
776 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
777 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
778 lk->lk_recurse, file, line);
779 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
780 LK_TRYWIT(flags), file, line);
781 TD_LOCKS_INC(curthread);
785 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
788 PMC_SOFT_CALL( , , lock, failed);
790 lock_profile_obtain_lock_failed(&lk->lock_object,
791 &contested, &waittime);
794 * If the operation is not allowed to sleep, just give up
797 if (LK_TRYOP(flags)) {
798 LOCK_LOG2(lk, "%s: %p fails the try operation",
804 #ifdef ADAPTIVE_LOCKMGRS
806 * If the owner is running on another CPU, spin until
807 * the owner stops running or the state of the lock
811 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
812 LK_HOLDER(x) != LK_KERNPROC) {
813 owner = (struct thread *)LK_HOLDER(x);
814 if (LOCK_LOG_TEST(&lk->lock_object, 0))
816 "%s: spinning on %p held by %p",
817 __func__, lk, owner);
820 * If we are also holding an interlock, drop it
821 * in order to avoid a deadlock if the lockmgr
822 * owner is adaptively spinning on the
825 if (flags & LK_INTERLOCK) {
826 class->lc_unlock(ilk);
827 flags &= ~LK_INTERLOCK;
830 while (LK_HOLDER(lk->lk_lock) ==
831 (uintptr_t)owner && TD_IS_RUNNING(owner))
835 } else if (LK_CAN_ADAPT(lk, flags) &&
836 (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
837 spintries < alk_retries) {
838 if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
839 !atomic_cmpset_ptr(&lk->lk_lock, x,
840 x | LK_EXCLUSIVE_SPINNERS))
842 if (flags & LK_INTERLOCK) {
843 class->lc_unlock(ilk);
844 flags &= ~LK_INTERLOCK;
848 for (i = 0; i < alk_loops; i++) {
849 if (LOCK_LOG_TEST(&lk->lock_object, 0))
851 "%s: shared spinning on %p with %u and %u",
852 __func__, lk, spintries, i);
854 LK_EXCLUSIVE_SPINNERS) == 0)
865 * Acquire the sleepqueue chain lock because we
866 * probably will need to manipulate the waiters flags.
868 sleepq_lock(&lk->lock_object);
872 * if the lock has been released while we spun on
873 * the sleepqueue chain lock just try again.
875 if (x == LK_UNLOCKED) {
876 sleepq_release(&lk->lock_object);
880 #ifdef ADAPTIVE_LOCKMGRS
882 * The current lock owner might have started executing
883 * on another CPU (or the lock could have changed
884 * owner) while we were waiting on the turnstile
885 * chain lock. If so, drop the turnstile lock and try
888 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
889 LK_HOLDER(x) != LK_KERNPROC) {
890 owner = (struct thread *)LK_HOLDER(x);
891 if (TD_IS_RUNNING(owner)) {
892 sleepq_release(&lk->lock_object);
899 * The lock can be in the state where there is a
900 * pending queue of waiters, but still no owner.
901 * This happens when the lock is contested and an
902 * owner is going to claim the lock.
903 * If curthread is the one successfully acquiring it
904 * claim lock ownership and return, preserving waiters
907 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
908 if ((x & ~v) == LK_UNLOCKED) {
909 v &= ~LK_EXCLUSIVE_SPINNERS;
910 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
912 sleepq_release(&lk->lock_object);
914 "%s: %p claimed by a new writer",
918 sleepq_release(&lk->lock_object);
923 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
924 * fail, loop back and retry.
926 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
927 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
928 x | LK_EXCLUSIVE_WAITERS)) {
929 sleepq_release(&lk->lock_object);
932 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
937 * Since we have been unable to acquire the
938 * exclusive lock and the exclusive waiters flag
939 * is set, we will sleep.
941 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
943 flags &= ~LK_INTERLOCK;
946 "%s: interrupted sleep for %p with %d",
947 __func__, lk, error);
950 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
954 lock_profile_obtain_lock_success(&lk->lock_object,
955 contested, waittime, file, line);
956 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
957 lk->lk_recurse, file, line);
958 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
959 LK_TRYWIT(flags), file, line);
960 TD_LOCKS_INC(curthread);
965 _lockmgr_assert(lk, KA_XLOCKED, file, line);
966 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
967 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
970 * Panic if the lock is recursed.
972 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
973 if (flags & LK_INTERLOCK)
974 class->lc_unlock(ilk);
975 panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
976 __func__, iwmesg, file, line);
978 TD_SLOCKS_INC(curthread);
981 * In order to preserve waiters flags, just spin.
985 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
987 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
988 LK_SHARERS_LOCK(1) | x))
994 _lockmgr_assert(lk, KA_LOCKED, file, line);
997 if ((x & LK_SHARE) == 0) {
1000 * As a first option, treat the lock as if it has not
1002 * Fix-up the tid var if the lock has been disowned.
1004 if (LK_HOLDER(x) == LK_KERNPROC)
1007 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
1009 TD_LOCKS_DEC(curthread);
1011 LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
1012 lk->lk_recurse, file, line);
1015 * The lock is held in exclusive mode.
1016 * If the lock is recursed also, then unrecurse it.
1018 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1019 LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
1024 if (tid != LK_KERNPROC)
1025 lock_profile_release_lock(&lk->lock_object);
1027 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
1031 sleepq_lock(&lk->lock_object);
1036 * If the lock has exclusive waiters, give them
1037 * preference in order to avoid deadlock with
1038 * shared runners up.
1039 * If interruptible sleeps left the exclusive queue
1040 * empty, avoid starvation of the threads sleeping
1041 * on the shared queue by giving them precedence
1042 * and cleaning up the exclusive waiters bit anyway.
1043 * Please note that the lk_exslpfail count may overstate
1044 * the real number of waiters with the
1045 * LK_SLEEPFAIL flag on because they may be used in
1046 * conjunction with interruptible sleeps, so
1047 * lk_exslpfail should be considered an 'upper limit'
1048 * bound, including the edge cases.
1050 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1051 realexslp = sleepq_sleepcnt(&lk->lock_object,
1052 SQ_EXCLUSIVE_QUEUE);
1053 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1054 if (lk->lk_exslpfail < realexslp) {
1055 lk->lk_exslpfail = 0;
1056 queue = SQ_EXCLUSIVE_QUEUE;
1057 v |= (x & LK_SHARED_WAITERS);
1059 lk->lk_exslpfail = 0;
1061 "%s: %p has only LK_SLEEPFAIL sleepers",
1064 "%s: %p waking up threads on the exclusive queue",
1067 sleepq_broadcast(&lk->lock_object,
1068 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1069 queue = SQ_SHARED_QUEUE;
1074 * Exclusive waiters sleeping with LK_SLEEPFAIL
1075 * on and using interruptible sleeps/timeout
1076 * may have left spurious lk_exslpfail counts
1077 * on, so clean it up anyway.
1079 lk->lk_exslpfail = 0;
1080 queue = SQ_SHARED_QUEUE;
1084 "%s: %p waking up threads on the %s queue",
1085 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1087 atomic_store_rel_ptr(&lk->lk_lock, v);
1088 wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1089 SLEEPQ_LK, 0, queue);
1090 sleepq_release(&lk->lock_object);
1093 wakeup_swapper = wakeupshlk(lk, file, line);
1096 if (LK_CAN_WITNESS(flags))
1097 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1098 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1102 * Trying to drain a lock we already own will result in a
1105 if (lockmgr_xlocked(lk)) {
1106 if (flags & LK_INTERLOCK)
1107 class->lc_unlock(ilk);
1108 panic("%s: draining %s with the lock held @ %s:%d\n",
1109 __func__, iwmesg, file, line);
1112 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1114 PMC_SOFT_CALL( , , lock, failed);
1116 lock_profile_obtain_lock_failed(&lk->lock_object,
1117 &contested, &waittime);
1120 * If the operation is not allowed to sleep, just give up
1123 if (LK_TRYOP(flags)) {
1124 LOCK_LOG2(lk, "%s: %p fails the try operation",
1131 * Acquire the sleepqueue chain lock because we
1132 * probably will need to manipulate the waiters flags.
1134 sleepq_lock(&lk->lock_object);
1138 * if the lock has been released while we spun on
1139 * the sleepqueue chain lock just try again.
1141 if (x == LK_UNLOCKED) {
1142 sleepq_release(&lk->lock_object);
1146 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1147 if ((x & ~v) == LK_UNLOCKED) {
1148 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1151 * If interruptible sleeps left the exclusive
1152 * queue empty, avoid starvation of the
1153 * threads sleeping on the shared queue by
1154 * giving them precedence and cleaning up the
1155 * exclusive waiters bit anyway.
1156 * Please note that the lk_exslpfail count may
1157 * overstate the real number of waiters with
1158 * the LK_SLEEPFAIL flag on because they may
1159 * be used in conjunction with interruptible
1160 * sleeps, so lk_exslpfail should be considered
1161 * an 'upper limit' bound, including the edge
1164 if (v & LK_EXCLUSIVE_WAITERS) {
1165 queue = SQ_EXCLUSIVE_QUEUE;
1166 v &= ~LK_EXCLUSIVE_WAITERS;
1170 * Exclusive waiters sleeping with
1171 * LK_SLEEPFAIL on and using
1172 * interruptible sleeps/timeout may
1173 * have left spurious lk_exslpfail
1174 * counts on, so clean it up anyway.
1176 MPASS(v & LK_SHARED_WAITERS);
1177 lk->lk_exslpfail = 0;
1178 queue = SQ_SHARED_QUEUE;
1179 v &= ~LK_SHARED_WAITERS;
1181 if (queue == SQ_EXCLUSIVE_QUEUE) {
1183 sleepq_sleepcnt(&lk->lock_object,
1184 SQ_EXCLUSIVE_QUEUE);
1185 if (lk->lk_exslpfail >= realexslp) {
1186 lk->lk_exslpfail = 0;
1187 queue = SQ_SHARED_QUEUE;
1188 v &= ~LK_SHARED_WAITERS;
1189 if (realexslp != 0) {
1191 "%s: %p has only LK_SLEEPFAIL sleepers",
1194 "%s: %p waking up threads on the exclusive queue",
1200 SQ_EXCLUSIVE_QUEUE);
1203 lk->lk_exslpfail = 0;
1205 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1206 sleepq_release(&lk->lock_object);
1210 "%s: %p waking up all threads on the %s queue",
1211 __func__, lk, queue == SQ_SHARED_QUEUE ?
1212 "shared" : "exclusive");
1213 wakeup_swapper |= sleepq_broadcast(
1214 &lk->lock_object, SLEEPQ_LK, 0, queue);
1217 * If shared waiters have been woken up we need
1218 * to wait for one of them to acquire the lock
1219 * before setting the exclusive waiters flag in
1220 * order to avoid a deadlock.
1222 if (queue == SQ_SHARED_QUEUE) {
1223 for (v = lk->lk_lock;
1224 (v & LK_SHARE) && !LK_SHARERS(v);
1231 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
1232 * fail, loop back and retry.
1234 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1235 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1236 x | LK_EXCLUSIVE_WAITERS)) {
1237 sleepq_release(&lk->lock_object);
1240 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1245 * Since we have been unable to acquire the
1246 * exclusive lock and the exclusive waiters flag
1247 * is set, we will sleep.
1249 if (flags & LK_INTERLOCK) {
1250 class->lc_unlock(ilk);
1251 flags &= ~LK_INTERLOCK;
1254 sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1255 SQ_EXCLUSIVE_QUEUE);
1256 sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1258 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1263 lock_profile_obtain_lock_success(&lk->lock_object,
1264 contested, waittime, file, line);
1265 LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1266 lk->lk_recurse, file, line);
1267 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1268 LK_TRYWIT(flags), file, line);
1269 TD_LOCKS_INC(curthread);
1274 if (flags & LK_INTERLOCK)
1275 class->lc_unlock(ilk);
1276 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1279 if (flags & LK_INTERLOCK)
1280 class->lc_unlock(ilk);
1288 _lockmgr_disown(struct lock *lk, const char *file, int line)
1292 if (SCHEDULER_STOPPED())
1295 tid = (uintptr_t)curthread;
1296 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1299 * Panic if the lock is recursed.
1301 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1302 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1303 __func__, file, line);
1306 * If the owner is already LK_KERNPROC just skip the whole operation.
1308 if (LK_HOLDER(lk->lk_lock) != tid)
1310 lock_profile_release_lock(&lk->lock_object);
1311 LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1312 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1313 TD_LOCKS_DEC(curthread);
1317 * In order to preserve waiters flags, just spin.
1321 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1322 x &= LK_ALL_WAITERS;
1323 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
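/*
 * After a successful disown the owner field holds LK_KERNPROC instead of a
 * thread pointer, so the lock is no longer accounted against the disowning
 * thread and may subsequently be released from any context.
 */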
1331 lockmgr_printinfo(const struct lock *lk)
1336 if (lk->lk_lock == LK_UNLOCKED)
1337 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1338 else if (lk->lk_lock & LK_SHARE)
1339 printf("lock type %s: SHARED (count %ju)\n",
1340 lk->lock_object.lo_name,
1341 (uintmax_t)LK_SHARERS(lk->lk_lock));
1343 td = lockmgr_xholder(lk);
1344 if (td == (struct thread *)LK_KERNPROC)
1345 printf("lock type %s: EXCL by KERNPROC\n",
1346 lk->lock_object.lo_name);
1348 printf("lock type %s: EXCL by thread %p "
1349 "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1350 td, td->td_proc->p_pid, td->td_proc->p_comm,
1355 if (x & LK_EXCLUSIVE_WAITERS)
1356 printf(" with exclusive waiters pending\n");
1357 if (x & LK_SHARED_WAITERS)
1358 printf(" with shared waiters pending\n");
1359 if (x & LK_EXCLUSIVE_SPINNERS)
1360 printf(" with exclusive spinners pending\n");
1366 lockstatus(const struct lock *lk)
1375 if ((x & LK_SHARE) == 0) {
1376 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1380 } else if (x == LK_UNLOCKED)
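/*
 * lockstatus() reports the state from the caller's point of view: it is
 * expected to return LK_EXCLUSIVE when curthread (or LK_KERNPROC) holds the
 * lock exclusively, LK_EXCLOTHER when another thread does, LK_SHARED when
 * the lock is share-held, and 0 when it is unlocked.
 */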
1386 #ifdef INVARIANT_SUPPORT
1388 FEATURE(invariant_support,
1389 "Support for modules compiled with INVARIANTS option");
1392 #undef _lockmgr_assert
1396 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1400 if (panicstr != NULL)
1404 case KA_SLOCKED | KA_NOTRECURSED:
1405 case KA_SLOCKED | KA_RECURSED:
1408 case KA_LOCKED | KA_NOTRECURSED:
1409 case KA_LOCKED | KA_RECURSED:
1413 * We cannot trust WITNESS if the lock is held in exclusive
1414 * mode and a call to lockmgr_disown() happened.
1415 * Work around this by skipping the check if the lock is held in
1416 * exclusive mode even for the KA_LOCKED case.
1418 if (slocked || (lk->lk_lock & LK_SHARE)) {
1419 witness_assert(&lk->lock_object, what, file, line);
1423 if (lk->lk_lock == LK_UNLOCKED ||
1424 ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1425 (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1426 panic("Lock %s not %slocked @ %s:%d\n",
1427 lk->lock_object.lo_name, slocked ? "share" : "",
1430 if ((lk->lk_lock & LK_SHARE) == 0) {
1431 if (lockmgr_recursed(lk)) {
1432 if (what & KA_NOTRECURSED)
1433 panic("Lock %s recursed @ %s:%d\n",
1434 lk->lock_object.lo_name, file,
1436 } else if (what & KA_RECURSED)
1437 panic("Lock %s not recursed @ %s:%d\n",
1438 lk->lock_object.lo_name, file, line);
1442 case KA_XLOCKED | KA_NOTRECURSED:
1443 case KA_XLOCKED | KA_RECURSED:
1444 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1445 panic("Lock %s not exclusively locked @ %s:%d\n",
1446 lk->lock_object.lo_name, file, line);
1447 if (lockmgr_recursed(lk)) {
1448 if (what & KA_NOTRECURSED)
1449 panic("Lock %s recursed @ %s:%d\n",
1450 lk->lock_object.lo_name, file, line);
1451 } else if (what & KA_RECURSED)
1452 panic("Lock %s not recursed @ %s:%d\n",
1453 lk->lock_object.lo_name, file, line);
1456 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1457 panic("Lock %s exclusively locked @ %s:%d\n",
1458 lk->lock_object.lo_name, file, line);
1461 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1469 lockmgr_chain(struct thread *td, struct thread **ownerp)
1475 if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1477 db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1478 if (lk->lk_lock & LK_SHARE)
1479 db_printf("SHARED (count %ju)\n",
1480 (uintmax_t)LK_SHARERS(lk->lk_lock));
1482 db_printf("EXCL\n");
1483 *ownerp = lockmgr_xholder(lk);
1489 db_show_lockmgr(const struct lock_object *lock)
1492 const struct lock *lk;
1494 lk = (const struct lock *)lock;
1496 db_printf(" state: ");
1497 if (lk->lk_lock == LK_UNLOCKED)
1498 db_printf("UNLOCKED\n");
1499 else if (lk->lk_lock & LK_SHARE)
1500 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1502 td = lockmgr_xholder(lk);
1503 if (td == (struct thread *)LK_KERNPROC)
1504 db_printf("XLOCK: LK_KERNPROC\n");
1506 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1507 td->td_tid, td->td_proc->p_pid,
1508 td->td_proc->p_comm);
1509 if (lockmgr_recursed(lk))
1510 db_printf(" recursed: %d\n", lk->lk_recurse);
1512 db_printf(" waiters: ");
1513 switch (lk->lk_lock & LK_ALL_WAITERS) {
1514 case LK_SHARED_WAITERS:
1515 db_printf("shared\n");
1517 case LK_EXCLUSIVE_WAITERS:
1518 db_printf("exclusive\n");
1520 case LK_ALL_WAITERS:
1521 db_printf("shared and exclusive\n");
1524 db_printf("none\n");
1526 db_printf(" spinners: ");
1527 if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1528 db_printf("exclusive\n");
1530 db_printf("none\n");