/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */
/*
 * Machine independent bits of mutex implementation.
 */
#include <sys/cdefs.h>

#include "opt_adaptive_mutexes.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm_extern.h>
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif
/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
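
/*
 * Illustrative sketch (not part of the original source; compiled out):
 * given a pointer to a mutex's mtx_lock word, mtxlock2mtx() recovers the
 * enclosing struct mtx via __containerof().  The function name below is
 * hypothetical.
 */
#if 0
static struct mtx *
example_cookie_to_mtx(volatile uintptr_t *c)
{

	/* 'c' must point at some mutex's mtx_lock member. */
	return (mtxlock2mtx(c));
}
#endif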
/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)
static void	assert_mtx(const struct lock_object *lock, int what);
static void	db_show_mtx(const struct lock_object *lock);
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);
/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
	.lc_ddb_show = db_show_mtx,
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
	.lc_owner = owner_mtx,
};

struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
	.lc_ddb_show = db_show_mtx,
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
	.lc_owner = owner_mtx,
};
#ifdef ADAPTIVE_MUTEXES
#ifdef MUTEX_CUSTOM_BACKOFF
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "mtx debugging");

static struct lock_delay_config __read_frequently mtx_delay;

SYSCTL_U16(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
    0, "");
SYSCTL_U16(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
#else
#define	mtx_delay	locks_delay
#endif
#endif
#ifdef MUTEX_SPIN_CUSTOM_BACKOFF
static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin,
    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "mtx spin debugging");

static struct lock_delay_config __read_frequently mtx_spin_delay;

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
    &mtx_spin_delay.base, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);
#else
#define	mtx_spin_delay	locks_delay
#endif
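
/*
 * Illustrative note (assumes the custom backoff code above is compiled in):
 * the delay parameters then become runtime tunables, e.g.:
 *
 *	sysctl debug.mtx.delay_base=<n>
 *	sysctl debug.mtx_spin.delay_max=<n>
 */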
/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx __exclusive_cache_line Giant;

static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *);
static void
assert_mtx(const struct lock_object *lock, int what)
{

	/*
	 * Treat LA_LOCKED as if LA_XLOCKED was asserted.
	 *
	 * Some callers of lc_assert use LA_LOCKED to indicate that either
	 * a shared lock or a write lock was held, while other callers use
	 * the stricter LA_XLOCKED (used as MA_OWNED).
	 *
	 * Mutexes are the only lock class that cannot be shared; as a
	 * result, we can reasonably assume the caller really intends to
	 * assert LA_XLOCKED when asserting LA_LOCKED on a mutex object.
	 */
	if (what & LA_LOCKED) {
		what &= ~LA_LOCKED;
		what |= LA_XLOCKED;
	}
	mtx_assert((const struct mtx *)lock, what);
}
static void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}
static void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	mtx_lock_spin((struct mtx *)lock);
}
static uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}
static uintptr_t
unlock_spin(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock_spin(m);
	return (0);
}
static int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;
	uintptr_t x;

	m = (const struct mtx *)lock;
	x = m->mtx_lock;
	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
	return (*owner != NULL);
}
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
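
/*
 * Illustrative sketch (hypothetical, compiled out): a consumer simply uses
 * the mtx_lock()/mtx_unlock() macros; in builds without the inlines, such
 * as modules, these resolve to the function versions defined below.
 */
#if 0
static struct mtx example_mtx;
static int example_count;

static void
example_module_op(void)
{

	mtx_lock(&example_mtx);
	example_count++;
	mtx_unlock(&example_mtx);
}
#endif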
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}
void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

#ifdef LOCK_PROFILING
	__mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line);
#else
	__mtx_unlock(m, curthread, opts, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}
void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;
#ifdef SMP
	uintptr_t tid, v;
#endif

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
		    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
#ifdef SMP
	spinlock_enter();
	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_spin(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire,
		    m, 0, 0, file, line);
#else
	__mtx_lock_spin(m, curthread, opts, file, line);
#endif
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}
void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
}
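
/*
 * Illustrative sketch (hypothetical, compiled out): a typical mtx_trylock()
 * caller that defers its work instead of blocking when the mutex is busy.
 */
#if 0
static int
example_try_update(struct mtx *mp, int *counterp)
{

	if (!mtx_trylock(mp))
		return (EWOULDBLOCK);	/* Caller is expected to retry. */
	(*counterp)++;
	mtx_unlock(mp);
	return (0);
}
#endif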
/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
#if LOCK_DEBUG > 0
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
    int line)
#else
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;
	struct thread *owner;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif
	struct thread *td;

	td = curthread;
	tid = (uintptr_t)td;
	m = mtxlock2mtx(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
		while (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				goto out_lockstat;
		}
		doing_lockprof = 1;
		all_time -= lockstat_nsecs(&m->lock_object);
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#endif

	if (SCHEDULER_STOPPED_TD(td))
		return;
	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(lv_mtx_owner(v) == td)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
		    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
#if LOCK_DEBUG > 0
		opts &= ~MTX_RECURSE;
#endif
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
#if LOCK_DEBUG > 0
	opts &= ~MTX_RECURSE;
#endif
#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init_noadapt(&lda);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, false,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&m->lock_object, 0))
				CTR3(KTR_LOCK,
				    "%s: spinning on %p held by %p",
				    __func__, m, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "spinning", "lockname:\"%s\"",
			    m->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = MTX_READ_VALUE(m);
				owner = lv_mtx_owner(v);
			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "running");
			continue;
		}
#endif
		ts = turnstile_trywait(&m->lock_object);
		v = MTX_READ_VALUE(m);
retry_turnstile:

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}
#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif
		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
			goto retry_turnstile;
		}
		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
#ifndef ADAPTIVE_MUTEXES
		owner = mtx_owner(m);
#endif
		MPASS(owner == mtx_owner(m));
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
		v = MTX_READ_VALUE(m);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
}
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
#if LOCK_DEBUG > 0
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct lock_delay_arg lda;
	uintptr_t tid;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(spin__acquire)) {
		while (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				goto out_lockstat;
		}
		doing_lockprof = 1;
		spin_time -= lockstat_nsecs(&m->lock_object);
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#endif

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v == tid)) {
		m->mtx_recurse++;
		return;
	}

	if (SCHEDULER_STOPPED())
		return;

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, true, &contested, &waittime);

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (__predict_true(lda.spin_cnt < 10000000)) {
				lock_delay(&lda);
			} else {
				_mtx_lock_indefinite_check(m, &lda);
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
}
#ifdef INVARIANTS
static void
thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
{

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("thread_lock() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) == 0,
	    ("thread_lock: got a recursive mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object,
	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
}
#else
#define	thread_lock_validate(m, opts, file, line)	do { } while (0)
#endif
#ifndef LOCK_PROFILING
#if LOCK_DEBUG > 0
void
_thread_lock(struct thread *td, int opts, const char *file, int line)
#else
void
_thread_lock(struct thread *td)
#endif
{
	struct mtx *m;
	uintptr_t tid;

	tid = (uintptr_t)curthread;

	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire)))
		goto slowpath_noirq;
	spinlock_enter();
	m = td->td_lock;
	thread_lock_validate(m, 0, file, line);
	if (__predict_false(m == &blocked_lock))
		goto slowpath_unlocked;
	if (__predict_false(!_mtx_obtain_lock(m, tid)))
		goto slowpath_unlocked;
	if (__predict_true(m == td->td_lock)) {
		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
		return;
	}
	_mtx_release_lock_quick(m);
slowpath_unlocked:
	spinlock_exit();
slowpath_noirq:
#if LOCK_DEBUG > 0
	thread_lock_flags_(td, opts, file, line);
#else
	thread_lock_flags_(td, 0, 0, 0);
#endif
}
#endif
void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	struct lock_delay_arg lda;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 1;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	spinlock_enter();

	for (;;) {
retry:
		m = td->td_lock;
		thread_lock_validate(m, opts, file, line);
		v = MTX_READ_VALUE(m);
		for (;;) {
			if (v == MTX_UNOWNED) {
				if (_mtx_obtain_lock_fetch(m, &v, tid))
					break;
				continue;
			}
			MPASS(v != tid);
			lock_profile_obtain_lock_failed(&m->lock_object, true,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (__predict_true(lda.spin_cnt < 10000000)) {
					lock_delay(&lda);
				} else {
					_mtx_lock_indefinite_check(m, &lda);
				}
				if (m != td->td_lock) {
					spinlock_enter();
					goto retry;
				}
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		_mtx_release_lock_quick(m);
	}
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}
struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	lock = td->td_lock;
	mtx_assert(lock, MA_OWNED);
	td->td_lock = &blocked_lock;

	return (lock);
}
void
thread_lock_unblock(struct thread *td, struct mtx *new)
{

	mtx_assert(new, MA_OWNED);
	KASSERT(td->td_lock == &blocked_lock,
	    ("thread %p lock %p not blocked_lock %p",
	    td, td->td_lock, &blocked_lock));
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}
void
thread_lock_block_wait(struct thread *td)
{

	while (td->td_lock == &blocked_lock)
		cpu_spinwait();

	/* Acquire fence to be certain that all thread state is visible. */
	atomic_thread_fence_acq();
}
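
/*
 * Illustrative sketch (hypothetical, compiled out): the pattern a scheduler
 * uses when switching a thread's lock pointer.  Parking td_lock on
 * blocked_lock makes concurrent thread_lock() callers spin in
 * thread_lock_flags_() until the new pointer is published by
 * thread_lock_unblock().  Called with td's current thread lock held.
 */
#if 0
static void
example_retarget_thread_lock(struct thread *td, struct mtx *new)
{
	struct mtx *old;

	old = thread_lock_block(td);	/* td_lock now points at blocked_lock. */
	mtx_lock_spin(new);
	thread_lock_unblock(td, new);	/* Publish the new lock pointer. */
	mtx_unlock_spin(old);
}
#endif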
void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	lock = td->td_lock;
	mtx_assert(lock, MA_OWNED);
	td->td_lock = new;
	mtx_unlock_spin(lock);
}
/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed, contested (i.e. we
 * need to wake up a blocked thread), or a lockstat probe is active.
 */
#if LOCK_DEBUG > 0
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

	if (__predict_false(v == tid))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v & MTX_RECURSED)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
	if (v == tid && _mtx_release_lock(m, tid))
		return;

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	_mtx_release_lock_quick(m);
	ts = turnstile_lookup(&m->lock_object);
	MPASS(ts != NULL);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts);
	turnstile_chain_unlock(&m->lock_object);
}
/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */
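
/*
 * Illustrative sketch (hypothetical, compiled out; simplified): the rough
 * shape of the inline spin unlock.  The real __mtx_unlock_spin() macro also
 * handles lock profiling; this only shows the recursion/release/
 * spinlock_exit() core.
 */
#if 0
static void
example_unlock_spin_shape(struct mtx *m)
{

	if (mtx_recursed(m))
		m->mtx_recurse--;
	else
		_mtx_release_lock_quick(m);
	spinlock_exit();
}
#endif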
/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (KERNEL_PANICKED() || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
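
/*
 * Illustrative sketch (hypothetical, compiled out): callers assert lock
 * state at function entry; with INVARIANTS disabled, mtx_assert() expands
 * to nothing.
 */
#if 0
static void
example_requires_lock(struct mtx *mp)
{

	mtx_assert(mp, MA_OWNED);
	/* ... code that relies on the caller holding mp ... */
}
#endif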
/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}
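
/*
 * Illustrative sketch (hypothetical, compiled out): MTX_SYSINIT() registers
 * a SYSINIT that calls mtx_sysinit() above during boot, initializing a
 * statically declared mutex.
 */
#if 0
static struct mtx example_global_mtx;
MTX_SYSINIT(example_global, &example_global_mtx, "example global", MTX_DEF);
#endif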
/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}
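
/*
 * Illustrative sketch (hypothetical, compiled out): a driver initializing a
 * per-instance sleep mutex; "example driver" serves as the witness
 * type/category name.
 */
#if 0
struct example_softc {
	struct mtx	sc_mtx;
};

static void
example_attach(struct example_softc *sc)
{

	mtx_init(&sc->sc_mtx, "example softc", "example driver", MTX_DEF);
}
#endif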
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) {
			lock_profile_release_lock(&m->lock_object, true);
			spinlock_exit();
		} else {
			TD_LOCKS_DEC(curthread);
			lock_profile_release_lock(&m->lock_object, false);
		}

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}
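
/*
 * Illustrative sketch (hypothetical, compiled out): the matching teardown
 * for the example attach above; the mutex must not be recursed or
 * contested when destroyed.
 */
#if 0
static void
example_detach(struct example_softc *sc)
{

	mtx_destroy(&sc->sc_mtx);
}
#endif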
/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}
static void __noinline
_mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
{
	struct thread *td;

	ldap->spin_cnt++;
	if (ldap->spin_cnt < 60000000 || kdb_active || KERNEL_PANICKED())
		cpu_lock_delay();
	else {
		td = mtx_owner(m);

		/* If the mutex is unlocked, try again. */
		if (td == NULL)
			return;

		printf("spin lock %p (%s) held by %p (tid %d) too long\n",
		    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
		witness_display_spinlock(&m->lock_object, td, printf);
#endif
		panic("spin lock held too long");
	}
	cpu_spinwait();
}
void
mtx_spin_wait_unlocked(struct mtx *m)
{
	struct lock_delay_arg lda;

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("%s() of destroyed mutex %p", __func__, m));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("%s() of sleep mutex %p (%s)", __func__, m,
	    m->lock_object.lo_name));
	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
	    m->lock_object.lo_name));

	lda.spin_cnt = 0;

	while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) {
		if (__predict_true(lda.spin_cnt < 10000000)) {
			cpu_spinwait();
			lda.spin_cnt++;
		} else {
			_mtx_lock_indefinite_check(m, &lda);
		}
	}
}
void
mtx_wait_unlocked(struct mtx *m)
{
	struct thread *owner;
	uintptr_t v;

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("%s() of destroyed mutex %p", __func__, m));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("%s() not a sleep mutex %p (%s)", __func__, m,
	    m->lock_object.lo_name));
	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
	    m->lock_object.lo_name));

	for (;;) {
		v = atomic_load_acq_ptr(&m->mtx_lock);
		if (v == MTX_UNOWNED) {
			break;
		}
		owner = lv_mtx_owner(v);
		if (!TD_IS_RUNNING(owner)) {
			mtx_lock(m);
			mtx_unlock(m);
			break;
		}
		cpu_spinwait();
	}
}
#ifdef DDB
static void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif