/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))

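/*
 * For example, the _mtx_*() KPI is handed &m->mtx_lock as its cookie and
 * recovers the enclosing mutex with mtxlock2mtx().  A minimal sketch with
 * illustrative names:
 *
 *	struct mtx some_mtx;
 *	volatile uintptr_t *c = &some_mtx.mtx_lock;
 *	struct mtx *m = mtxlock2mtx(c);		// m == &some_mtx
 */
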
/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

#ifdef ADAPTIVE_MUTEXES
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");

static struct lock_delay_config __read_frequently mtx_delay;

SYSCTL_INT(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
#endif

static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL,
    "mtx spin debugging");

static struct lock_delay_config __read_frequently mtx_spin_delay;

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
    &mtx_spin_delay.base, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);

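/*
 * The knobs above expose the lock_delay() backoff parameters under
 * debug.mtx.* and debug.mtx_spin.*.  Example (illustrative):
 *
 *	# sysctl debug.mtx.delay_base debug.mtx_spin.delay_max
 */
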
/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx __exclusive_cache_line Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	panic("spin locks can only use msleep_spin");
}

uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

uintptr_t
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;
	uintptr_t x;

	m = (const struct mtx *)lock;
	x = m->mtx_lock;
	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
	return (*owner != NULL);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

#ifdef LOCK_PROFILING
	__mtx_unlock_sleep(c, opts, file, line);
#else
	__mtx_unlock(m, curthread, opts, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}

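/*
 * Typical consumer usage of the mtx_lock()/mtx_unlock() macros backed by the
 * functions above.  A minimal sketch; the softc layout and names below are
 * hypothetical:
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		int		sc_count;
 *	};
 *
 *	static void
 *	foo_bump(struct foo_softc *sc)
 *	{
 *
 *		mtx_lock(&sc->sc_mtx);
 *		sc->sc_count++;
 *		mtx_unlock(&sc->sc_mtx);
 *	}
 */
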
void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;
#ifdef SMP
	uintptr_t tid, v;
#endif

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
#ifdef SMP
	spinlock_enter();
	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_spin(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,
		    m, 0, 0, file, line);
#else
	__mtx_lock_spin(m, curthread, opts, file, line);
#endif
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}

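/*
 * Spin mutex usage mirrors the sleep mutex KPI, but the critical section
 * runs with interrupts disabled on the local CPU (via spinlock_enter()).
 * A hedged sketch, reusing the hypothetical softc from above:
 *
 *	mtx_lock_spin(&sc->sc_intr_mtx);
 *	... state shared with an interrupt handler ...
 *	mtx_unlock_spin(&sc->sc_intr_mtx);
 */
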
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}

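/*
 * Example of the non-blocking KPI built on _mtx_trylock_flags_(): if the
 * mutex is owned by another thread, the call fails instead of sleeping.
 * Sketch with hypothetical names:
 *
 *	if (mtx_trylock(&sc->sc_mtx)) {
 *		... fast path with the lock held ...
 *		mtx_unlock(&sc->sc_mtx);
 *	} else {
 *		... defer the work or try again later ...
 *	}
 */
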
/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
#if LOCK_DEBUG > 0
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
    int line)
#else
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct thread *td;
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof;
#endif

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return;

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	m = mtxlock2mtx(c);
	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(lv_mtx_owner(v) == td)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
#if LOCK_DEBUG > 0
		opts &= ~MTX_RECURSE;
#endif
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
#if LOCK_DEBUG > 0
	opts &= ~MTX_RECURSE;
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		all_time -= lockstat_nsecs(&m->lock_object);
#endif

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&m->lock_object, 0))
				CTR3(KTR_LOCK,
				    "%s: spinning on %p held by %p",
				    __func__, m, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "spinning", "lockname:\"%s\"",
			    m->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = MTX_READ_VALUE(m);
				owner = lv_mtx_owner(v);
			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "running");
			continue;
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = MTX_READ_VALUE(m);

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			v = MTX_READ_VALUE(m);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
		MPASS(owner == mtx_owner(m));
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
		v = MTX_READ_VALUE(m);
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
#endif
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
#if LOCK_DEBUG > 0
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct lock_delay_arg lda;
	uintptr_t tid;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof;
#endif

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v == tid)) {
		m->mtx_recurse++;
		return;
	}

	if (SCHEDULER_STOPPED())
		return;

	lock_delay_arg_init(&lda, &mtx_spin_delay);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&m->lock_object);
#endif
	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (lda.spin_cnt < 10000000) {
				lock_delay(&lda);
			} else {
				lda.spin_cnt++;
				if (lda.spin_cnt < 60000000 || kdb_active ||
				    panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
#endif
}
#endif /* SMP */

#ifdef INVARIANTS
static void
thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
{

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("thread_lock() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object,
	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
}
#else
#define thread_lock_validate(m, opts, file, line) do { } while (0)
#endif

#ifndef LOCK_PROFILING
#if LOCK_DEBUG > 0
void
_thread_lock(struct thread *td, int opts, const char *file, int line)
#else
void
_thread_lock(struct thread *td)
#endif
{
	struct mtx *m;
	uintptr_t tid, v;

	tid = (uintptr_t)curthread;

	spinlock_enter();
	m = td->td_lock;
	thread_lock_validate(m, 0, file, line);
	v = MTX_READ_VALUE(m);
	if (__predict_true(v == MTX_UNOWNED)) {
		if (__predict_false(!_mtx_obtain_lock(m, tid)))
			goto slowpath_unlocked;
	} else if (v == tid) {
		m->mtx_recurse++;
	} else
		goto slowpath_unlocked;
	if (__predict_true(m == td->td_lock)) {
		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
		return;
	}
	if (m->mtx_recurse != 0)
		m->mtx_recurse--;
	else
		_mtx_release_lock_quick(m);
slowpath_unlocked:
	spinlock_exit();
	thread_lock_flags_(td, 0, 0, 0);
}
#endif

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 1;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	for (;;) {
retry:
		v = MTX_UNOWNED;
		spinlock_enter();
		m = td->td_lock;
		thread_lock_validate(m, opts, file, line);
		for (;;) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			if (v == MTX_UNOWNED)
				continue;
			if (v == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (lda.spin_cnt < 10000000) {
					lock_delay(&lda);
				} else {
					lda.spin_cnt++;
					if (lda.spin_cnt < 60000000 ||
					    kdb_active || panicstr != NULL)
						DELAY(1);
					else
						_mtx_lock_spin_failed(m);
					cpu_spinwait();
				}
				if (m != td->td_lock)
					goto retry;
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
		    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}

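/*
 * The three helpers above implement the td_lock indirection used while a
 * thread migrates between containers (run queue, turnstile, sleepqueue).
 * A rough sketch of the caller's protocol, assuming the thread lock is
 * already held:
 *
 *	lock = thread_lock_block(td);	// td_lock now points at blocked_lock
 *					// and the old lock has been dropped
 *	... move td to its new container, whose spin lock is held ...
 *	thread_lock_unblock(td, new);	// publish the new td_lock
 */
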
/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed, contested (i.e. we
 * need to wake up a blocked thread) or a lockstat probe is active.
 */
#if LOCK_DEBUG > 0
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
#else
void
__mtx_unlock_sleep(volatile uintptr_t *c)
#endif
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid, v;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);
	v = MTX_READ_VALUE(m);

	if (v & MTX_RECURSED) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
	if (v == tid && _mtx_release_lock(m, tid))
		return;

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	_mtx_release_lock_quick(m);
	ts = turnstile_lookup(&m->lock_object);
	MPASS(ts != NULL);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

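/*
 * Example: a function can document and enforce its locking contract with
 * mtx_assert() (hypothetical names):
 *
 *	static void
 *	foo_modify_locked(struct foo_softc *sc)
 *	{
 *
 *		mtx_assert(&sc->sc_mtx, MA_OWNED);
 *		... mutate state protected by sc_mtx ...
 *	}
 */
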
/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}

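/*
 * Example: MTX_SYSINIT() arranges for mtx_sysinit() to run during boot so
 * that a global mutex is initialized before first use (illustrative names):
 *
 *	static struct mtx foo_global_mtx;
 *	MTX_SYSINIT(foo_global, &foo_global_mtx, "foo global", MTX_DEF);
 */
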
/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}

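/*
 * Example invocations (illustrative names): a plain sleep mutex, and a
 * recursable spin mutex with a shared witness type:
 *
 *	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
 *	mtx_init(&sc->sc_intr_mtx, device_get_nameunit(dev), "foo intr",
 *	    MTX_SPIN | MTX_RECURSE);
 */
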
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			TD_LOCKS_DEC(curthread);

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif

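/*
 * Example ddb interaction with the handler above (output is illustrative
 * and abbreviated; the class/name lines come from the generic lock code):
 *
 *	db> show lock 0xffffffff81e0c0c0
 *	 class: sleep mutex
 *	 name: Giant
 *	 flags: {DEF, RECURSE}
 *	 state: {OWNED}
 *	 owner: 0xfffff80003052000 (tid 100002, pid 1, "init")
 */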