/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */
/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
41 #include "opt_adaptive_mutexes.h"
43 #include "opt_hwpmc_hooks.h"
44 #include "opt_sched.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif
/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
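/*
 * Illustrative sketch (not compiled into the kernel): the mtx(9) wrappers
 * below pass &m->mtx_lock as the lock cookie, and mtxlock2mtx() recovers
 * the containing mutex from it.  The helper name here is hypothetical.
 *
 *	static void
 *	example(volatile uintptr_t *c)
 *	{
 *		struct mtx *m = mtxlock2mtx(c);
 *
 *		MPASS(&m->mtx_lock == c);	// cookie points into the mtx
 *	}
 */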
/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)
static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);
/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
#ifdef ADAPTIVE_MUTEXES
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");

static struct lock_delay_config __read_frequently mtx_delay;

SYSCTL_INT(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
#endif

static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL,
    "mtx spin debugging");

static struct lock_delay_config __read_frequently mtx_spin_delay;

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
    &mtx_spin_delay.base, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);
/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx __exclusive_cache_line Giant;

static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *);
static void
assert_mtx(const struct lock_object *lock, int what)
{

	mtx_assert((const struct mtx *)lock, what);
}

static void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}

static void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	panic("spin locks can only use msleep_spin");
}

static uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

static uintptr_t
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
static int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;
	uintptr_t x;

	m = (const struct mtx *)lock;
	x = m->mtx_lock;
	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
	return (*owner != NULL);
}
#endif
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}
void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

#ifdef LOCK_PROFILING
	__mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line);
#else
	__mtx_unlock(m, curthread, opts, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}
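/*
 * Illustrative sketch (not compiled into the kernel): consumers never call
 * the functions above directly; they use the mtx(9) macros that expand to
 * them.  The softc and field names below are hypothetical.
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		int		sc_opens;
 *	};
 *
 *	mtx_lock(&sc->sc_mtx);		// may sleep if contested
 *	sc->sc_opens++;
 *	mtx_unlock(&sc->sc_mtx);
 */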
void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;
#ifdef SMP
	uintptr_t tid, v;
#endif

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
#ifdef SMP
	spinlock_enter();
	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_spin(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,
		    m, 0, 0, file, line);
#else
	__mtx_lock_spin(m, curthread, opts, file, line);
#endif
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}
void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}

int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);
	return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
}
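/*
 * Illustrative sketch (not compiled into the kernel): mtx_trylock(9) is the
 * public face of the functions above, and callers must handle failure.
 * The names below are hypothetical.
 *
 *	if (mtx_trylock(&sc->sc_mtx) == 0)
 *		return (EBUSY);		// lock unavailable, back off
 *	// ... work with state protected by sc_mtx ...
 *	mtx_unlock(&sc->sc_mtx);
 */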
/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
#if LOCK_DEBUG > 0
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
    int line)
#else
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct thread *td;
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;
	struct thread *owner;
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof;
#endif

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return;

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	m = mtxlock2mtx(c);
	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(lv_mtx_owner(v) == td)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
#if LOCK_DEBUG > 0
		opts &= ~MTX_RECURSE;
#endif
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
#if LOCK_DEBUG > 0
	opts &= ~MTX_RECURSE;
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		all_time -= lockstat_nsecs(&m->lock_object);
#endif

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&m->lock_object, 0))
				CTR3(KTR_LOCK,
				    "%s: spinning on %p held by %p",
				    __func__, m, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "spinning", "lockname:\"%s\"",
			    m->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = MTX_READ_VALUE(m);
				owner = lv_mtx_owner(v);
			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "running");
			continue;
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = MTX_READ_VALUE(m);
retry_turnstile:

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
			goto retry_turnstile;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
#ifndef ADAPTIVE_MUTEXES
		owner = mtx_owner(m);
#endif
		MPASS(owner == mtx_owner(m));
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
		v = MTX_READ_VALUE(m);
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
#endif
}
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
#if LOCK_DEBUG > 0
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct lock_delay_arg lda;
	uintptr_t tid;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof;
#endif

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v == tid)) {
		m->mtx_recurse++;
		return;
	}

	if (SCHEDULER_STOPPED())
		return;

	lock_delay_arg_init(&lda, &mtx_spin_delay);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&m->lock_object);
#endif
	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (__predict_true(lda.spin_cnt < 10000000)) {
				lock_delay(&lda);
			} else {
				_mtx_lock_indefinite_check(m, &lda);
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
#endif
}
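/*
 * Illustrative sketch (not compiled into the kernel): spin mutexes are for
 * code that must not sleep, e.g. interrupt filters; mtx_lock_spin() enters
 * a spinlock section (disabling preemption and, on most platforms,
 * interrupts on the local CPU) before falling back to the spin loop above.
 * The name below is hypothetical.
 *
 *	mtx_lock_spin(&sc->sc_intr_mtx);
 *	// ... brief, non-sleeping critical section ...
 *	mtx_unlock_spin(&sc->sc_intr_mtx);
 */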
#ifdef INVARIANTS
static void
thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
{

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("thread_lock() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_recursed(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object,
	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
}
#else
#define thread_lock_validate(m, opts, file, line) do { } while (0)
#endif
#ifndef LOCK_PROFILING
#if LOCK_DEBUG > 0
void
_thread_lock(struct thread *td, int opts, const char *file, int line)
#else
void
_thread_lock(struct thread *td)
#endif
{
	struct mtx *m;
	uintptr_t tid, v;

	tid = (uintptr_t)curthread;

	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire)))
		goto slowpath_noirq;
	spinlock_enter();
	m = td->td_lock;
	thread_lock_validate(m, 0, file, line);
	v = MTX_READ_VALUE(m);
	if (__predict_true(v == MTX_UNOWNED)) {
		if (__predict_false(!_mtx_obtain_lock(m, tid)))
			goto slowpath_unlocked;
	} else if (v == tid) {
		m->mtx_recurse++;
	} else
		goto slowpath_unlocked;
	if (__predict_true(m == td->td_lock)) {
		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
		return;
	}
	if (m->mtx_recurse != 0)
		m->mtx_recurse--;
	else
		_mtx_release_lock_quick(m);
slowpath_unlocked:
	spinlock_exit();
slowpath_noirq:
#if LOCK_DEBUG > 0
	thread_lock_flags_(td, opts, file, line);
#else
	thread_lock_flags_(td, 0, 0, 0);
#endif
}
#endif
void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 1;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	spinlock_enter();

	for (;;) {
retry:
		m = td->td_lock;
		thread_lock_validate(m, opts, file, line);
		v = MTX_READ_VALUE(m);
		for (;;) {
			if (v == MTX_UNOWNED) {
				if (_mtx_obtain_lock_fetch(m, &v, tid))
					break;
				continue;
			}
			if (v == tid) {
				m->mtx_recurse++;
				break;
			}
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (__predict_true(lda.spin_cnt < 10000000)) {
					lock_delay(&lda);
				} else {
					_mtx_lock_indefinite_check(m, &lda);
				}
				if (m != td->td_lock)
					goto retry;
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
		    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}
struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}
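/*
 * Illustrative sketch of how the schedulers pair these helpers while a
 * thread migrates between run queues (details vary by scheduler; the
 * variable names here are hypothetical):
 *
 *	lock = thread_lock_block(td);	// td->td_lock now &blocked_lock
 *	// ... td is in transit; thread_lock(td) spins on blocked_lock ...
 *	mtx_lock_spin(new);		// acquire the destination lock
 *	thread_lock_unblock(td, new);	// publish td's new lock pointer
 */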
/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed, contested (i.e. we
 * need to wake up a blocked thread) or a lockstat probe is active.
 */
#if LOCK_DEBUG > 0
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

	if (__predict_false(v == tid))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v & MTX_RECURSED)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
	if (v == tid && _mtx_release_lock(m, tid))
		return;

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	_mtx_release_lock_quick(m);
	ts = turnstile_lookup(&m->lock_object);
	MPASS(ts != NULL);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}
/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}
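/*
 * Illustrative sketch (not compiled into the kernel): MTX_SYSINIT(9)
 * arranges for mtx_sysinit() above to initialize a mutex during boot.
 * The mutex name below is hypothetical.
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo global lock", MTX_DEF);
 */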
/*
 * Mutex initialization routine; initialize lock `m' with name `name', the
 * lock class selected by `opts', and the options contained in `opts'.  The
 * optional lock type `type' is used as a general lock category name for
 * use with witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			TD_LOCKS_DEC(curthread);

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}
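/*
 * Illustrative sketch (not compiled into the kernel): a driver pairs these
 * calls over the life of its softc; the names below are hypothetical.
 *
 *	mtx_init(&sc->sc_mtx, "foo softc lock", NULL, MTX_DEF);
 *	// ...
 *	mtx_destroy(&sc->sc_mtx);	// must be unowned, or owned but
 *					// neither recursed nor contested
 */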
/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}
static void __noinline
_mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
{
	struct thread *td;

	ldap->spin_cnt++;
	if (ldap->spin_cnt < 60000000 || kdb_active || panicstr != NULL)
		cpu_lock_delay();
	else {
		td = mtx_owner(m);

		/* If the mutex is unlocked, try again. */
		if (td == NULL)
			return;

		printf( "spin lock %p (%s) held by %p (tid %d) too long\n",
		    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
		witness_display_spinlock(&m->lock_object, td, printf);
#endif
		panic("spin lock held too long");
	}
	cpu_spinwait();
}
#ifdef DDB
static void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif