/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * The assumption is that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))

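/*
 * Illustrative sketch (not part of the original file): given the lock
 * cookie address that the _mtx_*() functions receive, mtxlock2mtx()
 * recovers the containing mutex by subtracting the member offset, e.g.:
 *
 *	struct mtx *m;
 *	volatile uintptr_t *c = &some_mtx.mtx_lock;
 *
 *	m = mtxlock2mtx(c);		// m == &some_mtx
 *
 * Here `some_mtx' is a hypothetical mutex used only for illustration.
 */
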
/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, int how);
static void	lock_spin(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static int	unlock_mtx(struct lock_object *lock);
static int	unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, int how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, int how)
{

	panic("spin locks can only use msleep_spin");
}

int
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

int
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m = (const struct mtx *)lock;

	*owner = mtx_owner(m);
	return (mtx_unowned(m) == 0);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	__mtx_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
	__mtx_unlock(m, curthread, opts, file, line);
}

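/*
 * Illustrative sketch (assumes a hypothetical driver softc mutex `sc_mtx'
 * and counter `sc_count'): modules reach the function versions above
 * through the mtx_lock()/mtx_unlock() macros, e.g.:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_count++;			// protected section
 *	mtx_unlock(&sc->sc_mtx);
 */
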
void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
			    m, contested, waittime, file, line);
	}

	return (rval);
}

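/*
 * Illustrative sketch (hypothetical mutex `sc_mtx'): mtx_trylock() returns
 * non-zero on success and never sleeps, so it is useful where blocking is
 * not an option:
 *
 *	if (mtx_trylock(&sc->sc_mtx)) {
 *		// got the lock without sleeping
 *		mtx_unlock(&sc->sc_mtx);
 *	} else {
 *		// lock is busy; defer the work
 *	}
 */
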
/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		v = m->mtx_lock;
		if (v != MTX_UNOWNED) {
			owner = (struct thread *)(v & ~MTX_FLAGMASK);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&m->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, m, owner);
				while (mtx_owner(m) == owner &&
				    TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				continue;
			}
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
#endif
}

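/*
 * Illustrative note (a sketch, not the verbatim inline code): mtx_lock()
 * expands to an atomic compare-and-set fast path and only enters
 * __mtx_lock_sleep() when that fails, roughly:
 *
 *	if (!_mtx_obtain_lock(m, tid))
 *		__mtx_lock_sleep(&m->mtx_lock, tid, opts, file, line);
 *
 * See the __mtx_lock() macro in sys/mutex.h for the real definition.
 */
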
static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	int i = 0;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
	while (!_mtx_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
	    contested, waittime, (file), (line));
	LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
}
#endif /* SMP */

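/*
 * Illustrative sketch (hypothetical spin mutex `intr_mtx'): spin mutexes
 * disable interrupts via spinlock_enter() on acquisition, which is why
 * they are the mutex type usable from primary (filter) interrupt context:
 *
 *	mtx_lock_spin(&intr_mtx);
 *	// short critical section; no voluntary sleeping allowed
 *	mtx_unlock_spin(&intr_mtx);
 */
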
void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
#endif

	i = 0;
	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED())
		return;

	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
			    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
			spin_cnt++;
#endif
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
	}
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
		    m, contested, waittime, (file), (line));
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
}

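/*
 * Illustrative sketch: thread_lock() must re-read td->td_lock after each
 * acquisition attempt because the scheduler may migrate the pointer while
 * we spin; callers simply use the macro pair, e.g. (hypothetical flag
 * update shown only for illustration):
 *
 *	thread_lock(td);
 *	td->td_flags |= TDF_NEEDRESCHED;
 *	thread_unlock(td);
 */
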
struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}

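/*
 * Illustrative sketch (the scheduler's pattern, simplified; `new_lock' is a
 * hypothetical run-queue lock): thread_lock_block() parks td->td_lock on
 * blocked_lock while a thread is in transit between run queues, and
 * thread_lock_unblock() publishes the new lock:
 *
 *	struct mtx *old;
 *
 *	old = thread_lock_block(td);		// td->td_lock = &blocked_lock
 *	// ... move td to another run queue ...
 *	thread_lock_unblock(td, new_lock);	// td->td_lock = new_lock
 */
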
/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (panicstr != NULL || dumping)
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

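/*
 * Illustrative sketch (hypothetical mutex `sc_mtx'): code that requires a
 * lock to be held can document and enforce that under INVARIANTS:
 *
 *	mtx_assert(&sc->sc_mtx, MA_OWNED);
 *	mtx_assert(&sc->sc_mtx, MA_OWNED | MA_NOTRECURSED);
 */
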
/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}

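/*
 * Illustrative sketch (hypothetical mutex `foo_mtx'): MTX_SYSINIT()
 * arranges for mtx_sysinit() to run during boot, so the mutex is
 * initialized before first use:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo lock", MTX_DEF);
 */
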
/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name.'  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;

	lock_init(&m->lock_object, class, name, type, flags);
}

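/*
 * Illustrative sketch (hypothetical driver softc): typical initialization
 * of a sleep mutex, with a witness type shared by all instances:
 *
 *	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "foo softc", MTX_DEF);
 *
 * The matching mtx_destroy(&sc->sc_mtx) belongs in the detach path.
 */
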
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED | MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif

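/*
 * Illustrative sample (hypothetical ddb session): the debugger's
 * "show lock" command routes here for mutexes and might print, for a
 * held and contested sleep mutex, something like:
 *
 *	 flags: {DEF, RECURSE}
 *	 state: {OWNED, CONTESTED}
 *	 owner: 0xfffff80003123000 (tid 100042, pid 713, "foothread")
 */
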