/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */
/*
 * Machine independent bits of mutex implementation.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_sched.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif
/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
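/*
 * Illustrative sketch (compiled out; not part of the original file): the
 * lock word holds either MTX_UNOWNED/MTX_DESTROYED or the owning thread
 * pointer with the MTX_RECURSED/MTX_CONTESTED flag bits OR-ed into its low
 * bits, which is why mtx_owner() masks with ~MTX_FLAGMASK.  The helper name
 * below is hypothetical.
 */
#if 0
static struct thread *
example_decode_owner(struct mtx *m)
{
	uintptr_t v;

	v = m->mtx_lock;
	if (v == MTX_UNOWNED || v == MTX_DESTROYED)
		return (NULL);		/* no owner to report */
	return ((struct thread *)(v & ~MTX_FLAGMASK));
}
#endif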
static void	assert_mtx(struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, int how);
static void	lock_spin(struct lock_object *lock, int how);
static int	unlock_mtx(struct lock_object *lock);
static int	unlock_spin(struct lock_object *lock);
/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
};
/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;
static void
assert_mtx(struct lock_object *lock, int what)
{

	mtx_assert((struct mtx *)lock, what);
}

static void
lock_mtx(struct lock_object *lock, int how)
{

	mtx_lock((struct mtx *)lock);
}

static void
lock_spin(struct lock_object *lock, int how)
{

	panic("spin locks can only use msleep_spin");
}

static int
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

static int
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);

	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}
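/*
 * Illustrative sketch (compiled out): the canonical pattern for guarding
 * shared state with a sleep mutex; in the non-inlined case these calls end
 * up in the _mtx_lock_flags()/_mtx_unlock_flags() functions in this file.
 * The names "example_lock" and "example_count" are hypothetical.
 */
#if 0
static struct mtx example_lock;
static int example_count;

static void
example_increment(void)
{

	mtx_lock(&example_lock);
	example_count++;		/* protected by example_lock */
	mtx_unlock(&example_lock);
}
#endif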
void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	if (m->mtx_recurse == 0)
		lock_profile_release_lock(&m->lock_object);
	_rel_sleep_lock(m, curthread, opts, file, line);
}
void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
	_rel_spin_lock(m);
}
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval, contested = 0;
	uint64_t waittime = 0;

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			lock_profile_obtain_lock_success(&m->lock_object,
			    contested, waittime, file, line);
	}

	return (rval);
}
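/*
 * Illustrative sketch (compiled out): the usual mtx_trylock() idiom; the
 * caller must cope with failure instead of sleeping.  The function and
 * parameter names are hypothetical.
 */
#if 0
static int
example_try_update(struct mtx *lock, int *counter)
{

	if (!mtx_trylock(lock))
		return (EBUSY);		/* owned by someone else; don't block */
	(*counter)++;
	mtx_unlock(lock);
	return (0);
}
#endif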
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
	int contested = 0;
	uint64_t waittime = 0;
	uintptr_t v;

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, tid)) {
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		v = m->mtx_lock;
		if (v != MTX_UNOWNED) {
			owner = (struct thread *)(v & ~MTX_FLAGMASK);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&m->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, m, owner);
				while (mtx_owner(m) == owner &&
				    TD_IS_RUNNING(owner))
					cpu_spinwait();
				continue;
			}
		}
#endif
		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

		MPASS(v != MTX_CONTESTED);

#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the current owner of the lock is executing on another
		 * CPU quit the hard path and try to spin.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}
#endif
		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif
		/*
		 * Block on the turnstile.
		 */
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	lock_profile_obtain_lock_success(&m->lock_object, contested,
	    waittime, file, line);
}
static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf( "spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td);
#endif
	panic("spin lock held too long");
}
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0, contested = 0;
	uint64_t waittime = 0;

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
	while (!_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	lock_profile_obtain_lock_success(&m->lock_object, contested,
	    waittime, (file), (line));
}
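/*
 * Illustrative sketch (compiled out): spin mutexes busy-wait with
 * interrupts disabled on the local CPU, so they are the tool for state
 * shared with interrupt handlers, and their critical sections must stay
 * short.  The names below are hypothetical.
 */
#if 0
static struct mtx example_spin;
static u_int example_hw_events;

static void
example_intr_handler(void)
{

	mtx_lock_spin(&example_spin);	/* spins, never sleeps */
	example_hw_events++;
	mtx_unlock_spin(&example_spin);
}
#endif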
void
_thread_lock_flags(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i, contested;
	uint64_t waittime;

	contested = i = 0;
	waittime = 0;
	tid = (uintptr_t)curthread;
	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line);
		while (!_obtain_lock(m, tid)) {
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		_rel_spin_lock(m);	/* does spinlock_exit() */
	}
	if (m->mtx_recurse == 0)
		lock_profile_obtain_lock_success(&m->lock_object, contested,
		    waittime, (file), (line));
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	spinlock_enter();
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}
void
thread_lock_unblock(struct thread *td, struct mtx *new)
{

	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
	spinlock_exit();
}
void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}
/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_release_lock_quick(m);
	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}
/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */
/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL || dumping)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
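/*
 * Illustrative sketch (compiled out): functions with locking protocols
 * typically encode them with mtx_assert(), which compiles away in kernels
 * built without INVARIANTS.  The names below are hypothetical.
 */
#if 0
static void
example_locked_op(struct mtx *lock, int *counter)
{

	mtx_assert(lock, MA_OWNED);	/* caller must hold the lock */
	(*counter)++;
}
#endif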
/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG
void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif
/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
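/*
 * Illustrative sketch (compiled out): MTX_SYSINIT() registers a SYSINIT
 * that runs mtx_sysinit() at boot, so a subsystem's mutex is initialized
 * without an explicit call from its own code.  The names are hypothetical.
 */
#if 0
static struct mtx example_mtx;
MTX_SYSINIT(example_mtx, &example_mtx, "example subsystem lock", MTX_DEF);
#endif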
/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_class *class;
	int flags;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);

	/* Diagnostic and error correction */
#ifdef MUTEX_DEBUG
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;

	lock_init(&m->lock_object, class, name, type, flags);
}
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}
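/*
 * Illustrative sketch (compiled out): the typical dynamic lifecycle.  A
 * mutex must be initialized before first use and may only be destroyed
 * unowned, or owned but neither recursed nor contested.  The softc below
 * is hypothetical.
 */
#if 0
struct example_softc {
	struct mtx sc_mtx;
	int sc_refs;
};

static void
example_attach(struct example_softc *sc)
{

	mtx_init(&sc->sc_mtx, "example softc lock", NULL, MTX_DEF);
}

static void
example_detach(struct example_softc *sc)
{

	mtx_destroy(&sc->sc_mtx);
}
#endif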
/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}
#ifdef DDB
static void
db_show_mtx(struct lock_object *lock)
{
	struct thread *td;
	struct mtx *m;

	m = (struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif