/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */
/*
 * Machine independent bits of mutex implementation.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_mprof.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
/*
 * Force MUTEX_WAKE_ALL for now.
 * single thread wakeup needs fixes to avoid race conditions with
 * priority inheritance.
 */
#ifndef MUTEX_WAKE_ALL
#define	MUTEX_WAKE_ALL
#endif
/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
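
/*
 * For illustration only (authoritative values live in sys/mutex.h): the
 * lock word these macros decode holds either MTX_UNOWNED or the owning
 * thread pointer with flag bits OR'd into the low bits covered by
 * MTX_FLAGMASK, roughly:
 *
 *	m->mtx_lock == MTX_UNOWNED			lock is free
 *	m->mtx_lock == (uintptr_t)td			owned by thread td
 *	m->mtx_lock == (uintptr_t)td | MTX_CONTESTED	owned, with waiters
 */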
#ifdef DDB
static void	db_show_mtx(struct lock_object *lock);
#endif
/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE,
#ifdef DDB
	db_show_mtx
#endif
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE,
#ifdef DDB
	db_show_mtx
#endif
};
/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;
#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");
struct mutex_prof {
	const char	*name;
	const char	*file;
	int		line;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_holding;
	uintmax_t	cnt_contest_locking;
	struct mutex_prof *next;
};
/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#ifdef MPROF_BUFFERS
#define	NUM_MPROF_BUFFERS	MPROF_BUFFERS
#else
#define	NUM_MPROF_BUFFERS	1000
#endif
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#ifndef MPROF_HASH_SIZE
#define	MPROF_HASH_SIZE		1009
#endif
#if NUM_MPROF_BUFFERS >= MPROF_HASH_SIZE
#error MPROF_HASH_SIZE must be larger than NUM_MPROF_BUFFERS
#endif
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define	MPROF_SBUF_SIZE		(256 * 400)
static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");
/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);
static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}
static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;
	static int multiplier = 1;

	if (first_free_mprof_buf == 0)
		return (SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded")));

retry_sbufops:
	sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
	sbuf_printf(sb, "\n%6s %12s %11s %5s %12s %12s %s\n",
	    "max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
	/*
	 * XXX this spinlock seems to be by far the largest perpetrator
	 * of spinlock latency (1.6 msec on an Athlon1600 was recorded
	 * even before I pessimized it further by moving the average
	 * computation here).
	 */
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i) {
		sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
		    mprof_buf[i].cnt_max / 1000,
		    mprof_buf[i].cnt_tot / 1000,
		    mprof_buf[i].cnt_cur,
		    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
			mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
		    mprof_buf[i].cnt_contest_holding,
		    mprof_buf[i].cnt_contest_locking,
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
		if (sbuf_overflowed(sb)) {
			mtx_unlock_spin(&mprof_mtx);
			sbuf_delete(sb);
			multiplier++;
			goto retry_sbufops;
		}
	}
	mtx_unlock_spin(&mprof_mtx);

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
static int
reset_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	if (first_free_mprof_buf == 0)
		return (0);

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);

	mtx_lock_spin(&mprof_mtx);
	bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
	bzero(mprof_hash, sizeof(mprof_hash));
	first_free_mprof_buf = 0;
	mtx_unlock_spin(&mprof_mtx);
	return (0);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_mutex_prof_stats, "I", "Reset mutex profiling statistics");
#endif /* MUTEX_PROFILING */
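
/*
 * Example (illustrative): with "options MUTEX_PROFILING" compiled in, the
 * records above can be enabled, inspected, and cleared from userland via
 * sysctl(8):
 *
 *	sysctl debug.mutex.prof.enable=1
 *	sysctl debug.mutex.prof.stats
 *	sysctl debug.mutex.prof.reset=1
 */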
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
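
/*
 * For reference only, a sketch of the inline fast path these functions
 * back up (the authoritative macros live in sys/mutex.h, not here):
 * mtx_lock() first tries a single atomic compare-and-set of the lock word
 * and falls into _mtx_lock_sleep() only on failure, roughly:
 *
 *	if (!atomic_cmpset_acq_ptr(&m->mtx_lock, MTX_UNOWNED, tid))
 *		_mtx_lock_sleep(m, tid, opts, file, line);
 */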
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->mtx_acqtime == 0) {
		m->mtx_filename = file;
		m->mtx_lineno = line;
		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}
void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->mtx_acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		u_int hash;

		now = nanoseconds();
		acqtime = m->mtx_acqtime;
		m->mtx_acqtime = 0;
		if (m->mtx_filename == NULL)
			m->mtx_filename = unknown;
		for (p = m->mtx_filename;
		    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == m->mtx_lineno &&
			    strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = m->mtx_lineno;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if (now - acqtime > mpp->cnt_max)
			mpp->cnt_max = now - acqtime;
		mpp->cnt_tot += now - acqtime;
		mpp->cnt_cur++;
		/*
		 * There's a small race, really we should cmpxchg
		 * 0 with the current value, but that would bill
		 * the contention to the wrong lock instance if
		 * it followed this also.
		 */
		mpp->cnt_contest_holding += m->mtx_contest_holding;
		m->mtx_contest_holding = 0;
		mpp->cnt_contest_locking += m->mtx_contest_locking;
		m->mtx_contest_locking = 0;
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
#endif
	_rel_sleep_lock(m, curthread, opts, file, line);
}
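
/*
 * Example (illustrative sketch, not part of this file): typical consumer
 * usage of the mutex KPI backed by the functions above; the `foo_softc'
 * structure and names are made up.
 */
#if 0
struct foo_softc {
	struct mtx	sc_mtx;		/* protects sc_count */
	int		sc_count;
};

static void
foo_attach(struct foo_softc *sc)
{

	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
}

static void
foo_bump(struct foo_softc *sc)
{

	mtx_lock(&sc->sc_mtx);
	sc->sc_count++;
	mtx_unlock(&sc->sc_mtx);
}

static void
foo_detach(struct foo_softc *sc)
{

	mtx_destroy(&sc->sc_mtx);
}
#endif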
void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}
void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
	_rel_spin_lock(m);
}
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
	}

	return (rval);
}
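
/*
 * Example (illustrative sketch; `foo_softc' and friends are made up, see
 * the example after _mtx_unlock_flags() above): mtx_trylock() returns
 * non-zero on success, so callers that must not block bail out on failure.
 */
#if 0
static int
foo_bump_nowait(struct foo_softc *sc)
{

	if (!mtx_trylock(&sc->sc_mtx))
		return (EBUSY);
	sc->sc_count++;
	mtx_unlock(&sc->sc_mtx);
	return (0);
}
#endif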
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	volatile struct thread *owner;
#endif
	uintptr_t v;
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef MUTEX_PROFILING
	int contested;
#endif

	if (mtx_owned(m)) {
		KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->mtx_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

#ifdef MUTEX_PROFILING
	contested = 0;
#endif
	while (!_obtain_lock(m, tid)) {
#ifdef MUTEX_PROFILING
		contested = 1;
		atomic_add_int(&m->mtx_contest_holding, 1);
#endif
		turnstile_lock(&m->mtx_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_release(&m->mtx_object);
			cpu_spinwait();
			continue;
		}

#ifdef MUTEX_WAKE_ALL
		MPASS(v != MTX_CONTESTED);
#else
		/*
		 * The mutex was marked contested on release. This means that
		 * there are other threads blocked on it.  Grab ownership of
		 * it and propagate its priority to the current thread if
		 * necessary.
		 */
		if (v == MTX_CONTESTED) {
			m->mtx_lock = tid | MTX_CONTESTED;
			turnstile_claim(&m->mtx_object);
			break;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_release(&m->mtx_object);
			cpu_spinwait();
			continue;
		}

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
		if (TD_IS_RUNNING(owner)) {
#else
		if (m != &Giant && TD_IS_RUNNING(owner)) {
#endif
			turnstile_release(&m->mtx_object);
			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
				cpu_spinwait();
			}
			continue;
		}
#endif	/* SMP && !NO_ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->mtx_object.lo_name,
			    WITNESS_FILE(&m->mtx_object),
			    WITNESS_LINE(&m->mtx_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
		turnstile_wait(&m->mtx_object, mtx_owner(m),
		    TS_EXCLUSIVE_QUEUE);
	}

#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->mtx_object.lo_name, (void *)tid, file, line);
	}
#endif
#ifdef MUTEX_PROFILING
	if (contested)
		m->mtx_contest_locking++;
	m->mtx_contest_holding = 0;
#endif
}
#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	while (!_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else {
				struct thread *td;

				/* If the mutex is unlocked, try again. */
				if (m->mtx_lock == MTX_UNOWNED)
					continue;
				td = mtx_owner(m);
				printf(
			"spin lock %p (%s) held by %p (tid %d) too long\n",
				    m, m->mtx_object.lo_name, td, td->td_tid);
#ifdef WITNESS
				witness_display_spinlock(&m->mtx_object, td);
#endif
				panic("spin lock held too long");
			}
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}
#endif /* SMP */
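
/*
 * Example (illustrative): spin mutexes are for contexts that must not
 * sleep; holding one keeps interrupts disabled on the local CPU, as the
 * scheduler does with sched_lock elsewhere in this file:
 *
 *	mtx_lock_spin(&sched_lock);
 *	...update scheduler state...
 *	mtx_unlock_spin(&sched_lock);
 */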
/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;
#ifndef PREEMPTION
	struct thread *td, *td1;
#endif

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	turnstile_lock(&m->mtx_object);
	ts = turnstile_lookup(&m->mtx_object);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	if (ts == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		turnstile_release(&m->mtx_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
	/* XXX */
	td1 = turnstile_head(ts, TS_EXCLUSIVE_QUEUE);
#endif
#ifdef MUTEX_WAKE_ALL
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_release_lock_quick(m);
#else
	if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else {
		m->mtx_lock = MTX_CONTESTED;
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
			    m);
	}
#endif
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);

#ifndef PREEMPTION
	/*
	 * XXX: This is just a hack until preemption is done.  However,
	 * once preemption is done we need to either wrap the
	 * turnstile_signal() and release of the actual lock in an
	 * extra critical section or change the preemption code to
	 * always just set a flag and never do instant-preempts.
	 */
	td = curthread;
	if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
		return;
	mtx_lock_spin(&sched_lock);
	if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		mi_switch(SW_INVOL, NULL);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}
	mtx_unlock_spin(&sched_lock);
#endif

	return;
}
/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */
/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL || dumping)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
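
/*
 * Example (illustrative; names are made up): callers use mtx_assert() to
 * document and, with INVARIANTS, enforce their locking protocol:
 *
 *	static void
 *	foo_locked_op(struct foo_softc *sc)
 *	{
 *
 *		mtx_assert(&sc->sc_mtx, MA_OWNED);
 *		sc->sc_count++;
 *	}
 */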
/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		/* sizeof(*m): check the whole mutex, not the pointer */
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif
/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
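
/*
 * Example (illustrative): MTX_SYSINIT() arranges for mtx_sysinit() to run
 * during boot, so a file-scope mutex is initialized without an explicit
 * mtx_init() call; mprof_mtx above is a real instance of this pattern.
 * The names below are made up.
 */
#if 0
static struct mtx foo_mtx;
MTX_SYSINIT(foo_mtx, &foo_mtx, "foo global lock", MTX_DEF);
#endif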
/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_class *class;
	int flags;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;

	/* Initialize mutex. */
	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
#ifdef MUTEX_PROFILING
	m->mtx_acqtime = 0;
	m->mtx_filename = NULL;
	m->mtx_lineno = 0;
	m->mtx_contest_holding = 0;
	m->mtx_contest_locking = 0;
#endif

	lock_init(&m->mtx_object, class, name, type, flags);
}
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->mtx_object);
}
/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}
#ifdef DDB
static void
db_show_mtx(struct lock_object *lock)
{
	struct thread *td;
	struct mtx *m;

	m = (struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->mtx_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->mtx_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif