2 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 * promote products derived from this software without specific prior
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
36 #include <sys/queue.h>
40 #include <machine/atomic.h>
41 #include <machine/bus.h>
42 #include <machine/cpufunc.h>
43 #include <machine/globals.h>
47 #include <machine/mutex.h>
53 * If kern_mutex.c is being built, compile non-inlined versions of various
54 * functions so that kernel modules can use them.
#ifndef _KERN_MUTEX_C_
/*
 * Outside of kern_mutex.c the mutex operations below compile as inlines;
 * kern_mutex.c builds them as real functions for kernel modules to call.
 * NOTE(review): the matching #else/#endif are not visible in this copy.
 */
#define _MTX_INLINE static __inline

/*
 * Mutex types and options, combined into the `type'/`flag' arguments of
 * mtx_init()/mtx_enter()/mtx_exit() and tested with `&' below.
 */
#define MTX_DEF 0x0 /* Default (spin/sleep) */
#define MTX_SPIN 0x1 /* Spin only lock */
#define MTX_RLIKELY 0x4 /* (opt) Recursion likely */
#define MTX_NORECURSE 0x8 /* No recursion possible */
#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */
#define MTX_NOSWITCH 0x20 /* Do not switch on release */
#define MTX_FIRST 0x40 /* First spin lock holder */
#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */
#define MTX_COLD 0x100 /* Mutex init'd before malloc works */

/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */
#define MTX_HARDOPTS (MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)
/* Flags/value used in mtx_lock */
#define MTX_RECURSE 0x01 /* (non-spin) lock held recursively */
#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */
/*
 * Mask that strips the flag bits out of mtx_lock, leaving the owner.
 * Fully parenthesized so the expansion is safe in any expression
 * context (the previous bare `~(...)' form relied on `~' binding
 * tighter than every operator a caller might use around it).
 */
#define MTX_FLAGMASK (~(MTX_RECURSE | MTX_CONTESTED))
#define MTX_UNOWNED 0x8 /* Cookie for free mutex */
92 /* If you add anything here, adjust the mtxf_t definition below */
93 struct witness *mtxd_witness;
94 LIST_ENTRY(mtx) mtxd_held;
95 const char *mtxd_file;
97 const char *mtxd_description;
/*
 * Accessors mapping the short mtx_* names onto the mtx_debug side
 * structure.  NOTE(review): the #ifdef MUTEX_DEBUG lines that should
 * surround these are not visible in this copy of the file.
 */
#define mtx_description mtx_debug->mtxd_description
#define mtx_held mtx_debug->mtxd_held
#define mtx_line mtx_debug->mtxd_line
#define mtx_file mtx_debug->mtxd_file
#define mtx_witness mtx_debug->mtxd_witness

/*
 * Body of struct mtx.  NOTE(review): the `struct mtx {' line and the
 * closing `};' are not visible in this copy.
 */
volatile uintptr_t mtx_lock; /* lock owner/gate/flags */
volatile u_int mtx_recurse; /* number of recursive holds */
u_int mtx_saveintr; /* saved flags (for spin locks) */
/*
 * Debug kernels keep a pointer to the side structure; otherwise only the
 * description string is stored.  NOTE(review): these two fields look like
 * alternatives of a conditional whose #ifdef/#else lines are missing here.
 */
struct mtx_debug *mtx_debug;
const char *mtx_description;
TAILQ_HEAD(, proc) mtx_blocked;
LIST_ENTRY(mtx) mtx_contested;
struct mtx *mtx_next; /* all locks in system */
struct mtx *mtx_prev;

/*
 * MUTEX_DECLARE() statically instantiates a mutex; the first (debug)
 * flavor also allocates the static mtx_debug record up front.
 * NOTE(review): the two definitions appear to be alternatives of an
 * #ifdef whose conditional lines are missing from this copy.
 */
#define MUTEX_DECLARE(modifiers, name) \
static struct mtx_debug __mtx_debug_##name; \
modifiers struct mtx name = { 0, 0, 0, &__mtx_debug_##name }
#define MUTEX_DECLARE(modifiers, name) modifiers struct mtx name

/* Annotation for known-MP-unsafe spots; expands to nothing. */
#define mp_fixme(string)

/* Out-of-line mutex functions (built in kern_mutex.c; see note above). */
void mtx_init(struct mtx *m, const char *description, int flag);
void mtx_enter_hard(struct mtx *, int type, int saveintr);
void mtx_exit_hard(struct mtx *, int type);
void mtx_destroy(struct mtx *m);
143 * Wrap the following functions with cpp macros so that filenames and line
144 * numbers are embedded in the code correctly.
/*
 * Modules and kern_mutex.c use real function calls; other kernel builds
 * get the _MTX_INLINE definitions further down.  NOTE(review): the
 * matching #endif for this #if is not visible in this copy.
 */
#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
void _mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
int _mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
void _mtx_exit(struct mtx *mtxp, int type, const char *file, int line);

/*
 * Public entry points: wrappers embedding the caller's __FILE__/__LINE__
 * so witness and tracing report the real call site.
 */
#define mtx_enter(mtxp, type) \
_mtx_enter((mtxp), (type), __FILE__, __LINE__)
#define mtx_try_enter(mtxp, type) \
_mtx_try_enter((mtxp), (type), __FILE__, __LINE__)
#define mtx_exit(mtxp, type) \
_mtx_exit((mtxp), (type), __FILE__, __LINE__)

/* Globally known mutexes: the scheduler lock and Giant (see the
 * DROP_GIANT/PICKUP_GIANT machinery below). */
extern struct mtx sched_lock;
extern struct mtx Giant;
166 * Used to replace return with an exit Giant and return.
171 mtx_exit(&Giant, MTX_DEF); \
177 mtx_exit(&Giant, MTX_DEF); \
181 #define DROP_GIANT() \
184 WITNESS_SAVE_DECL(Giant); \
186 if (mtx_owned(&Giant)) \
187 WITNESS_SAVE(&Giant, Giant); \
188 for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
189 mtx_exit(&Giant, MTX_DEF)
191 #define PICKUP_GIANT() \
192 mtx_assert(&Giant, MA_NOTOWNED); \
193 while (_giantcnt--) \
194 mtx_enter(&Giant, MTX_DEF); \
195 if (mtx_owned(&Giant)) \
196 WITNESS_RESTORE(&Giant, Giant); \
199 #define PARTIAL_PICKUP_GIANT() \
200 mtx_assert(&Giant, MA_NOTOWNED); \
201 while (_giantcnt--) \
202 mtx_enter(&Giant, MTX_DEF); \
203 if (mtx_owned(&Giant)) \
204 WITNESS_RESTORE(&Giant, Giant)
/* Assertion classes for mtx_assert().  NOTE(review): the MA_OWNED
 * define (and the #ifdef INVARIANTS line) are missing from this copy. */
#define MA_NOTOWNED 2
/*
 * mtx_assert(): panic unless mutex `m' is in the asserted state `what'.
 * NOTE(review): the switch/case scaffolding lines of this macro are
 * missing from this copy — the body below is unbalanced as shown;
 * reconcile against the upstream sys/mutex.h.
 */
#define mtx_assert(m, what) { \
if (!mtx_owned((m))) \
panic("mutex %s not owned at %s:%d", \
(m)->mtx_description, __FILE__, __LINE__); \
if (mtx_owned((m))) \
panic("mutex %s owned at %s:%d", \
(m)->mtx_description, __FILE__, __LINE__); \
panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
#else /* INVARIANTS */
/* Without INVARIANTS, assertions compile away entirely. */
#define mtx_assert(m, what)
#endif /* INVARIANTS */
/*
 * Kernel assertion macros (active under MUTEX_DEBUG).  MPASS stringifies
 * the failed expression itself; MPASS2 lets the caller supply the text
 * (typically a pre-expanded STR_* string).  Wrapped in do { } while (0)
 * so each use expands to exactly one statement — the previous bare
 * `if (!(ex)) panic(...)' form could capture a following `else'.
 */
#define MPASS(ex) do { \
	if (!(ex)) \
		panic("Assertion %s failed at %s:%d", \
		    #ex, __FILE__, __LINE__); \
} while (0)
#define MPASS2(ex, what) do { \
	if (!(ex)) \
		panic("Assertion %s failed at %s:%d", \
		    what, __FILE__, __LINE__); \
} while (0)
239 #else /* MUTEX_DEBUG */
241 #define MPASS2(ex, where)
242 #endif /* MUTEX_DEBUG */
246 #error WITNESS requires MUTEX_DEBUG
247 #endif /* MUTEX_DEBUG */
/* Witness hooks: forward to witness_*() only for witnessed mutexes. */
#define WITNESS_ENTER(m, t, f, l) \
if ((m)->mtx_witness != NULL) \
witness_enter((m), (t), (f), (l))
#define WITNESS_EXIT(m, t, f, l) \
if ((m)->mtx_witness != NULL) \
witness_exit((m), (t), (f), (l))
/* Pre-sleep witness check; records the caller's file/line. */
#define WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
/*
 * Declare the locals (saved file pointer and line) that
 * WITNESS_SAVE/WITNESS_RESTORE use for lock tag `n'.
 */
#define WITNESS_SAVE_DECL(n) \
const char * __CONCAT(n, __wf); \
int __CONCAT(n, __wl)
/*
 * Save a witnessed lock's file/line into the WITNESS_SAVE_DECL locals.
 * NOTE(review): the do { ... } while (0) wrapper lines of these two
 * macros are missing from this copy of the file.
 */
#define WITNESS_SAVE(m, n) \
if ((m)->mtx_witness != NULL) \
witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)); \
/* Restore the previously saved file/line for lock tag `n'. */
#define WITNESS_RESTORE(m, n) \
if ((m)->mtx_witness != NULL) \
witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)); \
/* WITNESS lock-order checker interface (implementation not visible here). */
void witness_init(struct mtx *, int flag);
void witness_destroy(struct mtx *);
/* Acquire/release notifications, called via the WITNESS_* macros above. */
void witness_enter(struct mtx *, int, const char *, int);
void witness_try_enter(struct mtx *, int, const char *, int);
void witness_exit(struct mtx *, int, const char *, int);
/* Dump witness state through the supplied printf-like callback. */
void witness_display(void(*)(const char *fmt, ...));
/* List lock information for process `p'. */
void witness_list(struct proc *);
int witness_sleep(int, struct mtx *, const char *, int);
/* Save/restore per-lock file/line bookkeeping (WITNESS_SAVE/RESTORE). */
void witness_save(struct mtx *, const char **, int *);
void witness_restore(struct mtx *, const char *, int);
/* WITNESS disabled: all hooks become no-ops. */
#define WITNESS_ENTER(m, t, f, l)
#define WITNESS_EXIT(m, t, f, l)
#define WITNESS_SLEEP(check, m)
#define WITNESS_SAVE_DECL(n)
#define WITNESS_SAVE(m, n)
#define WITNESS_RESTORE(m, n)
291 * flag++ is slezoid way of shutting up unused parameter warning
/* Stub witness functions for kernels without WITNESS (see flag++ note). */
#define witness_init(m, flag) flag++
#define witness_destroy(m)
#define witness_enter(m, t, f, l)
#define witness_try_enter(m, t, f, l)
#define witness_exit(m, t, f, l)
302 * Assembly macros (for internal use only)
303 *------------------------------------------------------------------------------
/* Stringify an argument via __STRING; helper for this assembly section. */
#define _V(x) __STRING(x)
309 * Default, unoptimized mutex micro-operations
/* Actually obtain mtx_lock */
/*
 * Compare-and-swap the lock word from MTX_UNOWNED to the owning thread
 * `tid', with acquire semantics on success; non-zero iff it succeeded.
 * NOTE(review): the #ifndef guards here let machine-dependent code
 * override these defaults; their matching #endifs are not visible in
 * this copy of the file.
 */
#define _obtain_lock(mp, tid) \
atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
#ifndef _release_lock
/* Actually release mtx_lock */
#define _release_lock(mp, tid) \
atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
#ifndef _release_lock_quick
/* Actually release mtx_lock quickly assuming that we own it */
#define _release_lock_quick(mp) \
atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
330 #ifndef _getlock_sleep
/* Get a sleep lock, deal with recursion inline. */
/*
 * If the CAS fails: either another thread owns it (go to the hard path)
 * or we already own it ourselves, in which case just mark it recursed
 * and bump the count.  NOTE(review): the else arm and closing braces
 * were missing/unbalanced in this copy and have been reconstructed.
 */
#define	_getlock_sleep(mp, tid, type) do {				\
	if (!_obtain_lock(mp, tid)) {					\
		if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\
			mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0);	\
		else {							\
			atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSE);	\
			(mp)->mtx_recurse++;				\
		}							\
	}								\
} while (0)
344 #ifndef _getlock_spin_block
/* Get a spin lock, handle recursion inline (as the less common case) */
/*
 * Saves and disables interrupts before taking the lock; the saved state
 * is stashed in mtx_saveintr for _exitlock_spin()/mtx_exit_hard() to
 * restore.  NOTE(review): the disable_intr() and else lines were missing
 * from this copy and have been reconstructed — confirm against upstream.
 */
#define	_getlock_spin_block(mp, tid, type) do {				\
	u_int _mtx_intr = save_intr();					\
									\
	disable_intr();							\
	if (!_obtain_lock(mp, tid))					\
		mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr);	\
	else								\
		(mp)->mtx_saveintr = _mtx_intr;				\
} while (0)
356 #ifndef _getlock_norecurse
/*
 * Get a lock without any recursion handling.  Calls the hard enter
 * function if we can't get it inline.
 * NOTE(review): the closing `} while (0)' was missing from this copy.
 */
#define	_getlock_norecurse(mp, tid, type) do {				\
	if (!_obtain_lock(mp, tid))					\
		mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0);		\
} while (0)
367 #ifndef _exitlock_norecurse
/*
 * Release a sleep lock assuming we haven't recursed on it; recursion is
 * handled in the hard function.
 * NOTE(review): the closing `} while (0)' was missing from this copy.
 */
#define	_exitlock_norecurse(mp, tid, type) do {				\
	if (!_release_lock(mp, tid))					\
		mtx_exit_hard((mp), (type) & MTX_HARDOPTS);		\
} while (0)
/*
 * Release a sleep lock when it's likely we recursed (the code to deal
 * with simple recursion is inline).  If the release CAS fails we either
 * recursed (drop one level; clear MTX_RECURSE on the last) or the lock
 * is contested (hard path must wake waiters).
 * NOTE(review): the else arm and closing braces were missing/unbalanced
 * in this copy and have been reconstructed.
 */
#define	_exitlock(mp, tid, type) do {					\
	if (!_release_lock(mp, tid)) {					\
		if ((mp)->mtx_lock & MTX_RECURSE) {			\
			if (--((mp)->mtx_recurse) == 0)			\
				atomic_clear_ptr(&(mp)->mtx_lock,	\
				    MTX_RECURSE);			\
		} else {						\
			mtx_exit_hard((mp), (type) & MTX_HARDOPTS);	\
		}							\
	}								\
} while (0)
396 #ifndef _exitlock_spin
/* Release a spin lock (with possible recursion). */
/*
 * Non-recursed: store MTX_UNOWNED and restore the interrupt state saved
 * at enter time.  Recursed: just drop one level.
 * NOTE(review): the else arm and closing braces were missing/unbalanced
 * in this copy and have been reconstructed.
 */
#define	_exitlock_spin(mp) do {						\
	if ((mp)->mtx_recurse == 0) {					\
		int _mtx_intr = (mp)->mtx_saveintr;			\
									\
		_release_lock_quick(mp);				\
		restore_intr(_mtx_intr);				\
	} else {							\
		(mp)->mtx_recurse--;					\
	}								\
} while (0)
411 * Externally visible mutex functions.
412 *------------------------------------------------------------------------------
416 * Return non-zero if a mutex is already owned by the current thread.
/* True iff the current thread (CURTHD) owns mutex `m'. */
#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)CURTHD)
421 #ifdef _KERN_MUTEX_C_
425 * KTR_EXTEND saves file name and line for all entries, so we don't need them
426 * here. Theoretically we should also change the entries which refer to them
427 * (from CTR5 to CTR3), but since they're just passed to snprintf as the last
428 * parameters, it doesn't do any harm to leave them.
/*
 * Shared KTR trace format strings (defined once here so every CTR site
 * does not duplicate the literal).  This first set is for KTR_EXTEND
 * kernels, where KTR records file/line itself (see comment above).
 * NOTE(review): the #if/#else lines selecting between the two sets are
 * missing from this copy of the file.
 */
char STR_mtx_enter_fmt[] = "GOT %s [%x] r=%d";
char STR_mtx_exit_fmt[] = "REL %s [%x] r=%d";
char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%x] result=%d";
/* Non-KTR_EXTEND variants: file/line formatted into the entry itself. */
char STR_mtx_enter_fmt[] = "GOT %s [%x] at %s:%d r=%d";
char STR_mtx_exit_fmt[] = "REL %s [%x] at %s:%d r=%d";
char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%x] at %s:%d result=%d";
/* Stringified assertion expressions used with MPASS2() below. */
char STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
char STR_mtx_owned[] = "mtx_owned(mpp)";
char STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
#else /* _KERN_MUTEX_C_ */
/* Everyone else links against the strings defined in kern_mutex.c. */
extern char STR_mtx_enter_fmt[];
extern char STR_mtx_bad_type[];
extern char STR_mtx_exit_fmt[];
extern char STR_mtx_owned[];
extern char STR_mtx_recurse[];
extern char STR_mtx_try_enter_fmt[];
#endif /* _KERN_MUTEX_C_ */
452 * Get lock 'm', the macro handles the easy (and most common cases) and leaves
453 * the slow stuff to the mtx_enter_hard() function.
455 * Note: since type is usually a constant much of this code is optimized out.
/*
 * Acquire mutex `mtxp': the easy/common cases are handled inline via the
 * _getlock_* macros; everything else punts to mtx_enter_hard().  `file'
 * and `line' come from the mtx_enter() wrapper macro for witness/tracing.
 * XXX(review): several lines of this function (return type, braces, else
 * arms) are missing from this copy of the file — the body below is
 * unbalanced as shown; reconcile against the upstream sys/mutex.h.
 */
_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
struct mtx *mpp = mtxp;
/* bits only valid on mtx_exit() */
MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
/* Spin-lock path: recursion and interrupt handling vary by options. */
if ((type) & MTX_SPIN) {
* Easy cases of spin locks:
* 1) We already own the lock and will simply recurse on it (if
* 2) The lock is free, we just get it
if ((type) & MTX_RLIKELY) {
* Check for recursion, if we already have this
* lock we just bump the recursion count.
if (mpp->mtx_lock == (uintptr_t)CURTHD) {
if (((type) & MTX_TOPHALF) == 0) {
* If an interrupt thread uses this we must block
if ((type) & MTX_FIRST) {
/* First holder / top half: no saved-interrupt bookkeeping needed. */
_getlock_norecurse(mpp, CURTHD,
(type) & MTX_HARDOPTS);
_getlock_spin_block(mpp, CURTHD,
(type) & MTX_HARDOPTS);
_getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
/* Sleep (MTX_DEF) path: inline recursion handling only when likely. */
if ((type) & MTX_RLIKELY)
_getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
_getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
/* Record the acquisition for WITNESS and the KTR lock trace. */
WITNESS_ENTER(mpp, type, file, line);
CTR5(KTR_LOCK, STR_mtx_enter_fmt,
mpp->mtx_description, mpp, file, line,
517 * Attempt to get MTX_DEF lock, return non-zero if lock acquired.
519 * XXX DOES NOT HANDLE RECURSION
/*
 * Attempt to get MTX_DEF lock `mtxp'; returns non-zero iff acquired.
 * Does not handle recursion (asserted below when witnessed).
 * XXX(review): declaration/brace lines of this function are missing from
 * this copy of the file; reconcile against the upstream sys/mutex.h.
 */
_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
struct mtx *const mpp = mtxp;
/* Single CAS attempt — no hard/blocking path for try-enter. */
rval = _obtain_lock(mpp, CURTHD);
if (rval && mpp->mtx_witness != NULL) {
MPASS(mpp->mtx_recurse == 0);
witness_try_enter(mpp, type, file, line);
/* Trace the attempt along with its result. */
CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
mpp->mtx_description, mpp, file, line, rval);
/*
 * Release mutex `mtxp'.  Fast paths are inline; recursion and contested
 * wakeups go through the _exitlock* macros to mtx_exit_hard().
 * XXX(review): braces and else arms of this function are missing from
 * this copy of the file — the body below is unbalanced as shown;
 * reconcile against the upstream sys/mutex.h.
 */
_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
struct mtx *const mpp = mtxp;
MPASS2(mtx_owned(mpp), STR_mtx_owned);
/* Notify WITNESS and trace before the lock word is actually released. */
WITNESS_EXIT(mpp, type, file, line);
CTR5(KTR_LOCK, STR_mtx_exit_fmt,
mpp->mtx_description, mpp, file, line,
if ((type) & MTX_SPIN) {
if ((type) & MTX_NORECURSE) {
int mtx_intr = mpp->mtx_saveintr;
MPASS2(mpp->mtx_recurse == 0, STR_mtx_recurse);
_release_lock_quick(mpp);
/* Restore saved interrupt state unless top-half/first holder. */
if (((type) & MTX_TOPHALF) == 0) {
if ((type) & MTX_FIRST) {
restore_intr(mtx_intr);
if (((type & MTX_TOPHALF) == 0) &&
(type & MTX_FIRST)) {
/* Handle sleep locks */
if ((type) & MTX_RLIKELY)
_exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
_exitlock_norecurse(mpp, CURTHD,
(type) & MTX_HARDOPTS);
585 #endif /* KLD_MODULE */
/* Avoid namespace pollution */
/*
 * The _getlock/_exitlock helpers are implementation details of the
 * inline functions above; hide them from code that merely includes
 * this header.  NOTE(review): #undefs for _obtain_lock, _release_lock
 * and _exitlock appear to be missing from this copy of the file.
 */
#ifndef _KERN_MUTEX_C_
#undef _release_lock_quick
#undef _getlock_sleep
#undef _getlock_spin_block
#undef _getlock_norecurse
#undef _exitlock_norecurse
#undef _exitlock_spin
#endif /* !_KERN_MUTEX_C_ */
602 #endif /* _SYS_MUTEX_H_ */