/*
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 */
36 #include <sys/queue.h>
40 #include <machine/atomic.h>
41 #include <machine/cpufunc.h>
42 #include <machine/globals.h>
46 #include <machine/mutex.h>
/*
 * Mutex types and options stored in mutex->mtx_flags
 */
#define	MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define	MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define	MTX_RECURSE	0x00000002	/* Option: lock allowed to recurse */

/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 *
 * XXX: The only reason we make these bits not interfere with the above "types
 *	and options" bits is because we have to pass both to the witness
 *	routines right now; if/when we clean up the witness interface to
 *	not check for mutex type from the passed in flag, but rather from
 *	the mutex lock's mtx_flags field, then we can change these values to
 *	non-overlapping ones.
 */
#define	MTX_NOSWITCH	0x00000004	/* Do not switch on release */
#define	MTX_QUIET	0x00000008	/* Don't log a mutex event */

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this,
 * with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define	MTX_UNOWNED	0x00000004	/* Cookie for free mutex */
/*
 * MTX_FLAGMASK strips the state bits from mtx_lock so the owner can be
 * recovered; MTX_UNOWNED is deliberately NOT stripped so that a free
 * lock never compares equal to an owner pointer (see mtx_owned()).
 */
#define	MTX_FLAGMASK	~(MTX_RECURSED | MTX_CONTESTED)
90 volatile uintptr_t mtx_lock; /* owner (and state for sleep locks) */
91 volatile u_int mtx_recurse; /* number of recursive holds */
92 u_int mtx_saveintr; /* saved flags (for spin locks) */
93 int mtx_flags; /* flags passed to mtx_init() */
94 const char *mtx_description;
95 TAILQ_HEAD(, proc) mtx_blocked; /* threads blocked on this lock */
96 LIST_ENTRY(mtx) mtx_contested; /* list of all contested locks */
97 struct mtx *mtx_next; /* all existing locks */
98 struct mtx *mtx_prev; /* in system... */
99 struct mtx_debug *mtx_debug; /* debugging information... */
/*
 * XXX: Friendly reminder to fix things in MP code that is presently being
 *	worked on. Expands to nothing; exists purely as a grep-able marker.
 */
#define	mp_fixme(string)
/*
 * Strings for KTR_LOCK tracing. Defined in the mutex implementation;
 * used as CTR5() format strings by the lock/unlock macros below.
 */
extern char STR_mtx_lock_slp[];
extern char STR_mtx_lock_spn[];
extern char STR_mtx_unlock_slp[];
extern char STR_mtx_unlock_spn[];
121 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
122 * of the kernel via macros, thus allowing us to use the cpp __FILE__
123 * and __LINE__. These functions should not be called directly by any
124 * code using the IPI. Their macros cover their functionality.
126 * [See below for descriptions]
129 void mtx_init(struct mtx *m, const char *description, int opts);
130 void mtx_destroy(struct mtx *m);
131 void _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line);
132 void _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
133 void _mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr,
134 const char *file, int line);
135 void _mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
136 int _mtx_trylock(struct mtx *m, int opts, const char *file, int line);
/*
 * We define our machine-independent (unoptimized) mutex micro-operations
 * here, if they are not already defined in the machine-dependent mutex.h
 */

/* Actually obtain mtx_lock: CAS from the MTX_UNOWNED cookie to `tid'. */
#ifndef _obtain_lock
#define	_obtain_lock(mp, tid)						\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
#endif

/* Actually release mtx_lock: CAS from `tid' back to MTX_UNOWNED. */
#ifndef _release_lock
#define	_release_lock(mp, tid)						\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
#endif

/* Actually release mtx_lock quickly, assuming we own it. */
#ifndef _release_lock_quick
#define	_release_lock_quick(mp)						\
	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
#endif
/*
 * Obtain a sleep lock inline, or call the "hard" function if we can't get it
 * easy.
 */
#ifndef _get_sleep_lock
#define	_get_sleep_lock(mp, tid, opts) do {				\
	if (!_obtain_lock((mp), (tid)))					\
		_mtx_lock_sleep((mp), (opts), __FILE__, __LINE__);	\
} while (0)
#endif
/*
 * Obtain a spin lock inline, or call the "hard" function if we can't get it
 * easy. For spinlocks, we handle recursion inline (it turns out that function
 * calls can be significantly expensive on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 *
 * NOTE(review): the disable_intr() call and the else/} else lines were lost
 * in extraction and have been restored; interrupts must be disabled before
 * the acquire attempt for a spin lock, and the saved state is stashed in
 * mtx_saveintr on the uncontested path.
 */
#ifndef _get_spin_lock
#define	_get_spin_lock(mp, tid, opts) do {				\
	u_int _mtx_intr = save_intr();					\
	disable_intr();							\
	if (!_obtain_lock((mp), (tid))) {				\
		if ((mp)->mtx_lock == (uintptr_t)(tid))			\
			(mp)->mtx_recurse++;				\
		else							\
			_mtx_lock_spin((mp), (opts), _mtx_intr,		\
			    __FILE__, __LINE__);			\
	} else								\
		(mp)->mtx_saveintr = _mtx_intr;				\
} while (0)
#endif
/*
 * Release a sleep lock inline, or call the "hard" function if we can't do it
 * easy.
 */
#ifndef _rel_sleep_lock
#define	_rel_sleep_lock(mp, tid, opts) do {				\
	if (!_release_lock((mp), (tid)))				\
		_mtx_unlock_sleep((mp), (opts), __FILE__, __LINE__);	\
} while (0)
#endif
/*
 * For spinlocks, we can handle everything inline, as it's pretty simple and
 * a function call would be too expensive (at least on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 *
 * NOTE(review): the `else {' branch lines were lost in extraction and have
 * been restored — the lock is only actually released (and interrupt state
 * restored) when the last recursive hold is dropped.
 */
#ifndef _rel_spin_lock
#define	_rel_spin_lock(mp) do {						\
	u_int _mtx_intr = (mp)->mtx_saveintr;				\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		_release_lock_quick((mp));				\
		restore_intr(_mtx_intr);				\
	}								\
} while (0)
#endif
/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) locks mutex `m'
 *     and passes option flags `opts' to the "hard" function, if required.
 *     With these routines, it is possible to pass flags such as MTX_QUIET
 *     and/or MTX_NOSWITCH to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 *     it cannot. Rather, it returns 0 on failure and non-zero on success.
 *     It does NOT handle recursion as we assume that if a caller is properly
 *     using this part of the interface, he will know that the lock in
 *     question is not recursed.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
 *     relevant option flags `opts.'
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
/* Lock a MTX_DEF (sleep) mutex with default options; trace and witness it. */
#define	mtx_lock(m) do {						\
	MPASS(curproc != NULL);						\
	_get_sleep_lock((m), curproc, 0);				\
	CTR5(KTR_LOCK, STR_mtx_lock_slp, (m)->mtx_description, (m),	\
	    (m)->mtx_recurse, __FILE__, __LINE__);			\
	WITNESS_ENTER((m), (m)->mtx_flags, __FILE__, __LINE__);	\
} while (0)
/* Lock a MTX_SPIN mutex with default options; trace and witness it. */
#define	mtx_lock_spin(m) do {						\
	MPASS(curproc != NULL);						\
	_get_spin_lock((m), curproc, 0);				\
	CTR5(KTR_LOCK, STR_mtx_lock_spn, (m)->mtx_description, (m),	\
	    (m)->mtx_recurse, __FILE__, __LINE__);			\
	WITNESS_ENTER((m), (m)->mtx_flags, __FILE__, __LINE__);	\
} while (0)
/* Unlock a MTX_DEF (sleep) mutex; witness exit happens before the release. */
#define	mtx_unlock(m) do {						\
	MPASS(curproc != NULL);						\
	WITNESS_EXIT((m), (m)->mtx_flags, __FILE__, __LINE__);		\
	mtx_assert((m), MA_OWNED);					\
	_rel_sleep_lock((m), curproc, 0);				\
	CTR5(KTR_LOCK, STR_mtx_unlock_slp, (m)->mtx_description, (m),	\
	    (m)->mtx_recurse, __FILE__, __LINE__);			\
} while (0)
/* Unlock a MTX_SPIN mutex; witness exit happens before the release. */
#define	mtx_unlock_spin(m) do {						\
	MPASS(curproc != NULL);						\
	WITNESS_EXIT((m), (m)->mtx_flags, __FILE__, __LINE__);		\
	mtx_assert((m), MA_OWNED);					\
	_rel_spin_lock((m));						\
	CTR5(KTR_LOCK, STR_mtx_unlock_spn, (m)->mtx_description, (m),	\
	    (m)->mtx_recurse, __FILE__, __LINE__);			\
} while (0)
/*
 * Lock a MTX_DEF mutex passing option flags `opts' (e.g. MTX_QUIET,
 * MTX_NOSWITCH) down to the "hard" function when needed.
 */
#define	mtx_lock_flags(m, opts) do {					\
	MPASS(curproc != NULL);						\
	_get_sleep_lock((m), curproc, (opts));				\
	if (((opts) & MTX_QUIET) == 0)					\
		CTR5(KTR_LOCK, STR_mtx_lock_slp,			\
		    (m)->mtx_description, (m), (m)->mtx_recurse,	\
		    __FILE__, __LINE__);				\
	WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), __FILE__,	\
	    __LINE__);							\
} while (0)
/*
 * Lock a MTX_SPIN mutex passing option flags `opts' (e.g. MTX_QUIET)
 * down to the "hard" function when needed.
 */
#define	mtx_lock_spin_flags(m, opts) do {				\
	MPASS(curproc != NULL);						\
	_get_spin_lock((m), curproc, (opts));				\
	if (((opts) & MTX_QUIET) == 0)					\
		CTR5(KTR_LOCK, STR_mtx_lock_spn,			\
		    (m)->mtx_description, (m), (m)->mtx_recurse,	\
		    __FILE__, __LINE__);				\
	WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), __FILE__,	\
	    __LINE__);							\
} while (0)
/*
 * Unlock a MTX_DEF mutex passing option flags `opts' (e.g. MTX_QUIET,
 * MTX_NOSWITCH) down to the "hard" function when needed.
 */
#define	mtx_unlock_flags(m, opts) do {					\
	MPASS(curproc != NULL);						\
	WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), __FILE__,		\
	    __LINE__);							\
	mtx_assert((m), MA_OWNED);					\
	_rel_sleep_lock((m), curproc, (opts));				\
	if (((opts) & MTX_QUIET) == 0)					\
		CTR5(KTR_LOCK, STR_mtx_unlock_slp,			\
		    (m)->mtx_description, (m), (m)->mtx_recurse,	\
		    __FILE__, __LINE__);				\
} while (0)
/*
 * The MTX_SPIN unlock case is all inlined, so we handle the MTX_QUIET
 * flag right in the macro. Not a problem as if we don't have KTR_LOCK, this
 * check will be optimized out.
 */
#define	mtx_unlock_spin_flags(m, opts) do {				\
	MPASS(curproc != NULL);						\
	WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), __FILE__,		\
	    __LINE__);							\
	mtx_assert((m), MA_OWNED);					\
	_rel_spin_lock((m));						\
	if (((opts) & MTX_QUIET) == 0)					\
		CTR5(KTR_LOCK, STR_mtx_unlock_spn,			\
		    (m)->mtx_description, (m), (m)->mtx_recurse,	\
		    __FILE__, __LINE__);				\
} while (0)
/* Try-lock wrappers; return 0 on failure, non-zero on success. */
#define	mtx_trylock(m)							\
	_mtx_trylock((m), 0, __FILE__, __LINE__)

#define	mtx_trylock_flags(m, opts)					\
	_mtx_trylock((m), (opts), __FILE__, __LINE__)

/*
 * Owner test: strip the RECURSED/CONTESTED state bits and compare against
 * the current thread. MTX_UNOWNED survives the mask, so a free lock can
 * never match curproc.
 */
#define	mtx_owned(m)	(((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)curproc)

/* Non-zero iff the lock is currently held recursively. */
#define	mtx_recursed(m)	((m)->mtx_recurse != 0)
/* Globally visible kernel locks, defined in the mutex implementation. */
extern struct mtx	sched_lock;
extern struct mtx	Giant;
/*
 * Giant lock manipulation and clean exit macros.
 * Used to replace return with an exit Giant and return.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT(): the
 * DROP macros open a `do {' scope (holding the recursion count in
 * _giantcnt) that PICKUP_GIANT() closes with `} while (0)'.
 *
 * NOTE(review): the `do {', `int _giantcnt;', and the loop-body lines
 * (mtx_unlock(&Giant) / mtx_lock(&Giant)) were lost in extraction and
 * have been restored.
 */
#define	DROP_GIANT_NOSWITCH()						\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock_flags(&Giant, MTX_NOSWITCH)

#define	DROP_GIANT()							\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock(&Giant)

#define	PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant);				\
} while (0)

/* As PICKUP_GIANT(), but leaves the DROP_GIANT*() scope open. */
#define	PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant)
/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 */
#define	MA_OWNED	0x01
#define	MA_NOTOWNED	0x02
#define	MA_RECURSED	0x04
#define	MA_NOTRECURSED	0x08

#ifdef INVARIANTS
void	_mtx_assert(struct mtx *m, int what, const char *file, int line);

#define	mtx_assert(m, what)						\
	_mtx_assert((m), (what), __FILE__, __LINE__)
#else	/* INVARIANTS */
/* Assertions compile away entirely when INVARIANTS is not configured. */
#define	mtx_assert(m, what)
#endif	/* INVARIANTS */
/*
 * The MUTEX_DEBUG-enabled MPASS*() extra sanity-check macros.
 * All four variants panic when the expression is false; they differ only
 * in how the failure message and location are supplied. Without
 * MUTEX_DEBUG they expand to nothing.
 */
#ifdef MUTEX_DEBUG
#define	MPASS(ex)							\
	if (!(ex))							\
		panic("Assertion %s failed at %s:%d", #ex, __FILE__,	\
		    __LINE__)

#define	MPASS2(ex, what)						\
	if (!(ex))							\
		panic("Assertion %s failed at %s:%d", what, __FILE__,	\
		    __LINE__)

#define	MPASS3(ex, file, line)						\
	if (!(ex))							\
		panic("Assertion %s failed at %s:%d", #ex, file, line)

#define	MPASS4(ex, what, file, line)					\
	if (!(ex))							\
		panic("Assertion %s failed at %s:%d", what, file, line)

#else	/* MUTEX_DEBUG */
#define	MPASS(ex)
#define	MPASS2(ex, what)
#define	MPASS3(ex, file, line)
#define	MPASS4(ex, what, file, line)
#endif	/* MUTEX_DEBUG */
/*
 * Exported WITNESS-enabled functions and corresponding wrapper macros.
 * Without the WITNESS option, all of these expand to nothing.
 */
#ifdef WITNESS
void	witness_save(struct mtx *, const char **, int *);
void	witness_restore(struct mtx *, const char *, int);
void	witness_enter(struct mtx *, int, const char *, int);
void	witness_try_enter(struct mtx *, int, const char *, int);
void	witness_exit(struct mtx *, int, const char *, int);
int	witness_list(struct proc *);
int	witness_sleep(int, struct mtx *, const char *, int);

#define	WITNESS_ENTER(m, t, f, l)					\
	witness_enter((m), (t), (f), (l))

#define	WITNESS_EXIT(m, t, f, l)					\
	witness_exit((m), (t), (f), (l))

#define	WITNESS_SLEEP(check, m)						\
	witness_sleep(check, (m), __FILE__, __LINE__)

/* Per-use saved-state slots for WITNESS_SAVE/WITNESS_RESTORE. */
#define	WITNESS_SAVE_DECL(n)						\
	const char * __CONCAT(n, __wf);					\
	int __CONCAT(n, __wl)

#define	WITNESS_SAVE(m, n)						\
	witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl))

#define	WITNESS_RESTORE(m, n)						\
	witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl))

#else	/* WITNESS */
#define	witness_enter(m, t, f, l)
#define	witness_tryenter(m, t, f, l)
#define	witness_exit(m, t, f, l)
#define	witness_list(p)
#define	witness_sleep(c, m, f, l)

#define	WITNESS_ENTER(m, t, f, l)
#define	WITNESS_EXIT(m, t, f, l)
#define	WITNESS_SLEEP(check, m)
#define	WITNESS_SAVE_DECL(n)
#define	WITNESS_SAVE(m, n)
#define	WITNESS_RESTORE(m, n)
#endif	/* WITNESS */
488 #endif /* _SYS_MUTEX_H_ */