2 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 * promote products derived from this software without specific prior
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
36 #include <sys/queue.h>
40 #include <sys/systm.h>
41 #include <machine/atomic.h>
42 #include <machine/cpufunc.h>
43 #include <machine/globals.h>
47 #include <machine/mutex.h>
/*
 * Mutex types and options stored in mutex->mtx_flags.
 */
#define	MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define	MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define	MTX_RECURSE	0x00000002	/* Option: lock allowed to recurse */

/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 *
 * XXX: The only reason we make these bits not interfere with the above "types
 *	and options" bits is because we have to pass both to the witness
 *	routines right now; if/when we clean up the witness interface to
 *	not check for mutex type from the passed in flag, but rather from
 *	the mutex lock's mtx_flags field, then we can change these values.
 */
#define	MTX_NOSWITCH	0x00000004	/* Do not switch on release */
#define	MTX_QUIET	0x00000008	/* Don't log a mutex event */

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this,
 * with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define	MTX_UNOWNED	0x00000004	/* Cookie for free mutex */
/*
 * Mask that strips the state bits out of mtx_lock, leaving the owner.
 * Parenthesized so the expansion is safe in any expression context
 * (e.g. `x & MTX_FLAGMASK' or when adjacent to a cast).
 */
#define	MTX_FLAGMASK	(~(MTX_RECURSED | MTX_CONTESTED))
	volatile uintptr_t mtx_lock;	/* owner (and state bits for sleep locks); MTX_UNOWNED when free */
	volatile u_int	mtx_recurse;	/* number of recursive holds */
	u_int		mtx_saveintr;	/* saved interrupt state (for spin locks) */
	int		mtx_flags;	/* flags passed to mtx_init() */
	const char	*mtx_description; /* name supplied to mtx_init() */
	TAILQ_HEAD(, proc) mtx_blocked;	/* threads blocked on this lock */
	LIST_ENTRY(mtx)	mtx_contested;	/* list of all contested locks */
	struct mtx	*mtx_next;	/* all existing locks ... */
	struct mtx	*mtx_prev;	/* ... linked together, in system */
	struct mtx_debug *mtx_debug;	/* debugging information... */
/*
 * XXX: Friendly reminder to fix things in MP code that is presently being
 * ignored; currently expands to nothing.
 */
#define mp_fixme(string)

/*
 * Strings for KTR_LOCK tracing; passed to CTR5() by the lock/unlock
 * interface macros below.
 */
extern char STR_mtx_lock_slp[];
extern char STR_mtx_lock_spn[];
extern char STR_mtx_unlock_slp[];
extern char STR_mtx_unlock_spn[];
/*
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 * of the kernel via macros, thus allowing us to use the cpp __FILE__
 * and __LINE__. These functions should not be called directly by any
 * code using the IPI. Their macros cover their functionality.
 *
 * [See below for descriptions]
 */
void	mtx_init(struct mtx *m, const char *description, int opts);
void	mtx_destroy(struct mtx *m);
/*
 * Contested-path ("hard") handlers; the inline _get_*/_rel_* macros below
 * fall back to these when the fast atomic path fails.
 */
void	_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr,
	    const char *file, int line);
void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
int	_mtx_trylock(struct mtx *m, int opts, const char *file, int line);
#ifdef INVARIANT_SUPPORT
void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
/*
 * We define our machine-independent (unoptimized) mutex micro-operations
 * here, if they are not already defined in the machine-dependent mutex.h.
 */

/* Atomically grab mtx_lock: succeeds only while it holds the MTX_UNOWNED cookie. */
#define	_obtain_lock(m, t)						\
	atomic_cmpset_acq_ptr(&(m)->mtx_lock, (void *)MTX_UNOWNED, (t))
153 /* Actually release mtx_lock */
154 #ifndef _release_lock
155 #define _release_lock(mp, tid) \
156 atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
159 /* Actually release mtx_lock quickly, assuming we own it. */
160 #ifndef _release_lock_quick
161 #define _release_lock_quick(mp) \
162 atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
/*
 * Obtain a sleep lock inline, or call the "hard" function if we can't get it
 * the easy way (i.e. mtx_lock did not hold the MTX_UNOWNED cookie).
 */
#ifndef _get_sleep_lock
#define _get_sleep_lock(mp, tid, opts) do {				\
	if (!_obtain_lock((mp), (tid)))					\
		_mtx_lock_sleep((mp), (opts), __FILE__, __LINE__);	\
/*
 * Obtain a spin lock inline, or call the "hard" function if we can't get it
 * easy. For spinlocks, we handle recursion inline (it turns out that function
 * calls can be significantly expensive on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 *
 * The caller's interrupt state is captured via save_intr() first so that,
 * once the lock is owned, it can be stashed in mtx_saveintr for
 * _rel_spin_lock() to restore on release.
 */
#ifndef _get_spin_lock
#define _get_spin_lock(mp, tid, opts) do {				\
	u_int _mtx_intr = save_intr();					\
	if (!_obtain_lock((mp), (tid))) {				\
		if ((mp)->mtx_lock == (uintptr_t)(tid))			\
			(mp)->mtx_recurse++;				\
			_mtx_lock_spin((mp), (opts), _mtx_intr,		\
			    __FILE__, __LINE__);			\
	(mp)->mtx_saveintr = _mtx_intr;					\
/*
 * Release a sleep lock inline, or call the "hard" function if we can't do it
 * the easy way (the fast atomic release fails, e.g. on contention/recursion).
 */
#ifndef _rel_sleep_lock
#define _rel_sleep_lock(mp, tid, opts) do {				\
	if (!_release_lock((mp), (tid)))				\
		_mtx_unlock_sleep((mp), (opts), __FILE__, __LINE__);	\
/*
 * For spinlocks, we can handle everything inline, as it's pretty simple and
 * a function call would be too expensive (at least on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 *
 * The interrupt state saved at acquire time (mtx_saveintr) is restored via
 * restore_intr() after the lock is released.
 */
#ifndef _rel_spin_lock
#define _rel_spin_lock(mp) do {						\
	u_int _mtx_intr = (mp)->mtx_saveintr;				\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	_release_lock_quick((mp));					\
	restore_intr(_mtx_intr);					\
/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) locks mutex `m'
 * and passes option flags `opts' to the "hard" function, if required.
 * With these routines, it is possible to pass flags such as MTX_QUIET
 * and/or MTX_NOSWITCH to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 * it cannot. Rather, it returns 0 on failure and non-zero on success.
 * It does NOT handle recursion as we assume that if a caller is properly
 * using this part of the interface, he will know that the lock in question
 * is not already owned.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
 * relevant option flags `opts.'
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
/* The plain interfaces are the _flags variants with no option flags set. */
#define mtx_lock(m)		mtx_lock_flags((m), 0)
#define mtx_lock_spin(m)	mtx_lock_spin_flags((m), 0)
#define mtx_trylock(m)		mtx_trylock_flags((m), 0)
#define mtx_unlock(m)		mtx_unlock_flags((m), 0)
#define mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)
/*
 * Acquire the MTX_DEF (sleep) mutex `m' on behalf of curproc; logs a KTR
 * event unless MTX_QUIET is set and informs WITNESS.  MTX_NOSWITCH is
 * rejected here by the KASSERT below.
 */
#define mtx_lock_flags(m, opts) do {					\
	MPASS(curproc != NULL);						\
	KASSERT(((opts) & MTX_NOSWITCH) == 0,				\
	    ("MTX_NOSWITCH used at %s:%d", __FILE__, __LINE__));	\
	_get_sleep_lock((m), curproc, (opts));				\
	if (((opts) & MTX_QUIET) == 0)					\
		CTR5(KTR_LOCK, STR_mtx_lock_slp,			\
		    (m)->mtx_description, (m), (m)->mtx_recurse,	\
		    __FILE__, __LINE__);				\
	WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), __FILE__,	\
/*
 * Acquire the MTX_SPIN mutex `m' for curproc; logs a KTR event unless
 * MTX_QUIET is set and informs WITNESS.
 */
#define mtx_lock_spin_flags(m, opts) do {				\
	MPASS(curproc != NULL);						\
	_get_spin_lock((m), curproc, (opts));				\
	if (((opts) & MTX_QUIET) == 0)					\
		CTR5(KTR_LOCK, STR_mtx_lock_spn,			\
		    (m)->mtx_description, (m), (m)->mtx_recurse,	\
		    __FILE__, __LINE__);				\
	WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), __FILE__,	\
/*
 * Release the MTX_DEF (sleep) mutex `m'; ownership by the caller is
 * asserted.  WITNESS is notified before the release; a KTR event is
 * logged afterwards unless MTX_QUIET is set.
 */
#define mtx_unlock_flags(m, opts) do {					\
	MPASS(curproc != NULL);						\
	mtx_assert((m), MA_OWNED);					\
	WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), __FILE__,		\
	_rel_sleep_lock((m), curproc, (opts));				\
	if (((opts) & MTX_QUIET) == 0)					\
		CTR5(KTR_LOCK, STR_mtx_unlock_slp,			\
		    (m)->mtx_description, (m), (m)->mtx_recurse,	\
		    __FILE__, __LINE__);				\
/*
 * Release the MTX_SPIN mutex `m'; ownership by the caller is asserted.
 * WITNESS is notified before the release; a KTR event is logged afterwards
 * unless MTX_QUIET is set.
 */
#define mtx_unlock_spin_flags(m, opts) do {				\
	MPASS(curproc != NULL);						\
	mtx_assert((m), MA_OWNED);					\
	WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), __FILE__,		\
	_rel_spin_lock((m));						\
	if (((opts) & MTX_QUIET) == 0)					\
		CTR5(KTR_LOCK, STR_mtx_unlock_spn,			\
		    (m)->mtx_description, (m), (m)->mtx_recurse,	\
		    __FILE__, __LINE__);				\
/* Try-lock: returns non-zero on success, 0 on failure; never blocks. */
#define	mtx_trylock_flags(mp, flags)					\
	_mtx_trylock((mp), (flags), __FILE__, __LINE__)

/* Non-zero if curproc is the owner recorded in mtx_lock (state bits masked). */
#define	mtx_owned(mp)							\
	(((mp)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)curproc)

/* Non-zero if the lock is presently held recursively. */
#define	mtx_recursed(mp)	((mp)->mtx_recurse != 0)
/* Well-known system-wide locks, declared here and defined elsewhere. */
extern struct mtx	sched_lock;
extern struct mtx	Giant;
/*
 * Giant lock manipulation and clean exit macros.
 * Used to replace return with an exit Giant and return.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT(): the
 * DROP macros record in `_giantcnt' how many recursive holds of Giant
 * were released, and the PICKUP macros re-acquire it that many times.
 * NOTE(review): PARTIAL_PICKUP_GIANT() appears to differ from
 * PICKUP_GIANT() only in how the macro scope is terminated — confirm
 * against the full header.
 */
/* Release every recursive hold on Giant without allowing a switch on release. */
#define DROP_GIANT_NOSWITCH()						\
	WITNESS_SAVE_DECL(Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock_flags(&Giant, MTX_NOSWITCH)
/* Release every recursive hold on Giant (normal release semantics). */
#define DROP_GIANT()							\
	WITNESS_SAVE_DECL(Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
#define PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		if (mtx_owned(&Giant))					\
			WITNESS_RESTORE(&Giant, Giant);			\
#define PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		if (mtx_owned(&Giant))					\
			WITNESS_RESTORE(&Giant, Giant)
/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 *
 * The constants need to be defined for INVARIANT_SUPPORT infrastructure
 * support as _mtx_assert() itself uses them and the latter implies that
 * _mtx_assert() must build.
 */
#ifdef INVARIANT_SUPPORT
#define MA_OWNED	0x01	/* assert caller owns the lock */
#define MA_NOTOWNED	0x02	/* assert caller does not own the lock */
#define MA_RECURSED	0x04	/* assert the lock is recursed */
#define MA_NOTRECURSED	0x08	/* assert the lock is not recursed */
#endif /* INVARIANT_SUPPORT */

#define mtx_assert(m, what)						\
	_mtx_assert((m), (what), __FILE__, __LINE__)

#else	/* INVARIANTS */
/* INVARIANTS off: lock assertions compile away entirely. */
#define mtx_assert(m, what)
#endif	/* INVARIANTS */
/*
 * MP assertion family: every variant funnels into MPASS4(), which hands the
 * expression, its message text, and a source location to KASSERT().
 */
#define	MPASS(expr)		MPASS4(expr, #expr, __FILE__, __LINE__)
#define	MPASS2(expr, str)	MPASS4(expr, str, __FILE__, __LINE__)
#define	MPASS3(expr, f, l)	MPASS4(expr, #expr, f, l)
#define	MPASS4(expr, str, f, l)						\
	KASSERT((expr), ("Assertion %s failed at %s:%d", str, f, l))
/*
 * Exported WITNESS-enabled functions and corresponding wrapper macros.
 */
/* save/restore capture a (file, line) pair associated with a held lock. */
void	witness_save(struct mtx *, const char **, int *);
void	witness_restore(struct mtx *, const char *, int);
/* enter/try_enter/exit take (mutex, flags, file, line). */
void	witness_enter(struct mtx *, int, const char *, int);
void	witness_try_enter(struct mtx *, int, const char *, int);
void	witness_exit(struct mtx *, int, const char *, int);
int	witness_list(struct proc *);
int	witness_sleep(int, struct mtx *, const char *, int);
/*
 * Wrapper macros that hand the caller's lock, flag word and source
 * location to the witness routines declared above.
 */
#define WITNESS_ENTER(mp, fl, file, line)				\
	witness_enter((mp), (fl), (file), (line))

#define WITNESS_EXIT(mp, fl, file, line)				\
	witness_exit((mp), (fl), (file), (line))

#define WITNESS_SLEEP(chk, mp)						\
	witness_sleep(chk, (mp), __FILE__, __LINE__)

/* Per-name storage consumed by WITNESS_SAVE()/WITNESS_RESTORE() below. */
#define WITNESS_SAVE_DECL(id)						\
	const char * __CONCAT(id, __wf);				\
	int __CONCAT(id, __wl)

#define WITNESS_SAVE(mp, id)						\
	witness_save(mp, &__CONCAT(id, __wf), &__CONCAT(id, __wl))

#define WITNESS_RESTORE(mp, id)						\
	witness_restore(mp, __CONCAT(id, __wf), __CONCAT(id, __wl))
/*
 * WITNESS is not compiled in: stub the witness entry points so that
 * instrumented call sites compile to nothing.
 */
#define witness_enter(m, t, f, l)
/*
 * The function declared under WITNESS is witness_try_enter(); stub that
 * exact name so !WITNESS builds of its callers still compile.  The old
 * misspelled stub is retained for backward compatibility.
 */
#define witness_try_enter(m, t, f, l)
#define witness_tryenter(m, t, f, l)
#define witness_exit(m, t, f, l)
#define witness_list(p)
#define witness_sleep(c, m, f, l)
/*
 * WITNESS-disabled stubs of the wrapper macros above; they expand to
 * nothing so instrumented call sites compile away.
 */
#define WITNESS_ENTER(m, t, f, l)
#define WITNESS_EXIT(m, t, f, l)
#define WITNESS_SLEEP(check, m)
#define WITNESS_SAVE_DECL(n)
#define WITNESS_SAVE(m, n)
#define WITNESS_RESTORE(m, n)

#endif /* _SYS_MUTEX_H_ */