/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 */
#ifndef _SYS_MUTEX_H_
#define	_SYS_MUTEX_H_

#ifndef LOCORE
#include <sys/queue.h>

#ifdef _KERNEL
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/globals.h>
#endif	/* _KERNEL */
#endif	/* !LOCORE */

#include <machine/mutex.h>

#ifndef LOCORE

/*
 * Mutex types and options stored in mutex->mtx_flags.
 */
#define	MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define	MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define	MTX_RECURSE	0x00000002	/* Option: lock allowed to recurse */
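
/*
 * Illustrative example (not part of this interface): a sleep mutex that is
 * allowed to recurse is initialized by OR'ing the MTX_RECURSE option into
 * its type; `foo_mtx' here is a hypothetical lock.
 *
 *	struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", MTX_DEF | MTX_RECURSE);
 */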

/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 *
 * XXX: The only reason we make these bits not interfere with the above "types
 *	and options" bits is because we have to pass both to the witness
 *	routines right now; if/when we clean up the witness interface to
 *	not check for mutex type from the passed in flag, but rather from
 *	the mutex lock's mtx_flags field, then we can change these values to
 *	pretty much anything.
 */
#define	MTX_NOSWITCH	0x00000004	/* Do not switch on release */
#define	MTX_QUIET	0x00000008	/* Don't log a mutex event */
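
/*
 * Illustrative example (not part of this interface): releasing a hypothetical
 * lock `foo_mtx' without allowing a context switch on release, and without
 * logging a mutex event:
 *
 *	mtx_unlock_flags(&foo_mtx, MTX_NOSWITCH | MTX_QUIET);
 */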

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of
 * this, with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define	MTX_UNOWNED	0x00000004	/* Cookie for free mutex */
#define	MTX_FLAGMASK	~(MTX_RECURSED | MTX_CONTESTED)
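
/*
 * Illustrative sketch (not part of this interface): for a held MTX_DEF lock,
 * the bits of mtx_lock above MTX_RECURSED/MTX_CONTESTED hold the owning
 * thread pointer, so masking with MTX_FLAGMASK recovers the owner; this is
 * exactly what the mtx_owned() macro below does.  For a hypothetical
 * `struct mtx *m':
 *
 *	struct proc *owner = (struct proc *)(m->mtx_lock & MTX_FLAGMASK);
 */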

/*
 * The mutex lock structure.
 */
struct mtx {
	volatile uintptr_t mtx_lock;	/* owner (and state for sleep locks) */
	volatile u_int	mtx_recurse;	/* number of recursive holds */
	u_int		mtx_saveintr;	/* saved flags (for spin locks) */
	int		mtx_flags;	/* flags passed to mtx_init() */
	const char	*mtx_description;
	TAILQ_HEAD(, proc) mtx_blocked;	/* threads blocked on this lock */
	LIST_ENTRY(mtx)	mtx_contested;	/* list of all contested locks */
	struct mtx	*mtx_next;	/* all existing locks... */
	struct mtx	*mtx_prev;	/* ...in the system */
	struct mtx_debug *mtx_debug;	/* debugging information */
};

/*
 * XXX: Friendly reminder to fix things in MP code that is presently being
 *	worked on.
 */
#define mp_fixme(string)

#ifdef _KERNEL

/*
 * Prototypes
 *
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp __FILE__
 *	 and __LINE__. These functions should not be called directly by any
 *	 code using the API; their corresponding macros cover their
 *	 functionality.
 *
 * [See below for descriptions]
 */
void	mtx_init(struct mtx *m, const char *description, int opts);
void	mtx_destroy(struct mtx *m);
void	_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr,
	    const char *file, int line);
void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
int	_mtx_trylock(struct mtx *m, int opts, const char *file, int line);
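
/*
 * Illustrative example (not part of this interface): the typical life cycle
 * of a lock; `foo_softc' and its mutex are hypothetical.
 *
 *	struct foo_softc {
 *		struct mtx sc_mtx;
 *	} sc;
 *
 *	mtx_init(&sc.sc_mtx, "foo softc lock", MTX_DEF);
 *	...acquire and release via mtx_lock()/mtx_unlock()...
 *	mtx_destroy(&sc.sc_mtx);
 */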

/*
 * We define our machine-independent (unoptimized) mutex micro-operations
 * here, if they are not already defined in the machine-dependent mutex.h.
 */

/* Actually obtain mtx_lock */
#ifndef _obtain_lock
#define _obtain_lock(mp, tid)						\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
#endif

/* Actually release mtx_lock */
#ifndef _release_lock
#define _release_lock(mp, tid)						\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
#endif

/* Actually release mtx_lock quickly, assuming we own it. */
#ifndef _release_lock_quick
#define _release_lock_quick(mp)						\
	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
#endif
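
/*
 * For exposition only: the compare-and-set in _obtain_lock() behaves as if
 * the following ran as one indivisible step, with acquire semantics on
 * success (non-zero return) and no side effect on failure (zero return):
 *
 *	if ((mp)->mtx_lock == MTX_UNOWNED) {
 *		(mp)->mtx_lock = (uintptr_t)(tid);
 *		return (1);
 *	}
 *	return (0);
 */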

/*
 * Obtain a sleep lock inline, or call the "hard" function if we can't get it
 * easily.
 */
#ifndef _get_sleep_lock
#define _get_sleep_lock(mp, tid, opts) do {				\
	if (!_obtain_lock((mp), (tid)))					\
		_mtx_lock_sleep((mp), (opts), __FILE__, __LINE__);	\
} while (0)
#endif

/*
 * Obtain a spin lock inline, or call the "hard" function if we can't get it
 * easily. For spin locks, we handle recursion inline (it turns out that
 * function calls can be significantly expensive on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 */
#ifndef _get_spin_lock
#define _get_spin_lock(mp, tid, opts) do {				\
	u_int _mtx_intr = save_intr();					\
	disable_intr();							\
	if (!_obtain_lock((mp), (tid))) {				\
		if ((mp)->mtx_lock == (uintptr_t)(tid))			\
			(mp)->mtx_recurse++;				\
		else							\
			_mtx_lock_spin((mp), (opts), _mtx_intr,		\
			    __FILE__, __LINE__);			\
	} else								\
		(mp)->mtx_saveintr = _mtx_intr;				\
} while (0)
#endif

/*
 * Release a sleep lock inline, or call the "hard" function if we can't do it
 * easily.
 */
#ifndef _rel_sleep_lock
#define _rel_sleep_lock(mp, tid, opts) do {				\
	if (!_release_lock((mp), (tid)))				\
		_mtx_unlock_sleep((mp), (opts), __FILE__, __LINE__);	\
} while (0)
#endif

/*
 * For spin locks, we can handle everything inline, as it's pretty simple and
 * a function call would be too expensive (at least on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 */
#ifndef _rel_spin_lock
#define _rel_spin_lock(mp) do {						\
	u_int _mtx_intr = (mp)->mtx_saveintr;				\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		_release_lock_quick((mp));				\
		restore_intr(_mtx_intr);				\
	}								\
} while (0)
#endif

/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) lock mutex `m'
 *     and pass option flags `opts' to the "hard" function, if required.
 *     With these routines, it is possible to pass flags such as MTX_QUIET
 *     and/or MTX_NOSWITCH to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 *     it cannot. Rather, it returns 0 on failure and non-zero on success.
 *     It does NOT handle recursion, as we assume that a caller properly
 *     using this part of the interface will know that the lock in question
 *     is _not_ already owned.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but
 *     accepts relevant option flags `opts'.
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'.
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
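
/*
 * Illustrative example (not part of this interface): protecting a
 * hypothetical counter with a MTX_DEF mutex, and a brief MTX_SPIN critical
 * section; `foo_mtx', `foo_spin_mtx' and `foo_count' are hypothetical.
 *
 *	mtx_lock(&foo_mtx);
 *	foo_count++;
 *	mtx_unlock(&foo_mtx);
 *
 *	mtx_lock_spin(&foo_spin_mtx);
 *	...short, non-sleeping critical section...
 *	mtx_unlock_spin(&foo_spin_mtx);
 */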

#define mtx_lock(m) do {						\
	MPASS(CURPROC != NULL);						\
	_get_sleep_lock((m), CURTHD, 0);				\
	WITNESS_ENTER((m), (m)->mtx_flags, __FILE__, __LINE__);	\
} while (0)

#define mtx_lock_spin(m) do {						\
	MPASS(CURPROC != NULL);						\
	_get_spin_lock((m), CURTHD, 0);					\
	WITNESS_ENTER((m), (m)->mtx_flags, __FILE__, __LINE__);	\
} while (0)

#define mtx_unlock(m) do {						\
	MPASS(CURPROC != NULL);						\
	WITNESS_EXIT((m), (m)->mtx_flags, __FILE__, __LINE__);		\
	_rel_sleep_lock((m), CURTHD, 0);				\
} while (0)

#define mtx_unlock_spin(m) do {					\
	MPASS(CURPROC != NULL);						\
	WITNESS_EXIT((m), (m)->mtx_flags, __FILE__, __LINE__);		\
	_rel_spin_lock((m));						\
} while (0)

#define mtx_lock_flags(m, opts) do {					\
	MPASS(CURPROC != NULL);						\
	_get_sleep_lock((m), CURTHD, (opts));				\
	WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), __FILE__,	\
	    __LINE__);							\
} while (0)

#define mtx_lock_spin_flags(m, opts) do {				\
	MPASS(CURPROC != NULL);						\
	_get_spin_lock((m), CURTHD, (opts));				\
	WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), __FILE__,	\
	    __LINE__);							\
} while (0)

#define mtx_unlock_flags(m, opts) do {					\
	MPASS(CURPROC != NULL);						\
	WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), __FILE__,		\
	    __LINE__);							\
	_rel_sleep_lock((m), CURTHD, (opts));				\
} while (0)

/*
 * The MTX_SPIN unlock case is all inlined, so we handle the MTX_QUIET
 * flag right in the macro. This is not a problem: if we don't have
 * KTR_LOCK, the check is optimized out.
 */
#define mtx_unlock_spin_flags(m, opts) do {				\
	MPASS(CURPROC != NULL);						\
	WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), __FILE__,		\
	    __LINE__);							\
	if (((opts) & MTX_QUIET) == 0)					\
		CTR5(KTR_LOCK, "REL %s [%p] r=%d at %s:%d",		\
		    (m)->mtx_description, (m), (m)->mtx_recurse,	\
		    __FILE__, __LINE__);				\
	_rel_spin_lock((m));						\
} while (0)

#define mtx_trylock(m)							\
	_mtx_trylock((m), 0, __FILE__, __LINE__)

#define mtx_trylock_flags(m, opts)					\
	_mtx_trylock((m), (opts), __FILE__, __LINE__)

#define mtx_owned(m)	(((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)CURTHD)

#define mtx_recursed(m)	((m)->mtx_recurse != 0)
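
/*
 * Illustrative example (not part of this interface): the usual trylock
 * pattern; `foo_mtx' is a hypothetical MTX_DEF lock.
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		...got the lock; do the work...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		...lock was busy; take a fallback path...
 *	}
 */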

/*
 * Global locks.
 */
extern struct mtx	sched_lock;
extern struct mtx	Giant;

/*
 * Giant lock manipulation and clean exit macros.
 * Used to replace a plain return with one that drops Giant first.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT().
 */
#define DROP_GIANT_NOSWITCH()						\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock_flags(&Giant, MTX_NOSWITCH)

#define DROP_GIANT()							\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock(&Giant)

#define PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant);				\
} while (0)

#define PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant)
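
/*
 * Illustrative example (not part of this interface): DROP_GIANT() opens a
 * block (note the unmatched `do {' above) that PICKUP_GIANT() closes, so the
 * pair must appear in the same lexical scope:
 *
 *	DROP_GIANT();
 *	...code that must not hold Giant, e.g. a potentially long sleep...
 *	PICKUP_GIANT();
 */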

/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 */
#ifdef INVARIANTS
#define MA_OWNED	0x01
#define MA_NOTOWNED	0x02
#define MA_RECURSED	0x04
#define MA_NOTRECURSED	0x08

void	_mtx_assert(struct mtx *m, int what, const char *file, int line);

#define mtx_assert(m, what)						\
	_mtx_assert((m), (what), __FILE__, __LINE__)
#else	/* INVARIANTS */
#define	mtx_assert(m, what)
#endif	/* INVARIANTS */
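
/*
 * Illustrative example (not part of this interface): a function that requires
 * its caller to hold a hypothetical lock `foo_mtx' can document and enforce
 * that precondition with:
 *
 *	mtx_assert(&foo_mtx, MA_OWNED);
 *
 * Under INVARIANTS this panics if the condition does not hold; otherwise it
 * compiles away to nothing.
 */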

/*
 * The MUTEX_DEBUG-enabled MPASS*() extra sanity-check macros.
 */
#ifdef MUTEX_DEBUG
#define MPASS(ex)							\
	if (!(ex))							\
		panic("Assertion %s failed at %s:%d", #ex, __FILE__,	\
		    __LINE__)

#define MPASS2(ex, what)						\
	if (!(ex))							\
		panic("Assertion %s failed at %s:%d", what, __FILE__,	\
		    __LINE__)

#define MPASS3(ex, file, line)						\
	if (!(ex))							\
		panic("Assertion %s failed at %s:%d", #ex, file, line)

#define MPASS4(ex, what, file, line)					\
	if (!(ex))							\
		panic("Assertion %s failed at %s:%d", what, file, line)

#else	/* MUTEX_DEBUG */
#define MPASS(ex)
#define MPASS2(ex, what)
#define MPASS3(ex, file, line)
#define MPASS4(ex, what, file, line)
#endif	/* MUTEX_DEBUG */

/*
 * Exported WITNESS-enabled functions and corresponding wrapper macros.
 */
#ifdef	WITNESS
void	witness_save(struct mtx *, const char **, int *);
void	witness_restore(struct mtx *, const char *, int);
void	witness_enter(struct mtx *, int, const char *, int);
void	witness_try_enter(struct mtx *, int, const char *, int);
void	witness_exit(struct mtx *, int, const char *, int);
int	witness_list(struct proc *);
int	witness_sleep(int, struct mtx *, const char *, int);

#define WITNESS_ENTER(m, t, f, l)					\
	witness_enter((m), (t), (f), (l))

#define WITNESS_EXIT(m, t, f, l)					\
	witness_exit((m), (t), (f), (l))

#define WITNESS_SLEEP(check, m)						\
	witness_sleep(check, (m), __FILE__, __LINE__)

#define WITNESS_SAVE_DECL(n)						\
	const char * __CONCAT(n, __wf);					\
	int __CONCAT(n, __wl)

#define WITNESS_SAVE(m, n)						\
	witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl))

#define WITNESS_RESTORE(m, n)						\
	witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl))
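
/*
 * Illustrative example (not part of this interface): the WITNESS_SAVE_DECL()/
 * WITNESS_SAVE()/WITNESS_RESTORE() triple preserves a lock's witness state
 * across a temporary release, just as the DROP_GIANT()/PICKUP_GIANT() macros
 * above do for Giant; `foo_mtx' is hypothetical.
 *
 *	WITNESS_SAVE_DECL(foo);
 *
 *	WITNESS_SAVE(&foo_mtx, foo);
 *	mtx_unlock(&foo_mtx);
 *	...
 *	mtx_lock(&foo_mtx);
 *	WITNESS_RESTORE(&foo_mtx, foo);
 */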

#else	/* WITNESS */
#define witness_enter(m, t, f, l)
#define witness_try_enter(m, t, f, l)
#define witness_exit(m, t, f, l)
#define witness_list(p)
#define witness_sleep(c, m, f, l)

#define WITNESS_ENTER(m, t, f, l)
#define WITNESS_EXIT(m, t, f, l)
#define WITNESS_SLEEP(check, m)
#define WITNESS_SAVE_DECL(n)
#define WITNESS_SAVE(m, n)
#define WITNESS_RESTORE(m, n)
#endif	/* WITNESS */

#endif	/* _KERNEL */
#endif	/* !LOCORE */
#endif	/* _SYS_MUTEX_H_ */