/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 */
#ifndef _SYS_MUTEX_H_
#define	_SYS_MUTEX_H_

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/globals.h>

#include <machine/mutex.h>
/*
 * Mutex types and options passed to mtx_init().  MTX_QUIET can also be
 * passed in.
 */
#define	MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define	MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define	MTX_RECURSE	0x00000004	/* Option: lock allowed to recurse */
#define	MTX_NOWITNESS	0x00000008	/* Don't do any witness checking. */
#define	MTX_SLEEPABLE	0x00000010	/* We can sleep with this lock. */
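
/*
 * For example (an illustrative sketch only; the lock names are made up):
 *
 *	struct mtx foo_mtx, rec_mtx, icu_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", MTX_DEF);                ordinary sleep mutex
 *	mtx_init(&rec_mtx, "rec", MTX_DEF | MTX_RECURSE);  owner may re-acquire
 *	mtx_init(&icu_mtx, "icu", MTX_SPIN);               spin lock
 */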
/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 */
#define	MTX_NOSWITCH	LOP_NOSWITCH	/* Do not switch on release */
#define	MTX_QUIET	LOP_QUIET	/* Don't log a mutex event */
/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this,
 * with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define	MTX_UNOWNED	0x00000004	/* Cookie for free mutex */
#define	MTX_FLAGMASK	~(MTX_RECURSED | MTX_CONTESTED)
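
/*
 * As a worked example (illustrative only): a MTX_DEF mutex held by curproc
 * and recursed once has mtx_lock == ((uintptr_t)curproc | MTX_RECURSED),
 * which is why mtx_owned() below masks with MTX_FLAGMASK before comparing
 * against curproc.  A free mutex holds the MTX_UNOWNED cookie instead of
 * an owner pointer.
 */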
/*
 * XXX: Friendly reminder to fix things in MP code that is presently being
 * XXX: worked on.
 */
#define	mp_fixme(string)
/*
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp __FILE__
 *	 and __LINE__. These functions should not be called directly by any
 *	 code using the API. Their macros cover their functionality.
 *
 * [See below for descriptions]
 */
void	mtx_init(struct mtx *m, const char *description, int opts);
void	mtx_destroy(struct mtx *m);
void	_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit,
	    const char *file, int line);
void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
int	_mtx_trylock(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file,
	    int line);
void	_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
	    int line);
#ifdef INVARIANT_SUPPORT
void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
#endif
/*
 * We define our machine-independent (unoptimized) mutex micro-operations
 * here, if they are not already defined in the machine-dependent mutex.h.
 */
/* Actually obtain mtx_lock */
#ifndef _obtain_lock
#define	_obtain_lock(mp, tid)						\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
#endif
/* Actually release mtx_lock */
#ifndef _release_lock
#define	_release_lock(mp, tid)						\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
#endif
/* Actually release mtx_lock quickly, assuming we own it. */
#ifndef _release_lock_quick
#define	_release_lock_quick(mp)						\
	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
#endif
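
/*
 * A minimal sketch (ignoring atomicity and memory ordering, which the real
 * macros get from atomic_cmpset_acq_ptr()/atomic_cmpset_rel_ptr()) of what
 * the acquire micro-operation boils down to; the function name below is
 * illustrative only:
 *
 *	static int
 *	obtain_lock_sketch(struct mtx *mp, uintptr_t tid)
 *	{
 *		if (mp->mtx_lock != MTX_UNOWNED)
 *			return (0);
 *		mp->mtx_lock = tid;
 *		return (1);
 *	}
 *
 * A return of 0 sends the caller to the "hard" (slow-path) function.
 */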
/*
 * Obtain a sleep lock inline, or call the "hard" function if we can't get it
 * easy.
 */
#ifndef _get_sleep_lock
#define	_get_sleep_lock(mp, tid, opts, file, line) do {			\
	if (!_obtain_lock((mp), (tid)))					\
		_mtx_lock_sleep((mp), (opts), (file), (line));		\
} while (0)
#endif
/*
 * Obtain a spin lock inline, or call the "hard" function if we can't get it
 * easy. For spinlocks, we handle recursion inline (it turns out that function
 * calls can be significantly expensive on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 */
#ifndef _get_spin_lock
#define	_get_spin_lock(mp, tid, opts, file, line) do {			\
	critical_t _mtx_crit;						\
	_mtx_crit = critical_enter();					\
	if (!_obtain_lock((mp), (tid))) {				\
		if ((mp)->mtx_lock == (uintptr_t)(tid))			\
			(mp)->mtx_recurse++;				\
		else							\
			_mtx_lock_spin((mp), (opts), _mtx_crit, (file),	\
			    (line));					\
	} else								\
		(mp)->mtx_savecrit = _mtx_crit;				\
} while (0)
#endif
/*
 * Release a sleep lock inline, or call the "hard" function if we can't do it
 * easy.
 */
#ifndef _rel_sleep_lock
#define	_rel_sleep_lock(mp, tid, opts, file, line) do {			\
	if (!_release_lock((mp), (tid)))				\
		_mtx_unlock_sleep((mp), (opts), (file), (line));	\
} while (0)
#endif
/*
 * For spinlocks, we can handle everything inline, as it's pretty simple and
 * a function call would be too expensive (at least on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 */
#ifndef _rel_spin_lock
#define	_rel_spin_lock(mp) do {						\
	critical_t _mtx_crit = (mp)->mtx_savecrit;			\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		_release_lock_quick((mp));				\
		critical_exit(_mtx_crit);				\
	}								\
} while (0)
#endif
/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) lock mutex `m'
 *     and pass option flags `opts' to the "hard" function, if required.
 *     With these routines, it is possible to pass flags such as MTX_QUIET
 *     and/or MTX_NOSWITCH to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 *     it cannot. Rather, it returns 0 on failure and non-zero on success.
 *     It does NOT handle recursion as we assume that if a caller is properly
 *     using this part of the interface, he will know that the lock in question
 *     is _not_ already owned by him.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
 *     relevant option flags `opts.'
 *
 * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'.
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
#define	mtx_lock(m)		mtx_lock_flags((m), 0)
#define	mtx_lock_spin(m)	mtx_lock_spin_flags((m), 0)
#define	mtx_trylock(m)		mtx_trylock_flags((m), 0)
#define	mtx_unlock(m)		mtx_unlock_flags((m), 0)
#define	mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)
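
/*
 * A minimal usage sketch (the data and function names here are illustrative
 * only, not part of this interface):
 *
 *	static struct mtx foo_mtx;
 *	static int foo_count;
 *
 *	void
 *	foo_init(void)
 *	{
 *		mtx_init(&foo_mtx, "foo count lock", MTX_DEF);
 *	}
 *
 *	void
 *	foo_bump(void)
 *	{
 *		mtx_lock(&foo_mtx);
 *		foo_count++;
 *		mtx_unlock(&foo_mtx);
 *	}
 *
 *	int
 *	foo_bump_if_easy(void)
 *	{
 *		if (!mtx_trylock(&foo_mtx))
 *			return (0);
 *		foo_count++;
 *		mtx_unlock(&foo_mtx);
 *		return (1);
 *	}
 */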
#ifdef LOCK_DEBUG
#define	mtx_lock_flags(m, opts)						\
	_mtx_lock_flags((m), (opts), __FILE__, __LINE__)
#define	mtx_unlock_flags(m, opts)					\
	_mtx_unlock_flags((m), (opts), __FILE__, __LINE__)
#define	mtx_lock_spin_flags(m, opts)					\
	_mtx_lock_spin_flags((m), (opts), __FILE__, __LINE__)
#define	mtx_unlock_spin_flags(m, opts)					\
	_mtx_unlock_spin_flags((m), (opts), __FILE__, __LINE__)
#else	/* LOCK_DEBUG */
#define	mtx_lock_flags(m, opts)						\
	__mtx_lock_flags((m), (opts), __FILE__, __LINE__)
#define	mtx_unlock_flags(m, opts)					\
	__mtx_unlock_flags((m), (opts), __FILE__, __LINE__)
#define	mtx_lock_spin_flags(m, opts)					\
	__mtx_lock_spin_flags((m), (opts), __FILE__, __LINE__)
#define	mtx_unlock_spin_flags(m, opts)					\
	__mtx_unlock_spin_flags((m), (opts), __FILE__, __LINE__)
#endif	/* LOCK_DEBUG */
#define	__mtx_lock_flags(m, opts, file, line) do {			\
	MPASS(curproc != NULL);						\
	KASSERT(((opts) & MTX_NOSWITCH) == 0,				\
	    ("MTX_NOSWITCH used at %s:%d", (file), (line)));		\
	_get_sleep_lock((m), curproc, (opts), (file), (line));		\
	LOCK_LOG_LOCK("LOCK", &(m)->mtx_object, opts, m->mtx_recurse,	\
	    (file), (line));						\
	WITNESS_LOCK(&(m)->mtx_object, (opts) | LOP_EXCLUSIVE, (file),	\
	    (line));							\
} while (0)
#define	__mtx_lock_spin_flags(m, opts, file, line) do {			\
	MPASS(curproc != NULL);						\
	_get_spin_lock((m), curproc, (opts), (file), (line));		\
	LOCK_LOG_LOCK("LOCK", &(m)->mtx_object, opts, m->mtx_recurse,	\
	    (file), (line));						\
	WITNESS_LOCK(&(m)->mtx_object, (opts) | LOP_EXCLUSIVE, (file),	\
	    (line));							\
} while (0)
#define	__mtx_unlock_flags(m, opts, file, line) do {			\
	MPASS(curproc != NULL);						\
	mtx_assert((m), MA_OWNED);					\
	WITNESS_UNLOCK(&(m)->mtx_object, (opts) | LOP_EXCLUSIVE,	\
	    (file), (line));						\
	LOCK_LOG_LOCK("UNLOCK", &(m)->mtx_object, (opts),		\
	    (m)->mtx_recurse, (file), (line));				\
	_rel_sleep_lock((m), curproc, (opts), (file), (line));		\
} while (0)
#define	__mtx_unlock_spin_flags(m, opts, file, line) do {		\
	MPASS(curproc != NULL);						\
	mtx_assert((m), MA_OWNED);					\
	WITNESS_UNLOCK(&(m)->mtx_object, (opts) | LOP_EXCLUSIVE,	\
	    (file), (line));						\
	LOCK_LOG_LOCK("UNLOCK", &(m)->mtx_object, (opts),		\
	    (m)->mtx_recurse, (file), (line));				\
	_rel_spin_lock((m));						\
} while (0)
#define	mtx_trylock_flags(m, opts)					\
	_mtx_trylock((m), (opts), __FILE__, __LINE__)

#define	mtx_initialized(m)	((m)->mtx_object.lo_flags & LO_INITIALIZED)

#define	mtx_owned(m)	(((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)curproc)

#define	mtx_recursed(m)	((m)->mtx_recurse != 0)
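
/*
 * These predicates are mostly useful in assertions; a sketch (the lock and
 * function names are illustrative only):
 *
 *	void
 *	foo_modify_locked(void)
 *	{
 *		KASSERT(mtx_initialized(&foo_mtx), ("foo_mtx not set up"));
 *		KASSERT(mtx_owned(&foo_mtx), ("caller must hold foo_mtx"));
 *		...
 *	}
 */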
extern struct mtx	sched_lock;
extern struct mtx	Giant;
/*
 * Giant lock manipulation and clean exit macros.
 * Used to replace a return with an exit-Giant-and-return sequence.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT().
 */
#define	DROP_GIANT_NOSWITCH()						\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant.mtx_object, Giant);			\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock_flags(&Giant, MTX_NOSWITCH)

#define	DROP_GIANT()							\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant.mtx_object, Giant);			\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock(&Giant)

#define	PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant.mtx_object, Giant);		\
} while (0)

#define	PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant.mtx_object, Giant)
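
/*
 * Typical use (a sketch; example_block() is a stand-in for any code that
 * must run without Giant held, e.g. code that may block for a long time):
 *
 *	DROP_GIANT();
 *	example_block();
 *	PICKUP_GIANT();
 *
 * DROP_GIANT() opens a block scope and releases Giant as many times as it
 * is recursively held; PICKUP_GIANT() re-acquires it the same number of
 * times and closes the scope, so the two must appear in the same function
 * at the same nesting level.
 */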
/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 *
 * The constants need to be defined whenever INVARIANT_SUPPORT is, since
 * _mtx_assert() uses them and INVARIANT_SUPPORT implies that _mtx_assert()
 * must build.
 */
#ifdef INVARIANT_SUPPORT
#define	MA_OWNED	0x01
#define	MA_NOTOWNED	0x02
#define	MA_RECURSED	0x04
#define	MA_NOTRECURSED	0x08
#endif /* INVARIANT_SUPPORT */

#ifdef INVARIANTS
#define	mtx_assert(m, what)						\
	_mtx_assert((m), (what), __FILE__, __LINE__)
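
/*
 * For example (illustrative only), a helper that requires its caller to
 * hold the lock, without recursion, can document and enforce that with:
 *
 *	mtx_assert(&foo_mtx, MA_OWNED);
 *	mtx_assert(&foo_mtx, MA_NOTRECURSED);
 *
 * while MA_NOTOWNED can be asserted where the lock must not be held.
 */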
/*
 * GIANT_IRRELEVANT - empty place mark assertion for system startup code
 *		      where serialization is implied, or for utterly trivial
 *		      routines that do not need Giant.
 *
 * GIANT_REQUIRED - Giant must be held on entry.
 *
 * *_GIANT_DEPRECATED - Giant may or may not be held, we may hold Giant here
 *			based on a sysctl, and no deeper subroutine
 *			requires it to be held.
 *
 * *_GIANT_OPTIONAL - Giant may or may not be held and no deeper subroutine
 *		      requires it to be held.
 */
391 #define GIANT_REQUIRED \
393 KASSERT(curproc->p_giant_optional == 0, ("Giant not optional at %s: %d", __FILE__, __LINE__)); \
394 mtx_assert(&Giant, MA_OWNED); \
396 #define START_GIANT_DEPRECATED(sysctlvar) \
397 int __gotgiant = (curproc->p_giant_optional == 0 && sysctlvar) ? \
398 (mtx_lock(&Giant), 1) : 0
399 #define END_GIANT_DEPRECATED \
400 if (__gotgiant) mtx_unlock(&Giant)
401 #define START_GIANT_OPTIONAL \
402 ++curproc->p_giant_optional
403 #define END_GIANT_OPTIONAL \
404 --curproc->p_giant_optional
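
/*
 * A usage sketch (giant_for_foo is a hypothetical sysctl-backed int):
 *
 *	void
 *	foo_handler(void)
 *	{
 *		START_GIANT_DEPRECATED(giant_for_foo);
 *		... code that historically ran under Giant ...
 *		END_GIANT_DEPRECATED;
 *	}
 *
 * GIANT_REQUIRED, by contrast, simply asserts that Giant is already held
 * (and not marked optional) at the point where it appears.
 */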
#else	/* INVARIANTS */
#define	mtx_assert(m, what)
#define	GIANT_IRRELEVANT
#define	GIANT_REQUIRED
#define	START_GIANT_DEPRECATED(sysctl)
#define	END_GIANT_DEPRECATED
#define	START_GIANT_OPTIONAL
#define	END_GIANT_OPTIONAL
#endif	/* INVARIANTS */
#endif	/* _SYS_MUTEX_H_ */