/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 */
#ifndef _SYS_MUTEX_H_
#define	_SYS_MUTEX_H_

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef _KERNEL
#include <sys/pcpu.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
/*
 * Mutex types and options passed to mtx_init(). MTX_QUIET and MTX_DUPOK
 * can also be passed in.
 */
#define	MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define	MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define	MTX_RECURSE	0x00000004	/* Option: lock allowed to recurse */
#define	MTX_NOWITNESS	0x00000008	/* Don't do any witness checking. */
#define	MTX_NOPROFILE	0x00000020	/* Don't profile this lock */
#define	MTX_NEW		0x00000040	/* Don't check for double-init */
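
/*
 * For example, a subsystem whose lock may be taken recursively would pass
 * MTX_RECURSE at init time; a minimal sketch ("sc" and the name string
 * are hypothetical):
 *
 *	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF | MTX_RECURSE);
 */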

/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 */
#define	MTX_QUIET	LOP_QUIET	/* Don't log a mutex event */
#define	MTX_DUPOK	LOP_DUPOK	/* Don't log a duplicate acquire */

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this,
 * with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define	MTX_UNOWNED	0x00000000	/* Cookie for free mutex */
#define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define	MTX_DESTROYED	0x00000004	/* lock destroyed */
#define	MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED | MTX_DESTROYED)
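
/*
 * An owned MTX_DEF lock word is therefore the owning thread pointer ORed
 * with the low flag bits; a sketch of the decoding (this is what the
 * lv_mtx_owner() macro below does):
 *
 *	uintptr_t v = m->mtx_lock;
 *	struct thread *owner = (struct thread *)(v & ~MTX_FLAGMASK);
 */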

/*
 * Prototypes
 *
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
 *	 and LOCK_LINE, and for hiding the lock cookie crunching from the
 *	 consumers. These functions should not be called directly by any
 *	 code using the API. Their macros cover their functionality.
 *	 Functions with a `_' suffix are the entrypoint for the common
 *	 KPI covering both compat shims and the fast path case. These can be
 *	 used by consumers willing to pass options, file and line
 *	 information, in an option-independent way.
 *
 * [See below for descriptions]
 */
void	_mtx_init(volatile uintptr_t *c, const char *name, const char *type,
	    int opts);
void	_mtx_destroy(volatile uintptr_t *c);
void	mtx_sysinit(void *arg);
int	_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF);
int	_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	mutex_init(void);
#if LOCK_DEBUG > 0
void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
void	__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
#else
void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v);
void	__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v);
#endif

#ifdef SMP
#if LOCK_DEBUG > 0
void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
#else
void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v);
#endif
#endif
void	__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	__mtx_lock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
int	__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
void	__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
void	mtx_spin_wait_unlocked(struct mtx *m);

#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void	__mtx_assert(const volatile uintptr_t *c, int what, const char *file,
	    int line);
#endif
void	thread_lock_flags_(struct thread *, int, const char *, int);
#if LOCK_DEBUG > 0
void	_thread_lock(struct thread *td, int opts, const char *file, int line);
#else
void	_thread_lock(struct thread *);
#endif

#if defined(LOCK_PROFILING) || (defined(KLD_MODULE) && !defined(KLD_TIED))
#define	thread_lock(tdp)						\
	thread_lock_flags_((tdp), 0, __FILE__, __LINE__)
#elif LOCK_DEBUG > 0
#define	thread_lock(tdp)						\
	_thread_lock((tdp), 0, __FILE__, __LINE__)
#else
#define	thread_lock(tdp)						\
	_thread_lock((tdp))
#endif

#if LOCK_DEBUG > 0
#define	thread_lock_flags(tdp, opt)					\
	thread_lock_flags_((tdp), (opt), __FILE__, __LINE__)
#else
#define	thread_lock_flags(tdp, opt)					\
	thread_lock(tdp)
#endif

#define	thread_unlock(tdp)						\
	mtx_unlock_spin((tdp)->td_lock)
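
/*
 * A minimal sketch of typical use: accesses to a thread's scheduling
 * state are bracketed by thread_lock()/thread_unlock() on the thread's
 * current container lock:
 *
 *	thread_lock(td);
 *	(modify td's scheduling state)
 *	thread_unlock(td);
 */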

/*
 * Top-level macros to provide lock cookie once the actual mtx is passed.
 * They will also prevent passing a malformed object to the mtx KPI by
 * failing compilation as the mtx_lock reserved member will not be found.
 */
#define	mtx_init(m, n, t, o)						\
	_mtx_init(&(m)->mtx_lock, n, t, o)
#define	mtx_destroy(m)							\
	_mtx_destroy(&(m)->mtx_lock)
#define	mtx_trylock_flags_(m, o, f, l)					\
	_mtx_trylock_flags_(&(m)->mtx_lock, o, f, l)
#if LOCK_DEBUG > 0
#define	_mtx_lock_sleep(m, v, o, f, l)					\
	__mtx_lock_sleep(&(m)->mtx_lock, v, o, f, l)
#define	_mtx_unlock_sleep(m, v, o, f, l)				\
	__mtx_unlock_sleep(&(m)->mtx_lock, v, o, f, l)
#else
#define	_mtx_lock_sleep(m, v, o, f, l)					\
	__mtx_lock_sleep(&(m)->mtx_lock, v)
#define	_mtx_unlock_sleep(m, v, o, f, l)				\
	__mtx_unlock_sleep(&(m)->mtx_lock, v)
#endif
#ifdef SMP
#if LOCK_DEBUG > 0
#define	_mtx_lock_spin(m, v, o, f, l)					\
	_mtx_lock_spin_cookie(&(m)->mtx_lock, v, o, f, l)
#else
#define	_mtx_lock_spin(m, v, o, f, l)					\
	_mtx_lock_spin_cookie(&(m)->mtx_lock, v)
#endif
#endif
#define	_mtx_lock_flags(m, o, f, l)					\
	__mtx_lock_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_unlock_flags(m, o, f, l)					\
	__mtx_unlock_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_lock_spin_flags(m, o, f, l)				\
	__mtx_lock_spin_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_trylock_spin_flags(m, o, f, l)				\
	__mtx_trylock_spin_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_unlock_spin_flags(m, o, f, l)				\
	__mtx_unlock_spin_flags(&(m)->mtx_lock, o, f, l)
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	_mtx_assert(m, w, f, l)						\
	__mtx_assert(&(m)->mtx_lock, w, f, l)
#endif

#define	mtx_recurse	lock_object.lo_data

/* Very simple operations on mtx_lock. */

/* Try to obtain mtx_lock once. */
#define	_mtx_obtain_lock(mp, tid)					\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid))

#define	_mtx_obtain_lock_fetch(mp, vp, tid)				\
	atomic_fcmpset_acq_ptr(&(mp)->mtx_lock, vp, (tid))

/* Try to release mtx_lock if it is unrecursed and uncontested. */
#define	_mtx_release_lock(mp, tid)					\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)

/* Release mtx_lock quickly, assuming we own it. */
#define	_mtx_release_lock_quick(mp)					\
	atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)

#define	_mtx_release_lock_fetch(mp, vp)					\
	atomic_fcmpset_rel_ptr(&(mp)->mtx_lock, (vp), MTX_UNOWNED)

/*
 * Full lock operations that are suitable to be inlined in non-debug
 * kernels. If the lock cannot be acquired or released trivially then
 * the work is deferred to another function.
 */

/* Lock a normal mutex. */
#define	__mtx_lock(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = MTX_UNOWNED;					\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__acquire) ||\
	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
		_mtx_lock_sleep((mp), _v, (opts), (file), (line));	\
} while (0)

/*
 * Lock a spin mutex. For spinlocks, we handle recursion inline (it
 * turns out that function calls can be significantly expensive on
 * some architectures). Since spin locks are not _too_ common,
 * inlining this code is not too big a deal.
 */
#ifdef SMP
#define	__mtx_lock_spin(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = MTX_UNOWNED;					\
									\
	spinlock_enter();						\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire) ||	\
	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
		_mtx_lock_spin((mp), _v, (opts), (file), (line));	\
} while (0)
#define	__mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
	int _ret;							\
									\
	spinlock_enter();						\
	if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid))) {\
		spinlock_exit();					\
		_ret = 0;						\
	} else {							\
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,	\
		    mp, 0, 0, file, line);				\
		_ret = 1;						\
	}								\
	_ret;								\
})
#else /* SMP */
#define	__mtx_lock_spin(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock == _tid)					\
		(mp)->mtx_recurse++;					\
	else {								\
		KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
		(mp)->mtx_lock = _tid;					\
	}								\
} while (0)
#define	__mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
	int _ret;							\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock != MTX_UNOWNED) {				\
		spinlock_exit();					\
		_ret = 0;						\
	} else {							\
		(mp)->mtx_lock = _tid;					\
		_ret = 1;						\
	}								\
	_ret;								\
})
#endif /* SMP */

/* Unlock a normal mutex. */
#define	__mtx_unlock(mp, tid, opts, file, line) do {			\
	uintptr_t _v = (uintptr_t)(tid);				\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__release) ||\
	    !_mtx_release_lock_fetch((mp), &_v)))			\
		_mtx_unlock_sleep((mp), _v, (opts), (file), (line));	\
} while (0)

/*
 * Unlock a spin mutex. For spinlocks, we can handle everything
 * inline, as it's pretty simple and a function call would be too
 * expensive (at least on some architectures). Since spin locks are
 * not _too_ common, inlining this code is not too big a deal.
 *
 * Since we always perform a spinlock_enter() when attempting to acquire a
 * spin lock, we need to always perform a matching spinlock_exit() when
 * releasing a spin lock. This includes the recursion cases.
 */
#ifdef SMP
#define	__mtx_unlock_spin(mp) do {					\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp);	\
		_mtx_release_lock_quick((mp));				\
	}								\
	spinlock_exit();						\
} while (0)
#else /* SMP */
#define	__mtx_unlock_spin(mp) do {					\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp);	\
		(mp)->mtx_lock = MTX_UNOWNED;				\
	}								\
	spinlock_exit();						\
} while (0)
#endif /* SMP */

/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) lock mutex `m'
 *     and pass option flags `opts' to the "hard" function, if required.
 *     With these routines, it is possible to pass flags such as MTX_QUIET
 *     to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 *     it cannot. Rather, it returns 0 on failure and non-zero on success.
 *     It does NOT handle recursion as we assume that if a caller is properly
 *     using this part of the interface, he will know that the lock in question
 *     is MTX_DEF and will not sleep.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
 *     relevant option flags `opts.'
 *
 * mtx_trylock_spin(m) attempts to acquire MTX_SPIN mutex `m' but doesn't
 *     spin if it cannot. Rather, it returns 0 on failure and non-zero on
 *     success. It always returns failure for recursed lock attempts.
 *
 * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
#define	mtx_lock(m)		mtx_lock_flags((m), 0)
#define	mtx_lock_spin(m)	mtx_lock_spin_flags((m), 0)
#define	mtx_trylock(m)		mtx_trylock_flags((m), 0)
#define	mtx_trylock_spin(m)	mtx_trylock_spin_flags((m), 0)
#define	mtx_unlock(m)		mtx_unlock_flags((m), 0)
#define	mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)
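
/*
 * A minimal usage sketch of the interface above (the mutex and its name
 * are hypothetical):
 *
 *	static struct mtx example_mtx;
 *
 *	mtx_init(&example_mtx, "example", NULL, MTX_DEF);
 *	mtx_lock(&example_mtx);
 *	(access the data protected by example_mtx)
 *	mtx_unlock(&example_mtx);
 *	mtx_destroy(&example_mtx);
 */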

struct mtx_pool;

struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts);
void	mtx_pool_destroy(struct mtx_pool **poolp);
struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr);
struct mtx *mtx_pool_alloc(struct mtx_pool *pool);
#define	mtx_pool_lock(pool, ptr)					\
	mtx_lock(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_lock_spin(pool, ptr)					\
	mtx_lock_spin(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_unlock(pool, ptr)					\
	mtx_unlock(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_unlock_spin(pool, ptr)					\
	mtx_unlock_spin(mtx_pool_find((pool), (ptr)))
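
/*
 * Pool mutexes map an arbitrary pointer to a mutex in the pool, letting a
 * caller lock "by address" without embedding a mutex in the object; a
 * sketch ("obj" is a hypothetical pointer):
 *
 *	mtx_pool_lock(mtxpool_sleep, obj);
 *	(brief access to *obj)
 *	mtx_pool_unlock(mtxpool_sleep, obj);
 */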

/*
 * mtxpool_sleep is a general purpose pool of sleep mutexes.
 */
extern struct mtx_pool *mtxpool_sleep;

#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
#endif
#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
#define	mtx_lock_flags_(m, opts, file, line)				\
	_mtx_lock_flags((m), (opts), (file), (line))
#define	mtx_unlock_flags_(m, opts, file, line)				\
	_mtx_unlock_flags((m), (opts), (file), (line))
#define	mtx_lock_spin_flags_(m, opts, file, line)			\
	_mtx_lock_spin_flags((m), (opts), (file), (line))
#define	mtx_trylock_spin_flags_(m, opts, file, line)			\
	_mtx_trylock_spin_flags((m), (opts), (file), (line))
#define	mtx_unlock_spin_flags_(m, opts, file, line)			\
	_mtx_unlock_spin_flags((m), (opts), (file), (line))
#else	/* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */
#define	mtx_lock_flags_(m, opts, file, line)				\
	__mtx_lock((m), curthread, (opts), (file), (line))
#define	mtx_unlock_flags_(m, opts, file, line)				\
	__mtx_unlock((m), curthread, (opts), (file), (line))
#define	mtx_lock_spin_flags_(m, opts, file, line)			\
	__mtx_lock_spin((m), curthread, (opts), (file), (line))
#define	mtx_trylock_spin_flags_(m, opts, file, line)			\
	__mtx_trylock_spin((m), curthread, (opts), (file), (line))
#define	mtx_unlock_spin_flags_(m, opts, file, line)			\
	__mtx_unlock_spin((m))
#endif	/* LOCK_DEBUG > 0 || MUTEX_NOINLINE */

#ifdef INVARIANTS
#define	mtx_assert_(m, what, file, line)				\
	_mtx_assert((m), (what), (file), (line))

#define	GIANT_REQUIRED	mtx_assert_(&Giant, MA_OWNED, __FILE__, __LINE__)

#else	/* INVARIANTS */
#define	mtx_assert_(m, what, file, line)	(void)0
#define	GIANT_REQUIRED
#endif	/* INVARIANTS */

#define	mtx_lock_flags(m, opts)						\
	mtx_lock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_flags(m, opts)					\
	mtx_unlock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_lock_spin_flags(m, opts)					\
	mtx_lock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_spin_flags(m, opts)					\
	mtx_unlock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_trylock_flags(m, opts)					\
	mtx_trylock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_trylock_spin_flags(m, opts)					\
	mtx_trylock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_assert(m, what)						\
	mtx_assert_((m), (what), __FILE__, __LINE__)

#define	mtx_sleep(chan, mtx, pri, wmesg, timo)				\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg),		\
	    tick_sbt * (timo), 0, C_HARDCLOCK)
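
/*
 * mtx_sleep() atomically releases the mutex while sleeping on `chan' and
 * reacquires it before returning, which enables the classic wait loop
 * free of lost wakeups; a sketch ("sc" and its members are hypothetical):
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (sc->sc_ready == 0)
 *		mtx_sleep(sc, &sc->sc_mtx, 0, "scwait", 0);
 *	mtx_unlock(&sc->sc_mtx);
 */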

#define	MTX_READ_VALUE(m)	((m)->mtx_lock)

#define	mtx_initialized(m)	lock_initialized(&(m)->lock_object)

#define	lv_mtx_owner(v)	((struct thread *)((v) & ~MTX_FLAGMASK))

#define	mtx_owner(m)	lv_mtx_owner(MTX_READ_VALUE(m))

#define	mtx_owned(m)	(mtx_owner(m) == curthread)

#define	mtx_recursed(m)	((m)->mtx_recurse != 0)

#define	mtx_name(m)	((m)->lock_object.lo_name)

/*
 * Global locks.
 */
extern struct mtx Giant;
extern struct mtx blocked_lock;

/*
 * Giant lock manipulation and clean exit macros.
 * Used to replace return with an exit Giant and return.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT()
 * The #ifndef is to allow lint-like tools to redefine DROP_GIANT.
 */
#ifndef DROP_GIANT
#define	DROP_GIANT()							\
do {									\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (__predict_false(mtx_owned(&Giant))) {			\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		for (_giantcnt = 0; mtx_owned(&Giant) &&		\
		    !SCHEDULER_STOPPED(); _giantcnt++)			\
			mtx_unlock(&Giant);				\
	}

#define	PICKUP_GIANT()							\
	PARTIAL_PICKUP_GIANT();						\
} while (0)

#define	PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	if (__predict_false(_giantcnt > 0)) {				\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}
#endif
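
/*
 * A sketch of the intended pairing: DROP_GIANT() opens a block that
 * releases Giant (recursively, if held) and PICKUP_GIANT() closes the
 * block, reacquiring Giant the same number of times:
 *
 *	DROP_GIANT();
 *	(code that must not hold Giant)
 *	PICKUP_GIANT();
 */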

struct mtx_args {
	void		*ma_mtx;
	const char	*ma_desc;
	int		 ma_opts;
};

#define	MTX_SYSINIT(name, mtx, desc, opts)				\
	static struct mtx_args name##_args = {				\
		(mtx),							\
		(desc),							\
		(opts)							\
	};								\
	SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    mtx_sysinit, &name##_args);					\
	SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    _mtx_destroy, __DEVOLATILE(void *, &(mtx)->mtx_lock))
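
/*
 * MTX_SYSINIT() initializes a global mutex at boot via SYSINIT and tears
 * it down on unload, avoiding an explicit mtx_init() call; a sketch
 * ("foo_mtx" is a hypothetical global):
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo global", MTX_DEF);
 */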

/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 *
 * The constants need to be defined for INVARIANT_SUPPORT infrastructure
 * support as _mtx_assert() itself uses them and the latter implies that
 * _mtx_assert() must build.
 */
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	MA_OWNED	LA_XLOCKED
#define	MA_NOTOWNED	LA_UNLOCKED
#define	MA_RECURSED	LA_RECURSED
#define	MA_NOTRECURSED	LA_NOTRECURSED
#endif
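
/*
 * A sketch of mtx_assert() with these constants ("sc" is hypothetical);
 * under INVARIANTS a failed assertion panics, otherwise it compiles away:
 *
 *	mtx_assert(&sc->sc_mtx, MA_OWNED);
 */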

/*
 * Common lock type names.
 */
#define	MTX_NETWORK_LOCK	"network driver"

#endif	/* _KERNEL */
#endif	/* _SYS_MUTEX_H_ */