/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 */
36 #include <sys/queue.h>
40 #include <machine/atomic.h>
41 #include <machine/cpufunc.h>
42 #include <machine/globals.h>
46 #include <machine/mutex.h>
/*
 * If kern_mutex.c is being built, compile non-inlined versions of various
 * functions so that kernel modules can use them.
 */
#ifndef _KERN_MUTEX_C_
#define	_MTX_INLINE	static __inline
#else
/* Building kern_mutex.c itself: emit real (externally linkable) functions. */
#define	_MTX_INLINE
#endif
/*
 * Mutex lock types and option flags passed to mtx_init()/mtx_enter().
 */
/* Types */
#define MTX_DEF 0x0 /* Default (spin/sleep) */
#define MTX_SPIN 0x1 /* Spin only lock */

/* Options */
#define MTX_RLIKELY 0x4 /* (opt) Recursion likely */
#define MTX_NORECURSE 0x8 /* No recursion possible */
#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */
#define MTX_NOSWITCH 0x20 /* Do not switch on release */
#define MTX_FIRST 0x40 /* First spin lock holder */
#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */
#define MTX_COLD 0x100 /* Mutex init'd before malloc works */

/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */
#define MTX_HARDOPTS (MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)
/*
 * Flags/value used in mtx_lock: the owner thread pointer is suitably
 * aligned, so its low bits are free to carry lock state.
 */
#define MTX_RECURSE 0x01 /* (non-spin) lock held recursively */
#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */
/* Parenthesized so the expansion is a single term in any expression. */
#define MTX_FLAGMASK (~(MTX_RECURSE | MTX_CONTESTED))
#define MTX_UNOWNED 0x8 /* Cookie for free mutex */
/*
 * Per-mutex debugging state (MUTEX_DEBUG kernels); hung off struct mtx
 * via its mtx_debug pointer.
 */
struct mtx_debug {
	/* If you add anything here, adjust the mtxf_t definition below */
	struct witness	*mtxd_witness;	/* lock-order (witness) record */
	LIST_ENTRY(mtx)	mtxd_held;	/* link on list of held mutexes */
	const char	*mtxd_file;	/* file of last acquisition */
	int		mtxd_line;	/* line of last acquisition
					   (referenced by mtx_line below) */
	const char	*mtxd_description;	/* static name string */
};
/*
 * Shorthand accessors for the debug fields stored in the struct mtx_debug
 * record reached through a mutex's mtx_debug pointer.
 */
#define mtx_description mtx_debug->mtxd_description
#define mtx_held mtx_debug->mtxd_held
#define mtx_line mtx_debug->mtxd_line
#define mtx_file mtx_debug->mtxd_file
#define mtx_witness mtx_debug->mtxd_witness
110 volatile uintptr_t mtx_lock; /* lock owner/gate/flags */
111 volatile u_int mtx_recurse; /* number of recursive holds */
112 u_int mtx_saveintr; /* saved flags (for spin locks) */
114 struct mtx_debug *mtx_debug;
116 const char *mtx_description;
118 TAILQ_HEAD(, proc) mtx_blocked;
119 LIST_ENTRY(mtx) mtx_contested;
120 struct mtx *mtx_next; /* all locks in system */
121 struct mtx *mtx_prev;
#ifdef MUTEX_DEBUG
/* Declare a mutex along with its statically allocated debug record. */
#define MUTEX_DECLARE(modifiers, name)					\
	static struct mtx_debug __mtx_debug_##name;			\
	modifiers struct mtx name = { 0, 0, 0, &__mtx_debug_##name }
#else
/* Without MUTEX_DEBUG there is no side record to allocate. */
#define MUTEX_DECLARE(modifiers, name)	modifiers struct mtx name
#endif
/* Placeholder for annotating MP-unsafe spots; currently expands to nothing. */
#define mp_fixme(string)

/* Non-inline mutex primitives, defined outside this header. */
void mtx_init(struct mtx *m, const char *description, int flag);
void mtx_enter_hard(struct mtx *, int type, int saveintr);
void mtx_exit_hard(struct mtx *, int type);
void mtx_destroy(struct mtx *m);
/*
 * Wrap the following functions with cpp macros so that filenames and line
 * numbers are embedded in the code correctly.
 */
#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
void	_mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
int	_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
void	_mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
#endif

#define	mtx_enter(mtxp, type)						\
	_mtx_enter((mtxp), (type), __FILE__, __LINE__)

#define	mtx_try_enter(mtxp, type)					\
	_mtx_try_enter((mtxp), (type), __FILE__, __LINE__)

#define	mtx_exit(mtxp, type)						\
	_mtx_exit((mtxp), (type), __FILE__, __LINE__)
/* Global mutexes: the scheduler lock and the kernel-wide Giant lock. */
extern struct mtx sched_lock;
extern struct mtx Giant;
/*
 * Used to replace return with an exit Giant and return.
 */
#define	EGAR(a)								\
do {									\
	mtx_exit(&Giant, MTX_DEF);					\
	return (a);							\
} while (0)

#define	VEGAR								\
do {									\
	mtx_exit(&Giant, MTX_DEF);					\
	return;								\
} while (0)
/*
 * Fully release Giant (without context-switching on the final release),
 * remembering how many recursive holds we had so PICKUP_GIANT() can
 * restore them.  NOTE: deliberately opens a "do {" block that a matching
 * PICKUP_GIANT() must close.
 */
#define	DROP_GIANT_NOSWITCH()						\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH)
/*
 * As DROP_GIANT_NOSWITCH(), but a normal release (may switch).
 * NOTE: deliberately opens a "do {" block that PICKUP_GIANT() closes.
 */
#define	DROP_GIANT()							\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_exit(&Giant, MTX_DEF)
/*
 * Re-acquire Giant as many times as DROP_GIANT*() released it, and close
 * the "do {" block that DROP_GIANT*() opened.
 */
#define	PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_enter(&Giant, MTX_DEF);				\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant);				\
} while (0)
/*
 * As PICKUP_GIANT(), but leaves the "do {" block opened by DROP_GIANT*()
 * open so Giant can be dropped and picked up again within it.
 */
#define PARTIAL_PICKUP_GIANT() \
	mtx_assert(&Giant, MA_NOTOWNED); \
	while (_giantcnt--) \
		mtx_enter(&Giant, MTX_DEF); \
	if (mtx_owned(&Giant)) \
		WITNESS_RESTORE(&Giant, Giant)
/*
 * The INVARIANTS-enabled mtx_assert() functionality.  "what" is an OR of
 * the MA_* conditions below; violations call panic().
 */
#ifdef INVARIANTS
#define	MA_OWNED	1		/* caller must own the mutex */
#define	MA_NOTOWNED	2		/* caller must not own the mutex */
#define	MA_RECURSED	4		/* mutex must be recursed */
#define	MA_NOTRECURSED	8		/* mutex must not be recursed */
#define	mtx_assert(m, what) do {					\
	switch ((what)) {						\
	case MA_OWNED:							\
	case MA_OWNED | MA_RECURSED:					\
	case MA_OWNED | MA_NOTRECURSED:					\
		if (!mtx_owned((m)))					\
			panic("mutex %s not owned at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		if (mtx_recursed((m))) {				\
			if (((what) & MA_NOTRECURSED) != 0)		\
				panic("mutex %s recursed at %s:%d",	\
				    (m)->mtx_description, __FILE__, __LINE__); \
		} else if (((what) & MA_RECURSED) != 0)			\
			panic("mutex %s unrecursed at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		break;							\
	case MA_NOTOWNED:						\
		if (mtx_owned((m)))					\
			panic("mutex %s owned at %s:%d",		\
			    (m)->mtx_description, __FILE__, __LINE__);	\
		break;							\
	default:							\
		panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
	}								\
} while (0)
#else	/* INVARIANTS */
#define	mtx_assert(m, what)
#endif	/* INVARIANTS */
#ifdef MUTEX_DEBUG
/*
 * Mutex-internal assertions; wrapped in do/while(0) so they behave as a
 * single statement (safe next to if/else), compile away without MUTEX_DEBUG.
 */
#define	MPASS(ex)							\
	do {								\
		if (!(ex))						\
			panic("Assertion %s failed at %s:%d",		\
			    #ex, __FILE__, __LINE__);			\
	} while (0)
#define	MPASS2(ex, what)						\
	do {								\
		if (!(ex))						\
			panic("Assertion %s failed at %s:%d",		\
			    what, __FILE__, __LINE__);			\
	} while (0)
#else	/* MUTEX_DEBUG */
#define	MPASS(ex)
#define	MPASS2(ex, where)
#endif	/* MUTEX_DEBUG */
#ifdef WITNESS
/*
 * Lock-order (witness) checking hooks: each calls into the witness code
 * only when the mutex has a witness record attached.
 */
#define WITNESS_ENTER(m, t, f, l)					\
	if ((m)->mtx_witness != NULL)					\
		witness_enter((m), (t), (f), (l))
#define WITNESS_EXIT(m, t, f, l)					\
	if ((m)->mtx_witness != NULL)					\
		witness_exit((m), (t), (f), (l))

#define	WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
#define	WITNESS_SAVE_DECL(n)						\
	const char * __CONCAT(n, __wf);					\
	int __CONCAT(n, __wl)

#define	WITNESS_SAVE(m, n)						\
do {									\
	if ((m)->mtx_witness != NULL)					\
		witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)); \
} while (0)

#define	WITNESS_RESTORE(m, n)						\
do {									\
	if ((m)->mtx_witness != NULL)					\
		witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)); \
} while (0)

void	witness_init(struct mtx *, int flag);
void	witness_destroy(struct mtx *);
void	witness_enter(struct mtx *, int, const char *, int);
void	witness_try_enter(struct mtx *, int, const char *, int);
void	witness_exit(struct mtx *, int, const char *, int);
void	witness_display(void(*)(const char *fmt, ...));
void	witness_list(struct proc *);
int	witness_sleep(int, struct mtx *, const char *, int);
void	witness_save(struct mtx *, const char **, int *);
void	witness_restore(struct mtx *, const char *, int);
#else	/* WITNESS */
#define	WITNESS_ENTER(m, t, f, l)
#define	WITNESS_EXIT(m, t, f, l)
#define	WITNESS_SLEEP(check, m)
#define	WITNESS_SAVE_DECL(n)
#define	WITNESS_SAVE(m, n)
#define	WITNESS_RESTORE(m, n)

/*
 * flag++ is slezoid way of shutting up unused parameter warning
 * in mtx_init()
 */
#define	witness_init(m, flag)	flag++
#define	witness_destroy(m)
#define	witness_enter(m, t, f, l)
#define	witness_try_enter(m, t, f, l)
#define	witness_exit(m, t, f, l)
#endif	/* WITNESS */
/*
 *------------------------------------------------------------------------------
 * Assembly macros (for internal use only)
 *------------------------------------------------------------------------------
 */

/* Stringify a token for use inside inline-assembly strings. */
#define _V(x) __STRING(x)
/*
 * Default, unoptimized mutex micro-operations.  A machine-dependent header
 * may pre-define any of these to an optimized form, hence the #ifndef
 * override guards.
 */

#ifndef _obtain_lock
/* Actually obtain mtx_lock: free (MTX_UNOWNED) -> owned by tid. */
#define	_obtain_lock(mp, tid)						\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
#endif

#ifndef _release_lock
/* Actually release mtx_lock: owned by tid -> free; fails if contested. */
#define	_release_lock(mp, tid)						\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
#endif

#ifndef _release_lock_quick
/* Actually release mtx_lock quickly assuming that we own it */
#define	_release_lock_quick(mp)						\
	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
#endif
#ifndef _getlock_sleep
/* Get a sleep lock, deal with recursion inline. */
#define	_getlock_sleep(mp, tid, type) do {				\
	if (!_obtain_lock(mp, tid)) {					\
		if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\
			mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0);	\
		else {							\
			/* Already owned by us: just bump recursion. */	\
			atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSE);	\
			(mp)->mtx_recurse++;				\
		}							\
	}								\
} while (0)
#endif
#ifndef _getlock_spin_block
/* Get a spin lock, handle recursion inline (as the less common case) */
#define	_getlock_spin_block(mp, tid, type) do {				\
	u_int _mtx_intr = save_intr();					\
									\
	disable_intr();							\
	if (!_obtain_lock(mp, tid))					\
		mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr);	\
	else								\
		(mp)->mtx_saveintr = _mtx_intr;				\
} while (0)
#endif
#ifndef _getlock_norecurse
/*
 * Get a lock without any recursion handling. Calls the hard enter function if
 * we can't get it inline.
 */
#define	_getlock_norecurse(mp, tid, type) do {				\
	if (!_obtain_lock(mp, tid))					\
		mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0);		\
} while (0)
#endif
#ifndef _exitlock_norecurse
/*
 * Release a sleep lock assuming we haven't recursed on it, recursion is handled
 * in the hard function.
 */
#define	_exitlock_norecurse(mp, tid, type) do {				\
	if (!_release_lock(mp, tid))					\
		mtx_exit_hard((mp), (type) & MTX_HARDOPTS);		\
} while (0)
#endif
#ifndef _exitlock
/*
 * Release a sleep lock when its likely we recursed (the code to
 * deal with simple recursion is inline).
 */
#define	_exitlock(mp, tid, type) do {					\
	if (!_release_lock(mp, tid)) {					\
		if ((mp)->mtx_lock & MTX_RECURSE) {			\
			if (--((mp)->mtx_recurse) == 0)			\
				atomic_clear_ptr(&(mp)->mtx_lock,	\
				    MTX_RECURSE);			\
		} else {						\
			/* Contested: let the hard path sort it out. */	\
			mtx_exit_hard((mp), (type) & MTX_HARDOPTS);	\
		}							\
	}								\
} while (0)
#endif
#ifndef _exitlock_spin
/* Release a spin lock (with possible recursion). */
#define	_exitlock_spin(mp) do {						\
	if ((mp)->mtx_recurse == 0) {					\
		int _mtx_intr = (mp)->mtx_saveintr;			\
									\
		_release_lock_quick(mp);				\
		restore_intr(_mtx_intr);				\
	} else {							\
		(mp)->mtx_recurse--;					\
	}								\
} while (0)
#endif
/*
 *------------------------------------------------------------------------------
 * Externally visible mutex functions.
 *------------------------------------------------------------------------------
 */

/*
 * Return non-zero if a mutex is already owned by the current thread.
 */
#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)CURTHD)

/*
 * Return non-zero if a mutex has been recursively acquired.
 */
#define mtx_recursed(m) ((m)->mtx_recurse != 0)
#ifdef _KERN_MUTEX_C_
/*
 * KTR_EXTEND saves file name and line for all entries, so we don't need them
 * here.  Theoretically we should also change the entries which refer to them
 * (from CTR5 to CTR3), but since they're just passed to snprintf as the last
 * parameters, it doesn't do any harm to leave them.
 */
#ifdef KTR_EXTEND
char	STR_mtx_enter_fmt[] = "GOT %s [%p] r=%d";
char	STR_mtx_exit_fmt[] = "REL %s [%p] r=%d";
char	STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%p] result=%d";
#else
char	STR_mtx_enter_fmt[] = "GOT %s [%p] r=%d at %s:%d";
char	STR_mtx_exit_fmt[] = "REL %s [%p] r=%d at %s:%d";
char	STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%p] result=%d at %s:%d";
#endif	/* KTR_EXTEND */
char	STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
char	STR_mtx_owned[] = "mtx_owned(mpp)";
char	STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
#else	/* _KERN_MUTEX_C_ */
extern	char STR_mtx_enter_fmt[];
extern	char STR_mtx_bad_type[];
extern	char STR_mtx_exit_fmt[];
extern	char STR_mtx_owned[];
extern	char STR_mtx_recurse[];
extern	char STR_mtx_try_enter_fmt[];
#endif	/* _KERN_MUTEX_C_ */
474 * Get lock 'm', the macro handles the easy (and most common cases) and leaves
475 * the slow stuff to the mtx_enter_hard() function.
477 * Note: since type is usually a constant much of this code is optimized out.
480 _mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
482 struct mtx *mpp = mtxp;
484 /* bits only valid on mtx_exit() */
485 MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
488 if ((type) & MTX_SPIN) {
490 * Easy cases of spin locks:
492 * 1) We already own the lock and will simply recurse on it (if
495 * 2) The lock is free, we just get it
497 if ((type) & MTX_RLIKELY) {
499 * Check for recursion, if we already have this
500 * lock we just bump the recursion count.
502 if (mpp->mtx_lock == (uintptr_t)CURTHD) {
508 if (((type) & MTX_TOPHALF) == 0) {
510 * If an interrupt thread uses this we must block
513 if ((type) & MTX_FIRST) {
516 _getlock_norecurse(mpp, CURTHD,
517 (type) & MTX_HARDOPTS);
519 _getlock_spin_block(mpp, CURTHD,
520 (type) & MTX_HARDOPTS);
523 _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
526 if ((type) & MTX_RLIKELY)
527 _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
529 _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
532 WITNESS_ENTER(mpp, type, file, line);
533 CTR5(KTR_LOCK, STR_mtx_enter_fmt,
534 mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
539 * Attempt to get MTX_DEF lock, return non-zero if lock acquired.
541 * XXX DOES NOT HANDLE RECURSION
544 _mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
546 struct mtx *const mpp = mtxp;
549 rval = _obtain_lock(mpp, CURTHD);
551 if (rval && mpp->mtx_witness != NULL) {
552 MPASS(mpp->mtx_recurse == 0);
553 witness_try_enter(mpp, type, file, line);
556 CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
557 mpp->mtx_description, mpp, rval, file, line);
566 _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
568 struct mtx *const mpp = mtxp;
570 MPASS2(mtx_owned(mpp), STR_mtx_owned);
571 WITNESS_EXIT(mpp, type, file, line);
572 CTR5(KTR_LOCK, STR_mtx_exit_fmt,
573 mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
574 if ((type) & MTX_SPIN) {
575 if ((type) & MTX_NORECURSE) {
576 int mtx_intr = mpp->mtx_saveintr;
578 MPASS2(mpp->mtx_recurse == 0, STR_mtx_recurse);
579 _release_lock_quick(mpp);
580 if (((type) & MTX_TOPHALF) == 0) {
581 if ((type) & MTX_FIRST) {
585 restore_intr(mtx_intr);
588 if (((type & MTX_TOPHALF) == 0) &&
589 (type & MTX_FIRST)) {
596 /* Handle sleep locks */
597 if ((type) & MTX_RLIKELY)
598 _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
600 _exitlock_norecurse(mpp, CURTHD,
601 (type) & MTX_HARDOPTS);
606 #endif /* KLD_MODULE */
/* Avoid namespace pollution */
#ifndef _KERN_MUTEX_C_
#undef	_obtain_lock
#undef	_release_lock
#undef	_release_lock_quick
#undef	_getlock_sleep
#undef	_getlock_spin_block
#undef	_getlock_norecurse
#undef	_exitlock_norecurse
#undef	_exitlock
#undef	_exitlock_spin
#endif	/* !_KERN_MUTEX_C_ */
623 #endif /* _SYS_MUTEX_H_ */