/*-
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_global.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/lock_profile.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
static void	db_show_lockmgr(struct lock_object *lock);
#endif

struct lock_class lock_class_lockmgr = {
	"lockmgr",
	LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	db_show_lockmgr
#endif
};
/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
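/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical consumer initializes the lock once and then brackets each
 * access with an acquire/release pair.  The priority, wmesg, and timeout
 * arguments below are arbitrary examples, and the four-argument lockmgr()
 * convenience macro is assumed to wrap _lockmgr() as usual.
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplk", 0, 0);
 *	lockmgr(&lk, LK_SHARED, NULL, curthread);	(read-only access)
 *	lockmgr(&lk, LK_RELEASE, NULL, curthread);
 */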
#define	COUNT(td, x)	if ((td)) (td)->td_locks += (x)
#define	LK_ALL		(LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
			 LK_SHARE_NONZERO | LK_WAIT_NONZERO)
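/*
 * Editor's note: COUNT() maintains the per-thread tally of held lockmgr
 * locks (td_locks), which other parts of the kernel use for sanity
 * checks; LK_ALL collects every flag that marks the lock as held,
 * wanted, or waited on, i.e. the state LK_DRAIN must wait to clear.
 */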
static int acquire(struct lock **lkpp, int extflags, int wanted);
static int acquiredrain(struct lock *lkp, int extflags);
static __inline void
sharelock(struct thread *td, struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}
static __inline void
shareunlock(struct thread *td, struct lock *lkp, int decr) {

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		/* Wake a pending exclusive or upgrade request, if any. */
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else
		lkp->lk_sharecount -= decr;
}
static int
acquire(struct lock **lkpp, int extflags, int wanted)
{
	struct lock *lkp = *lkpp;
	int error;

	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
		return EBUSY;
	error = 0;
	while ((lkp->lk_flags & wanted) != 0) {
		CTR2(KTR_LOCK,
		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
		    lkp, lkp->lk_flags);
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
		/*
		 * If the lock's waiters are being transferred, follow the
		 * interlock over to the replacement lock and retry there.
		 */
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	mtx_assert(lkp->lk_interlock, MA_OWNED);
	return 0;
}
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
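/*
 * Illustrative sketch (editor's addition): a non-blocking exclusive
 * attempt with a shared/upgrade fallback.  EBUSY from a LK_NOWAIT
 * request means the lock would have had to sleep; per the LK_UPGRADE
 * comment below, an upgrade that fails leaves the lock unlocked.
 *
 *	if (lockmgr(&lk, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread) == EBUSY) {
 *		lockmgr(&lk, LK_SHARED, NULL, curthread);
 *		...
 *		if (lockmgr(&lk, LK_UPGRADE, NULL, curthread) != 0)
 *			goto retry;	(the shared lock was already dropped)
 *	}
 */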
int
_lockmgr(struct lock *lkp, int flags, struct mtx *interlkp,
    struct thread *td, char *file, int line)
{
	int error;
	struct thread *thr;
	int extflags, lockflags;
	uint64_t waitstart;

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	lock_profile_waitstart(&waitstart);
	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
	    "td == %p", lkp, lkp->lk_wmesg, lkp->lk_lockholder,
	    lkp->lk_exclusivecount, flags, td);
#ifdef DEBUG_LOCKS
	{
		struct stack stack; /* XXX */
		stack_save(&stack);
		CTRSTACK(KTR_LOCK, &stack, 0, 1);
	}
#endif

	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->mtx_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}
	if ((lkp->lk_flags & LK_NOSHARE) &&
	    (flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags);
			if (error)
				break;
			sharelock(td, lkp, 1);
			if (lkp->lk_sharecount == 1)
				lock_profile_obtain_lock_success(&lkp->lk_object, waitstart, file, line);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(td, lkp, 1);
		if (lkp->lk_sharecount == 1)
			lock_profile_obtain_lock_success(&lkp->lk_object, waitstart, file, line);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
			("lockmgr: not holding exclusive lock "
			"(owner thread (%p) != thread (%p), exlcnt (%d) != 0)",
			lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(td, lkp, lkp->lk_exclusivecount);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(td, lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the lock
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: upgrade exclusive lock");
		if (lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade without shared");
		shareunlock(td, lkp, 1);
		if (lkp->lk_sharecount == 0)
			lock_profile_release_lock(&lkp->lk_object);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an
			 * upgrade, so request the upgrade and wait for the
			 * shared count to drop to zero, then take the
			 * exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error) {
				if ((lkp->lk_flags & (LK_WANT_EXCL | LK_WAIT_NONZERO)) ==
				    (LK_WANT_EXCL | LK_WAIT_NONZERO))
					wakeup((void *)lkp);
				break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			lkp->lk_exclusivecount = 1;
			COUNT(td, 1);
			lock_profile_obtain_lock_success(&lkp->lk_object, waitstart, file, line);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		     LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags,
		    LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
				wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
		lock_profile_obtain_lock_success(&lkp->lk_object, waitstart, file, line);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC)
				COUNT(td, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
				lock_profile_release_lock(&lkp->lk_object);
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(td, lkp, 1);
		else
			printf("lockmgr: thread %p unlocking unheld lock\n",
			    thr);

		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	/* If a drain is pending and the lock just went idle, wake it up. */
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	     LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}
static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}
	error = 0;
	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}
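/*
 * Illustrative sketch (editor's addition): LK_DRAIN is the usual way to
 * quiesce a lock before tearing it down.  Once the drain returns, the
 * caller holds the lock exclusively and nobody else is waiting on it.
 *
 *	lockmgr(&lk, LK_DRAIN, NULL, curthread);
 *	lockmgr(&lk, LK_RELEASE, NULL, curthread);
 *	lockdestroy(&lk);
 */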
/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(struct lock *from, struct lock *to)
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0, ("transfer draining lock"));

	mtx_lock(from->lk_interlock);
	if (from->lk_waitcount == 0) {
		mtx_unlock(from->lk_interlock);
		return;
	}
	from->lk_newlock = to;
	wakeup((void *)from);
	msleep(&from->lk_newlock, from->lk_interlock, from->lk_prio,
	    "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
	mtx_unlock(from->lk_interlock);
}
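/*
 * Editor's note: the handshake above works because acquire() re-checks
 * lk_newlock each time it wakes up; every waiter migrates itself to the
 * new lock, and the last one to leave wakes the thread sleeping on
 * &from->lk_newlock.
 */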
/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{
	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	stack_zero(&lkp->lk_stack);
#endif
	lock_profile_object_init(&lkp->lk_object, &lock_class_lockmgr, wmesg);
}
/*
 * Destroy a lock.
 */
void
lockdestroy(struct lock *lkp)
{

	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
	lock_profile_object_destroy(&lkp->lk_object);
}
/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;

	mtx_lock(lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}
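/*
 * Illustrative sketch (editor's addition): callers most often use the
 * return value in assertions rather than for control flow.
 *
 *	KASSERT(lockstatus(&lk, curthread) == LK_EXCLUSIVE,
 *	    ("caller does not hold lk exclusively"));
 */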
/*
 * Determine the number of holders of a lock.
 */
int
lockcount(struct lock *lkp)
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}
/*
 * Determine the number of waiters on a lock.
 */
int
lockwaiters(struct lock *lkp)
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_waitcount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}
/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
#ifdef DEBUG_LOCKS
	stack_print(&lkp->lk_stack);
#endif
}
#ifdef DDB
/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on a 'struct lock'.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lkp;

	lkp = td->td_wchan;

	/* Simple test to see if wchan points to a lockmgr lock. */
	if (lkp->lk_wmesg != td->td_wmesg)
		return (0);

	/* Ok, we think we have a lockmgr lock, so output some details. */
	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
	if (lkp->lk_sharecount) {
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
		*ownerp = NULL;
	} else {
		db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
		*ownerp = lkp->lk_lockholder;
	}
	return (1);
}
static void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lkp;

	lkp = (struct lock *)lock;

	db_printf("lock type: %s\n", lkp->lk_wmesg);
	db_printf("state: ");
	if (lkp->lk_sharecount)
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		td = lkp->lk_lockholder;
		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
		    td->td_proc->p_pid, td->td_proc->p_comm);
	} else
		db_printf("UNLOCKED\n");
	if (lkp->lk_waitcount > 0)
		db_printf("waiters: %d\n", lkp->lk_waitcount);
}
#endif