/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>
/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define	RM_DESTROYED	((void *)0xdead)

#define	rm_destroyed(rm)						\
	(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)
#define	RMPF_ONQUEUE	1
#define	RMPF_SIGNAL	2

#define	_rm_assert(c, what, file, line)

static void	assert_rm(struct lock_object *lock, int what);
static void	db_show_rm(struct lock_object *lock);
static void	lock_rm(struct lock_object *lock, int how);
static int	owner_rm(struct lock_object *lock, struct thread **owner);
static int	unlock_rm(struct lock_object *lock);
struct lock_class lock_class_rm = {
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
	.lc_ddb_show = db_show_rm,
	.lc_unlock = unlock_rm,
struct lock_class lock_class_rm_sleepable = {
	.lc_name = "sleepable rm",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
	.lc_assert = assert_rm,
	.lc_ddb_show = db_show_rm,
	.lc_unlock = unlock_rm,
	.lc_owner = owner_rm,
assert_rm(struct lock_object *lock, int what)

	rm_assert((struct rmlock *)lock, what);

/*
 * These do not support read locks because it would be hard to make
 * the tracker work correctly with the current lock_class API as you
 * would need to have the tracker pointer available when calling
 * rm_rlock() in lock_rm().
 */
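/*
 * Illustrative sketch (not part of the original file): the rmlock
 * read-side interface requires a caller-supplied tracker, e.g.
 * ('mylock' is a hypothetical lock):
 *
 *	struct rm_priotracker tracker;
 *
 *	rm_rlock(&mylock, &tracker);
 *	... read data protected by mylock ...
 *	rm_runlock(&mylock, &tracker);
 *
 * lock_rm() receives only the lock_object and so has no tracker to
 * pass, which is why acquiring in read mode panics below.
 */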
lock_rm(struct lock_object *lock, int how)

	struct rmlock *rm;

	rm = (struct rmlock *)lock;
	panic("lock_rm called in read mode");

unlock_rm(struct lock_object *lock)

	struct rmlock *rm;

	rm = (struct rmlock *)lock;

owner_rm(struct lock_object *lock, struct thread **owner)

	struct rmlock *rm;
	struct lock_class *lc;

	rm = (struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));
static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)

	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
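/*
 * Sketch of the forward traversal the above ordering permits (the same
 * pattern appears in rm_trackers_present() and rm_cleanIPI() below).
 * Because only rmq_next must stay consistent at each step, an interrupt
 * on the local CPU may walk the list even while it is being modified:
 *
 *	for (queue = pc->pc_rm_queue.rmq_next;
 *	    queue != &pc->pc_rm_queue; queue = queue->rmq_next)
 *		examine((struct rm_priotracker *)queue);
 *
 * where examine() is a stand-in for whatever per-tracker work is done;
 * rmq_prev may be stale during such a traversal.
 */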
/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)

	struct rm_queue *queue;
	struct rm_priotracker *tracker;

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)

	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
rm_cleanIPI(void *arg)

	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;

	pc = pcpu_find(curcpu);

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
rm_init_flags(struct rmlock *rm, const char *name, int opts)

	struct lock_class *lc;
	int liflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= LO_SLEEPABLE;
		lc = &lock_class_rm_sleepable;
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_NOWITNESS);
	} else {
		lc = &lock_class_rm;
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
	}
	lock_init(&rm->lock_object, lc, name, NULL, liflags);
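/*
 * Usage sketch (hypothetical lock name): a subsystem would typically
 * initialize a recursable rmlock as:
 *
 *	static struct rmlock mylock;
 *
 *	rm_init_flags(&mylock, "mylock", RM_RECURSE);
 */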
rm_init(struct rmlock *rm, const char *name)

	rm_init_flags(rm, name, 0);

rm_destroy(struct rmlock *rm)

	rm_assert(rm, RA_UNLOCKED);
	LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);

rm_wowned(struct rmlock *rm)

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));

rm_sysinit(void *arg)

	struct rm_args *args = arg;

	rm_init(args->ra_rm, args->ra_desc);

rm_sysinit_flags(void *arg)

	struct rm_args_flags *args = arg;

	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
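/*
 * These sysinit helpers are normally reached via the RM_SYSINIT()
 * macro from <sys/rmlock.h> rather than called directly; a sketch
 * (hypothetical names):
 *
 *	static struct rmlock mylock;
 *	RM_SYSINIT(mylock_init, &mylock, "mylock");
 */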
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)

	struct pcpu *pc;

	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			THREAD_SLEEPING_OK();
			sx_xlock(&rm->rm_lock_sx);
			THREAD_NO_SLEEPING();
		} else
			mtx_lock(&rm->rm_lock_mtx);
	}

	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)

	struct thread *td = curthread;
	struct pcpu *pc;

	if (SCHEDULER_STOPPED())
		return (1);

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	td->td_critnest++;	/* critical_enter(); */

	pc = cpuid_to_pcpu[td->td_oncpu];	/* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return _rm_rlock_hard(rm, tracker, trylock);
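/*
 * The fast-path test above is equivalent to the more obvious (sketch):
 *
 *	if (td->td_owepreempt == 0 &&
 *	    !CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus))
 *		return (1);
 *
 * but folds both zero tests into one bitwise OR so the common case
 * costs a single conditional branch.
 */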
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)

	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu];	/* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;	/* critical_exit(); */

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
_rm_wlock(struct rmlock *rm)

	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (SCHEDULER_STOPPED())
		return;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendevous_barrier,
		    rm_cleanIPI,
		    smp_no_rendevous_barrier,
		    rm);

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
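/*
 * Write-side usage sketch (hypothetical lock name): writers take the
 * lock exclusively with the plain macros from <sys/rmlock.h>:
 *
 *	rm_wlock(&mylock);
 *	... modify data protected by mylock ...
 *	rm_wunlock(&mylock);
 */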
_rm_wunlock(struct rmlock *rm)

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

_rm_wlock_debug(struct rmlock *rm, const char *file, int line)

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_UNLOCKED, file, line);

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

	curthread->td_locks++;
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	curthread->td_locks--;
	_rm_wunlock(rm);
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)

	if (SCHEDULER_STOPPED())
		return (1);

	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
	}

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	KASSERT(!rm_wowned(rm),
	    ("rm_rlock: wlock already held for %s @ %s:%d",
	    rm->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
	    NULL);

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);

		curthread->td_locks++;

		return (1);
	}

	LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	curthread->td_locks--;
/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)

	_rm_wlock(rm);

_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)

	_rm_wunlock(rm);

_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)

	return _rm_rlock(rm, tracker, trylock);

_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)

	_rm_runlock(rm, tracker);
#ifdef INVARIANT_SUPPORT

/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
_rm_assert(struct rmlock *rm, int what, const char *file, int line)
	int count;

	if (panicstr != NULL)
		return;

	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
		/*
		 * Handle the write-locked case.  Unlike other
		 * primitives, writers can never recurse.
		 */
		if (rm_wowned(rm)) {
			if (what & RA_RLOCKED)
				panic("Lock %s exclusively locked @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			break;
		}

		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);

		if (count == 0)
			panic("Lock %s not %slocked @ %s:%d\n",
			    rm->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);
		if (count > 1) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_WLOCKED:
		if (!rm_wowned(rm))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
		if (rm_wowned(rm))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);

		if (count != 0)
			panic("Lock %s read locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
		    line);
	}
#endif /* INVARIANT_SUPPORT */
print_tracker(struct rm_priotracker *tr)

	struct thread *td;

	td = tr->rmp_thread;
	db_printf(" thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
	if (tr->rmp_flags & RMPF_ONQUEUE) {
		db_printf("ONQUEUE");
		if (tr->rmp_flags & RMPF_SIGNAL)
			db_printf(",SIGNAL");
db_show_rm(struct lock_object *lock)

	struct rm_priotracker *tr;
	struct rm_queue *queue;
	struct rmlock *rm;
	struct lock_class *lc;
	struct pcpu *pc;

	rm = (struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf("\n");
	db_printf(" per-CPU readers:\n");
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)
				print_tracker(tr);
		}
	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
		print_tracker(tr);
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);