/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Machine independent bits of reader/writer lock implementation.
 */
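/*
 * Rough usage sketch (illustrative only; consumer code lives elsewhere and
 * the names below are made up).  Readers supply an on-stack rm_priotracker,
 * writers go through the backing mutex/sx lock:
 *
 *	static struct rmlock data_lock;
 *	struct rm_priotracker tracker;
 *
 *	rm_init(&data_lock, "data_lock");
 *
 *	rm_rlock(&data_lock, &tracker);		-- cheap, read-mostly path
 *	... read shared data ...
 *	rm_runlock(&data_lock, &tracker);
 *
 *	rm_wlock(&data_lock);			-- rare, expensive write path
 *	... modify shared data ...
 *	rm_wunlock(&data_lock);
 */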
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define	RM_DESTROYED	((void *)0xdead)

#define	rm_destroyed(rm)						\
	(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)

#define	RMPF_ONQUEUE	1
#define	RMPF_SIGNAL	2

#ifndef INVARIANTS
#define	_rm_assert(c, what, file, line)
#endif
static void	assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_rm(const struct lock_object *lock);
#endif
static void	lock_rm(struct lock_object *lock, uintptr_t how);
static int	owner_rm(const struct lock_object *lock, struct thread **owner);
static uintptr_t unlock_rm(struct lock_object *lock);
struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
	.lc_owner = owner_rm,
};
struct lock_class lock_class_rm_sleepable = {
	.lc_name = "sleepable rm",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
	.lc_owner = owner_rm,
};
static void
assert_rm(const struct lock_object *lock, int what)
{

	rm_assert((const struct rmlock *)lock, what);
}
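/*
 * Lock-class glue: lc_lock/lc_unlock methods used when the kernel has to
 * drop and later reacquire an rmlock on a thread's behalf (for example
 * around a sleep).  The 'how' cookie returned by unlock_rm() encodes the
 * read-mode rm_priotracker pointer, or 0 for a write lock, and lock_rm()
 * interprets it the same way.
 */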
static void
lock_rm(struct lock_object *lock, uintptr_t how)
{
	struct rmlock *rm;
	struct rm_priotracker *tracker;

	rm = (struct rmlock *)lock;
	if (how == 0)
		rm_wlock(rm);
	else {
		tracker = (struct rm_priotracker *)how;
		rm_rlock(rm, tracker);
	}
}
static uintptr_t
unlock_rm(struct lock_object *lock)
{
	struct thread *td;
	struct pcpu *pc;
	struct rmlock *rm;
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	uintptr_t how;

	rm = (struct rmlock *)lock;
	tracker = NULL;
	how = 0;
	rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
	if (rm_wowned(rm))
		rm_wunlock(rm);
	else {
		/*
		 * Find the right rm_priotracker structure for curthread.
		 * The guarantee about its uniqueness is given by the fact
		 * we already asserted the lock wasn't recursively acquired.
		 */
		critical_enter();
		td = curthread;
		pc = pcpu_find(curcpu);
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tracker = (struct rm_priotracker *)queue;
			if ((tracker->rmp_rmlock == rm) &&
			    (tracker->rmp_thread == td)) {
				how = (uintptr_t)tracker;
				break;
			}
		}
		KASSERT(tracker != NULL,
		    ("rm_priotracker is non-NULL when lock held in read mode"));
		critical_exit();
		rm_runlock(rm, tracker);
	}
	return (how);
}
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
	const struct rmlock *rm;
	struct lock_class *lc;

	rm = (const struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);
/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static inline void
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}
/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	int count;

	count = 0;
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
			count++;
	}
	return (count);
}
static inline void
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}
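/*
 * IPI handler run on each CPU that may still hold a read token when a
 * writer wants the lock: every active tracker for this rmlock on the local
 * per-cpu list is flagged RMPF_ONQUEUE and put on the lock's
 * rm_activeReaders list, so the writer can wait for the readers that are
 * still inside the lock and be woken as each of them drops it.
 */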
static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;

	pc = pcpu_find(curcpu);

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}
void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	struct lock_class *lc;
	int liflags, xflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= LO_SLEEPABLE;
		lc = &lock_class_rm_sleepable;
		xflags = (opts & RM_NEW ? SX_NEW : 0);
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx",
		    xflags | SX_NOWITNESS);
	} else {
		lc = &lock_class_rm;
		xflags = (opts & RM_NEW ? MTX_NEW : 0);
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx",
		    xflags | MTX_NOWITNESS);
	}
	lock_init(&rm->lock_object, lc, name, NULL, liflags);
}
void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}
void
rm_destroy(struct rmlock *rm)
{

	rm_assert(rm, RA_UNLOCKED);
	LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}
int
rm_wowned(const struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}
void
rm_sysinit(void *arg)
{
	struct rm_args *args;

	args = arg;
	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_flags);
}
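/*
 * Slow path for read acquisition: taken when this CPU does not hold a read
 * token (its bit is still set in rm_writecpus) or when preemption is
 * pending.  The caller (_rm_rlock) has already queued 'tracker' on the
 * per-cpu list; this function may have to fall back to taking the backing
 * mutex/sx lock in order to obtain a read token for this CPU.
 */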
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			THREAD_SLEEPING_OK();
			sx_xlock(&rm->rm_lock_sx);
			THREAD_NO_SLEEPING();
		} else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}
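/*
 * Common read-lock entry point.  The fast path queues the caller-supplied
 * tracker on the local per-cpu list inside a short critical section
 * (open-coded as td_critnest++/--) and succeeds immediately when this CPU
 * already holds a read token and no preemption is pending; otherwise it
 * punts to _rm_rlock_hard().
 */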
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	if (SCHEDULER_STOPPED())
		return (1);

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_NO_SLEEPING();

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu];	/* pcpu_find(td->td_oncpu); */
	rm_tracker_add(pc, tracker);
	sched_pin();
	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return _rm_rlock_hard(rm, tracker, trylock);
}
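/*
 * Slow path for read release: pays back a deferred preemption and, if this
 * tracker was put on rm_activeReaders by rm_cleanIPI(), takes it off again;
 * when the writer asked to be signalled (RMPF_SIGNAL), it is woken up via
 * the lock's turnstile.
 */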
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}
void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu];	/* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_SLEEPING_OK();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}
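/*
 * Write acquisition: take the backing mutex/sx lock, then revoke every
 * outstanding read token by setting rm_writecpus to all_cpus and running
 * rm_cleanIPI() on the CPUs that held one.  Any readers that are still
 * active end up on rm_activeReaders and the writer sleeps on the lock's
 * turnstile until each of them has dropped the lock.
 */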
void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (SCHEDULER_STOPPED())
		return;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendezvous_barrier,
		    rm_cleanIPI,
		    smp_no_rendezvous_barrier,
		    rm);

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}
void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}
#if LOCK_DEBUG > 0

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_UNLOCKED, file, line);

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}
void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
	TD_LOCKS_DEC(curthread);
}
int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
		    NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}
void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	TD_LOCKS_DEC(curthread);
}
#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return _rm_rlock(rm, tracker, trylock);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rm_assert
#endif

/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
void
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)
{
	int count;

	if (SCHEDULER_STOPPED())
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
		/*
		 * Handle the write-locked case.  Unlike other
		 * primitives, writers can never recurse.
		 */
		if (rm_wowned(rm)) {
			if (what & RA_RLOCKED)
				panic("Lock %s exclusively locked @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			break;
		}

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count == 0)
			panic("Lock %s not %slocked @ %s:%d\n",
			    rm->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);
		if (count > 1) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_WLOCKED:
		if (!rm_wowned(rm))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
		if (rm_wowned(rm))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count != 0)
			panic("Lock %s read locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
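/*
 * DDB support: dump the per-CPU tracker queues and the active reader list
 * for a given rmlock from the kernel debugger (e.g. via the ddb
 * "show lock" command, which dispatches through lc_ddb_show).
 */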
#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
	struct thread *td;

	td = tr->rmp_thread;
	db_printf("   thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
	if (tr->rmp_flags & RMPF_ONQUEUE) {
		db_printf("ONQUEUE");
		if (tr->rmp_flags & RMPF_SIGNAL)
			db_printf(",SIGNAL");
	} else
		db_printf("0");
	db_printf("}\n");
}

static void
db_show_rm(const struct lock_object *lock)
{
	struct rm_priotracker *tr;
	struct rm_queue *queue;
	const struct rmlock *rm;
	struct lock_class *lc;
	struct pcpu *pc;

	rm = (const struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf("\n");
	db_printf(" per-CPU readers:\n");
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)
				print_tracker(tr);
		}
	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
		print_tracker(tr);
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif