/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */
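/*
 * Overview of the implementation below: a read-mostly (rm) lock keeps read
 * acquisition cheap by recording each reader in an rm_priotracker supplied by
 * the caller and linked onto a per-CPU queue.  A writer takes the backing
 * mutex (or sx lock for sleepable rm locks), revokes the per-CPU read tokens
 * with an IPI rendezvous (rm_cleanIPI), and then sleeps on a turnstile until
 * every tracker left on rm_activeReaders has drained.
 */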
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define	RM_DESTROYED	((void *)0xdead)

#define	rm_destroyed(rm)						\
	(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)

#define	RMPF_ONQUEUE	1
#define	RMPF_SIGNAL	2

#ifndef INVARIANTS
#define	_rm_assert(c, what, file, line)
#endif
static void	assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_rm(const struct lock_object *lock);
#endif
static void	lock_rm(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rm(struct lock_object *lock);
struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};
struct lock_class lock_class_rm_sleepable = {
	.lc_name = "sleepable rm",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};
static void
assert_rm(const struct lock_object *lock, int what)
{

	rm_assert((const struct rmlock *)lock, what);
}
static void
lock_rm(struct lock_object *lock, uintptr_t how)
{
	struct rmlock *rm;
	struct rm_priotracker *tracker;

	rm = (struct rmlock *)lock;
	if (how == 0)
		rm_wlock(rm);
	else {
		tracker = (struct rm_priotracker *)how;
		rm_rlock(rm, tracker);
	}
}
static uintptr_t
unlock_rm(struct lock_object *lock)
{
	struct thread *td;
	struct pcpu *pc;
	struct rmlock *rm;
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	uintptr_t how;

	rm = (struct rmlock *)lock;
	tracker = NULL;
	how = 0;
	rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
	if (rm_wowned(rm))
		rm_wunlock(rm);
	else {
		/*
		 * Find the right rm_priotracker structure for curthread.
		 * The guarantee about its uniqueness is given by the fact
		 * we already asserted the lock wasn't recursively acquired.
		 */
		critical_enter();
		td = curthread;
		pc = get_pcpu();
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tracker = (struct rm_priotracker *)queue;
			if ((tracker->rmp_rmlock == rm) &&
			    (tracker->rmp_thread == td)) {
				how = (uintptr_t)tracker;
				break;
			}
		}
		KASSERT(tracker != NULL,
		    ("rm_priotracker is non-NULL when lock held in read mode"));
		critical_exit();
		rm_runlock(rm, tracker);
	}
	return (how);
}
#ifdef KDTRACE_HOOKS
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
	const struct rmlock *rm;
	struct lock_class *lc;

	rm = (const struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif
static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);
/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
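/*
 * The list is rooted at pc->pc_rm_queue, a sentinel element whose rmq_next
 * and rmq_prev point back at itself when the list is empty, so traversal
 * always terminates at &pc->pc_rm_queue.  Entries are inserted and removed
 * only by the owning CPU while it is in a critical section, which is what
 * makes the lock-free forward traversal from a local interrupt safe.
 */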
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}
/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	int count;

	count = 0;
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
			count++;
	}
	return (count);
}
static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}
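/*
 * IPI handler used by _rm_wlock(): it runs on each CPU that still holds a
 * read token and flags every tracker for this lock as RMPF_ONQUEUE, placing
 * it on rm_activeReaders so that _rm_wlock() can wait for those readers to
 * drain.
 */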
static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;
	pc = get_pcpu();

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}
void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	struct lock_class *lc;
	int liflags, xflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	if (opts & RM_NEW)
		liflags |= LO_NEW;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= LO_SLEEPABLE;
		lc = &lock_class_rm_sleepable;
		xflags = (opts & RM_NEW ? SX_NEW : 0);
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx",
		    xflags | SX_NOWITNESS);
	} else {
		lc = &lock_class_rm;
		xflags = (opts & RM_NEW ? MTX_NEW : 0);
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx",
		    xflags | MTX_NOWITNESS);
	}
	lock_init(&rm->lock_object, lc, name, NULL, liflags);
}
void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}
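/*
 * Illustrative consumer pattern (not compiled; the 'foo' names below are
 * hypothetical).  Readers supply an rm_priotracker on their own stack, which
 * is what keeps the read path cheap; writers use the plain wlock/wunlock
 * interface documented in rmlock(9).
 */
#if 0
static struct rmlock foo_lock;
static int foo_stat;

static void
foo_example(void)
{
	struct rm_priotracker tracker;
	int snapshot;

	rm_init(&foo_lock, "foo");

	/* Read side: frequent and cheap. */
	rm_rlock(&foo_lock, &tracker);
	snapshot = foo_stat;
	rm_runlock(&foo_lock, &tracker);

	/* Write side: rare and expensive (revokes all per-CPU read tokens). */
	rm_wlock(&foo_lock);
	foo_stat = snapshot + 1;
	rm_wunlock(&foo_lock);

	rm_destroy(&foo_lock);
}
#endif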
void
rm_destroy(struct rmlock *rm)
{

	rm_assert(rm, RA_UNLOCKED);
	LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}
int
rm_wowned(const struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}
void
rm_sysinit(void *arg)
{
	struct rm_args *args;

	args = arg;
	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_flags);
}
static __noinline int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = get_pcpu();

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/*
	 * Check to see if the IPI granted us the lock after all.  The load of
	 * rmp_flags must happen after the tracker is removed from the list.
	 */
	__compiler_membar();
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			THREAD_SLEEPING_OK();
			sx_xlock(&rm->rm_lock_sx);
			THREAD_NO_SLEEPING();
		} else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = get_pcpu();
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	if (SCHEDULER_STOPPED())
		return (1);

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_NO_SLEEPING();

	td->td_critnest++;	/* critical_enter(); */

	__compiler_membar();

	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	sched_pin();

	__compiler_membar();

	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (__predict_true(0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus))))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return _rm_rlock_hard(rm, tracker, trylock);
}
static __noinline void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}
void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_SLEEPING_OK();

	if (__predict_true(0 == (td->td_owepreempt | tracker->rmp_flags)))
		return;

	_rm_unlock_hard(td, tracker);
}
void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (SCHEDULER_STOPPED())
		return;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendezvous_barrier,
		    rm_cleanIPI,
		    smp_no_rendezvous_barrier,
		    rm);

#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}
void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}
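/*
 * To summarize the writer/reader interplay implemented above: _rm_wlock()
 * first takes the backing mutex or sx lock, then revokes every outstanding
 * per-CPU read token via the rm_cleanIPI rendezvous, and finally loops over
 * rm_activeReaders, marking each tracker RMPF_SIGNAL and blocking on the
 * lock's turnstile until the corresponding reader calls _rm_unlock_hard(),
 * which removes the tracker and wakes the writer.
 */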
#if LOCK_DEBUG > 0

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_UNLOCKED, file, line);

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}
void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
	TD_LOCKS_DEC(curthread);
}
int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(get_pcpu(), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
		    NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}
void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	TD_LOCKS_DEC(curthread);
}
#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return _rm_rlock(rm, tracker, trylock);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rm_assert
#endif

/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
void
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)
{
	int count;

	if (SCHEDULER_STOPPED())
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
		/*
		 * Handle the write-locked case.  Unlike other
		 * primitives, writers can never recurse.
		 */
		if (rm_wowned(rm)) {
			if (what & RA_RLOCKED)
				panic("Lock %s exclusively locked @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			break;
		}

		critical_enter();
		count = rm_trackers_present(get_pcpu(), rm, curthread);
		critical_exit();

		if (count == 0)
			panic("Lock %s not %slocked @ %s:%d\n",
			    rm->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);
		if (count > 1) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_WLOCKED:
		if (!rm_wowned(rm))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
		if (rm_wowned(rm))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		critical_enter();
		count = rm_trackers_present(get_pcpu(), rm, curthread);
		critical_exit();

		if (count != 0)
			panic("Lock %s read locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
	struct thread *td;

	td = tr->rmp_thread;
	db_printf("   thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
	if (tr->rmp_flags & RMPF_ONQUEUE) {
		db_printf("ONQUEUE");
		if (tr->rmp_flags & RMPF_SIGNAL)
			db_printf(",SIGNAL");
	} else
		db_printf("0");
	db_printf("}\n");
}
static void
db_show_rm(const struct lock_object *lock)
{
	struct rm_priotracker *tr;
	struct rm_queue *queue;
	const struct rmlock *rm;
	struct lock_class *lc;
	struct pcpu *pc;

	rm = (const struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf("\n");
	db_printf(" per-CPU readers:\n");
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)
				print_tracker(tr);
		}
	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
		print_tracker(tr);
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif