/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>
/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define	RM_DESTROYED	((void *)0xdead)

#define	rm_destroyed(rm)						\
	(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)
#define	RMPF_ONQUEUE	1

#define	_rm_assert(c, what, file, line)

static void	assert_rm(const struct lock_object *lock, int what);
static void	db_show_rm(const struct lock_object *lock);
static void	lock_rm(struct lock_object *lock, uintptr_t how);
static int	owner_rm(const struct lock_object *lock, struct thread **owner);
static uintptr_t unlock_rm(struct lock_object *lock);
struct lock_class lock_class_rm = {
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
	.lc_ddb_show = db_show_rm,
	.lc_unlock = unlock_rm,

struct lock_class lock_class_rm_sleepable = {
	.lc_name = "sleepable rm",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
	.lc_assert = assert_rm,
	.lc_ddb_show = db_show_rm,
	.lc_unlock = unlock_rm,
	.lc_owner = owner_rm,
assert_rm(const struct lock_object *lock, int what)

	rm_assert((const struct rmlock *)lock, what);

lock_rm(struct lock_object *lock, uintptr_t how)

	struct rm_priotracker *tracker;

	rm = (struct rmlock *)lock;
	tracker = (struct rm_priotracker *)how;
	rm_rlock(rm, tracker);
unlock_rm(struct lock_object *lock)

	struct rm_queue *queue;
	struct rm_priotracker *tracker;

	rm = (struct rmlock *)lock;
	rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);

	/*
	 * Find the right rm_priotracker structure for curthread.
	 * The guarantee about its uniqueness is given by the fact
	 * we already asserted the lock wasn't recursively acquired.
	 */
	for (queue = pc->pc_rm_queue.rmq_next;
	    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) &&
		    (tracker->rmp_thread == td)) {
			how = (uintptr_t)tracker;

	KASSERT(tracker != NULL,
	    ("rm_priotracker is non-NULL when lock held in read mode"));
	rm_runlock(rm, tracker);
owner_rm(const struct lock_object *lock, struct thread **owner)

	const struct rmlock *rm;
	struct lock_class *lc;

	rm = (const struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));

static struct mtx rm_spinlock;
MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)

	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)

	struct rm_queue *queue;
	struct rm_priotracker *tracker;

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)

	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
rm_cleanIPI(void *arg)

	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
rm_init_flags(struct rmlock *rm, const char *name, int opts)

	struct lock_class *lc;

	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= LO_SLEEPABLE;
		lc = &lock_class_rm_sleepable;
		xflags = (opts & RM_NEW ? SX_NEW : 0);
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx",
		    xflags | SX_NOWITNESS);
		xflags = (opts & RM_NEW ? MTX_NEW : 0);
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx",
		    xflags | MTX_NOWITNESS);
	lock_init(&rm->lock_object, lc, name, NULL, liflags);

rm_init(struct rmlock *rm, const char *name)

	rm_init_flags(rm, name, 0);
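
/*
 * Illustrative usage sketch (not part of the implementation; the "foo"
 * names are hypothetical).  A consumer declares the lock plus a stack
 * rm_priotracker for each read acquisition, pairing rm_rlock()/rm_runlock()
 * for the frequent readers and rm_wlock()/rm_wunlock() for the rare writer:
 *
 *	static struct rmlock foo_lock;
 *	rm_init(&foo_lock, "foo");
 *
 *	void
 *	foo_read(void)
 *	{
 *		struct rm_priotracker tracker;
 *
 *		rm_rlock(&foo_lock, &tracker);
 *		... read data protected by foo_lock ...
 *		rm_runlock(&foo_lock, &tracker);
 *	}
 *
 *	void
 *	foo_write(void)
 *	{
 *
 *		rm_wlock(&foo_lock);
 *		... modify data protected by foo_lock ...
 *		rm_wunlock(&foo_lock);
 *	}
 */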
rm_destroy(struct rmlock *rm)

	rm_assert(rm, RA_UNLOCKED);
	LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);

rm_wowned(const struct rmlock *rm)

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
		return (mtx_owned(&rm->rm_lock_mtx));

rm_sysinit(void *arg)

	struct rm_args *args;

	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_flags);
static __noinline int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/*
	 * Check to see if the IPI granted us the lock after all. The load of
	 * rmp_flags must happen after the tracker is removed from the list.
	 */
	atomic_interrupt_fence();
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);

	if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
		if (!sx_try_xlock(&rm->rm_lock_sx))
		if (!mtx_trylock(&rm->rm_lock_mtx))

	if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
		THREAD_SLEEPING_OK();
		sx_xlock(&rm->rm_lock_sx);
		THREAD_NO_SLEEPING();
		mtx_lock(&rm->rm_lock_mtx);

	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
		mtx_unlock(&rm->rm_lock_mtx);
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)

	struct thread *td = curthread;

	if (SCHEDULER_STOPPED())

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_NO_SLEEPING();

	td->td_critnest++;	/* critical_enter(); */
	atomic_interrupt_fence();

	pc = cpuid_to_pcpu[td->td_oncpu];
	rm_tracker_add(pc, tracker);

	atomic_interrupt_fence();

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (__predict_true(0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus))))

	/* We do not have a read token and need to acquire one. */
	return _rm_rlock_hard(rm, tracker, trylock);
static __noinline void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)

	if (td->td_owepreempt) {

	if (!tracker->rmp_flags)

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts);
		turnstile_chain_unlock(&rm->lock_object);

		mtx_unlock_spin(&rm_spinlock);
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)

	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())

	td->td_critnest++;	/* critical_enter(); */
	atomic_interrupt_fence();

	pc = cpuid_to_pcpu[td->td_oncpu];
	rm_tracker_remove(pc, tracker);

	atomic_interrupt_fence();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_SLEEPING_OK();

	if (__predict_true(0 == (td->td_owepreempt | tracker->rmp_flags)))

	_rm_unlock_hard(td, tracker);
_rm_wlock(struct rmlock *rm)

	struct rm_priotracker *prio;
	struct turnstile *ts;

	if (SCHEDULER_STOPPED())

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		CPU_ANDNOT(&readcpus, &readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendezvous_barrier,
		    rm_cleanIPI,
		    smp_no_rendezvous_barrier,
		    rm);
	mtx_lock_spin(&rm_spinlock);
	while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
		ts = turnstile_trywait(&rm->lock_object);
		prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
		mtx_unlock_spin(&rm_spinlock);
		turnstile_wait(ts, prio->rmp_thread,
		    TS_EXCLUSIVE_QUEUE);
		mtx_lock_spin(&rm_spinlock);
	mtx_unlock_spin(&rm_spinlock);
_rm_wunlock(struct rmlock *rm)

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
		mtx_unlock(&rm->rm_lock_mtx);

_rm_wlock_debug(struct rmlock *rm, const char *file, int line)

	if (SCHEDULER_STOPPED())

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_UNLOCKED, file, line);

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)

	if (SCHEDULER_STOPPED())

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	TD_LOCKS_DEC(curthread);
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)

	if (SCHEDULER_STOPPED())

	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		KASSERT(rm_trackers_present(get_pcpu(), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	KASSERT(!rm_wowned(rm),
	    ("rm_rlock: wlock already held for %s @ %s:%d",
	    rm->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rm->lock_object,
	    LOP_NEWORDER | LOP_NOSLEEP, file, line, NULL);

	if (_rm_rlock(rm, tracker, trylock)) {
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
		    line);
		LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
		    line);
		WITNESS_LOCK(&rm->lock_object, LOP_NOSLEEP, file, line);
		TD_LOCKS_INC(curthread);

	LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)

	if (SCHEDULER_STOPPED())

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	TD_LOCKS_DEC(curthread);
/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)

_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)

_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)

	return _rm_rlock(rm, tracker, trylock);

_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)

	_rm_runlock(rm, tracker);
#ifdef INVARIANT_SUPPORT

/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)

	if (SCHEDULER_STOPPED())

	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
		/*
		 * Handle the write-locked case. Unlike other
		 * primitives, writers can never recurse.
		 */
		if (what & RA_RLOCKED)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		count = rm_trackers_present(get_pcpu(), rm, curthread);

			panic("Lock %s not %slocked @ %s:%d\n",
			    rm->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);

			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		panic("Lock %s not exclusively locked @ %s:%d\n",
		    rm->lock_object.lo_name, file, line);

		panic("Lock %s exclusively locked @ %s:%d\n",
		    rm->lock_object.lo_name, file, line);

		count = rm_trackers_present(get_pcpu(), rm, curthread);

			panic("Lock %s read locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
		    line);

#endif /* INVARIANT_SUPPORT */
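
/*
 * For illustration only (the "foo" names are hypothetical): a function that
 * must be entered with the lock held in read mode can document and, under
 * INVARIANTS, enforce that precondition with rm_assert():
 *
 *	static void
 *	foo_query(void)
 *	{
 *
 *		rm_assert(&foo_lock, RA_RLOCKED);
 *		... access data protected by foo_lock ...
 *	}
 */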
print_tracker(struct rm_priotracker *tr)

	db_printf(" thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
	if (tr->rmp_flags & RMPF_ONQUEUE) {
		db_printf("ONQUEUE");
		if (tr->rmp_flags & RMPF_SIGNAL)
			db_printf(",SIGNAL");

db_show_rm(const struct lock_object *lock)

	struct rm_priotracker *tr;
	struct rm_queue *queue;
	const struct rmlock *rm;
	struct lock_class *lc;

	rm = (const struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf(" per-CPU readers:\n");
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)

	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);
/*
 * Read-mostly sleepable locks.
 *
 * These primitives allow both readers and writers to sleep. However, neither
 * readers nor writers are tracked, and consequently there is no priority
 * propagation.
 *
 * They are intended to be used only when write-locking is almost never needed
 * (e.g., they can guard against unloading a kernel module) while read-locking
 * happens all the time.
 *
 * Concurrent writers take turns taking the lock while going off cpu. If this
 * is of concern for your use case, this is not the right primitive.
 *
 * Neither rms_rlock nor rms_runlock use thread fences. Instead interrupt
 * fences are inserted to ensure ordering with the code executed in the IPI
 * handler.
 *
 * No attempt is made to track which CPUs read locked at least once,
 * consequently write locking sends IPIs to all of them. This will become a
 * problem at some point. The easiest way to lessen it is to provide a bitmap.
 */
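
/*
 * Illustrative usage sketch for the rms API (the "foo" names are
 * hypothetical).  Readers need no tracker, and writers should expect to go
 * off CPU while waiting for readers to drain:
 *
 *	static struct rmslock foo_rms;
 *	rms_init(&foo_rms, "foo rms");
 *
 *	rms_rlock(&foo_rms);
 *	... read data protected by foo_rms ...
 *	rms_runlock(&foo_rms);
 *
 *	rms_wlock(&foo_rms);
 *	... modify data protected by foo_rms ...
 *	rms_wunlock(&foo_rms);
 */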
#define	RMS_NOOWNER	((void *)0x1)
#define	RMS_TRANSIENT	((void *)0x2)
#define	RMS_FLAGMASK	0xf

struct rmslock_pcpu {

_Static_assert(sizeof(struct rmslock_pcpu) == 8, "bad size");

static struct rmslock_pcpu *
rms_int_pcpu(struct rmslock *rms)

	CRITICAL_ASSERT(curthread);
	return (zpcpu_get(rms->pcpu));

static struct rmslock_pcpu *
rms_int_remote_pcpu(struct rmslock *rms, int cpu)

	return (zpcpu_get_cpu(rms->pcpu, cpu));

rms_int_influx_enter(struct rmslock *rms, struct rmslock_pcpu *pcpu)

	CRITICAL_ASSERT(curthread);
	MPASS(pcpu->influx == 0);

rms_int_influx_exit(struct rmslock *rms, struct rmslock_pcpu *pcpu)

	CRITICAL_ASSERT(curthread);
	MPASS(pcpu->influx == 1);
rms_int_debug_readers_inc(struct rmslock *rms)

	old = atomic_fetchadd_int(&rms->debug_readers, 1);
	KASSERT(old >= 0, ("%s: bad readers count %d\n", __func__, old));

rms_int_debug_readers_dec(struct rmslock *rms)

	old = atomic_fetchadd_int(&rms->debug_readers, -1);
	KASSERT(old > 0, ("%s: bad readers count %d\n", __func__, old));

rms_int_debug_readers_inc(struct rmslock *rms)

rms_int_debug_readers_dec(struct rmslock *rms)

rms_int_readers_inc(struct rmslock *rms, struct rmslock_pcpu *pcpu)

	CRITICAL_ASSERT(curthread);
	rms_int_debug_readers_inc(rms);

rms_int_readers_dec(struct rmslock *rms, struct rmslock_pcpu *pcpu)

	CRITICAL_ASSERT(curthread);
	rms_int_debug_readers_dec(rms);
rms_init(struct rmslock *rms, const char *name)

	rms->owner = RMS_NOOWNER;
	rms->debug_readers = 0;
	mtx_init(&rms->mtx, name, NULL, MTX_DEF | MTX_NEW);
	rms->pcpu = uma_zalloc_pcpu(pcpu_zone_8, M_WAITOK | M_ZERO);

rms_destroy(struct rmslock *rms)

	MPASS(rms->writers == 0);
	MPASS(rms->readers == 0);
	mtx_destroy(&rms->mtx);
	uma_zfree_pcpu(pcpu_zone_8, rms->pcpu);

static void __noinline
rms_rlock_fallback(struct rmslock *rms)

	rms_int_influx_exit(rms, rms_int_pcpu(rms));

	mtx_lock(&rms->mtx);
	while (rms->writers > 0)
		msleep(&rms->readers, &rms->mtx, PUSER - 1, mtx_name(&rms->mtx), 0);
	rms_int_readers_inc(rms, rms_int_pcpu(rms));
	mtx_unlock(&rms->mtx);

	TD_LOCKS_INC(curthread);
rms_rlock(struct rmslock *rms)

	struct rmslock_pcpu *pcpu;

	rms_assert_rlock_ok(rms);
	MPASS(atomic_load_ptr(&rms->owner) != curthread);

	pcpu = rms_int_pcpu(rms);
	rms_int_influx_enter(rms, pcpu);
	atomic_interrupt_fence();
	if (__predict_false(rms->writers > 0)) {
		rms_rlock_fallback(rms);
	atomic_interrupt_fence();
	rms_int_readers_inc(rms, pcpu);
	atomic_interrupt_fence();
	rms_int_influx_exit(rms, pcpu);

	TD_LOCKS_INC(curthread);

rms_try_rlock(struct rmslock *rms)

	struct rmslock_pcpu *pcpu;

	MPASS(atomic_load_ptr(&rms->owner) != curthread);

	pcpu = rms_int_pcpu(rms);
	rms_int_influx_enter(rms, pcpu);
	atomic_interrupt_fence();
	if (__predict_false(rms->writers > 0)) {
		rms_int_influx_exit(rms, pcpu);
	atomic_interrupt_fence();
	rms_int_readers_inc(rms, pcpu);
	atomic_interrupt_fence();
	rms_int_influx_exit(rms, pcpu);

	TD_LOCKS_INC(curthread);
static void __noinline
rms_runlock_fallback(struct rmslock *rms)

	rms_int_influx_exit(rms, rms_int_pcpu(rms));

	mtx_lock(&rms->mtx);
	MPASS(rms->writers > 0);
	MPASS(rms->readers > 0);
	MPASS(rms->debug_readers == rms->readers);
	rms_int_debug_readers_dec(rms);
	if (rms->readers == 0)
		wakeup_one(&rms->writers);
	mtx_unlock(&rms->mtx);
	TD_LOCKS_DEC(curthread);

rms_runlock(struct rmslock *rms)

	struct rmslock_pcpu *pcpu;

	pcpu = rms_int_pcpu(rms);
	rms_int_influx_enter(rms, pcpu);
	atomic_interrupt_fence();
	if (__predict_false(rms->writers > 0)) {
		rms_runlock_fallback(rms);
	atomic_interrupt_fence();
	rms_int_readers_dec(rms, pcpu);
	atomic_interrupt_fence();
	rms_int_influx_exit(rms, pcpu);

	TD_LOCKS_DEC(curthread);
struct rmslock_ipi {
	struct rmslock *rms;
	struct smp_rendezvous_cpus_retry_arg srcra;

rms_action_func(void *arg)

	struct rmslock_ipi *rmsipi;
	struct rmslock_pcpu *pcpu;
	struct rmslock *rms;

	rmsipi = __containerof(arg, struct rmslock_ipi, srcra);
	pcpu = rms_int_pcpu(rms);

	if (pcpu->readers != 0) {
		atomic_add_int(&rms->readers, pcpu->readers);

	smp_rendezvous_cpus_done(arg);

rms_wait_func(void *arg, int cpu)

	struct rmslock_ipi *rmsipi;
	struct rmslock_pcpu *pcpu;
	struct rmslock *rms;

	rmsipi = __containerof(arg, struct rmslock_ipi, srcra);
	pcpu = rms_int_remote_pcpu(rms, cpu);

	while (atomic_load_int(&pcpu->influx))
rms_assert_no_pcpu_readers(struct rmslock *rms)

	struct rmslock_pcpu *pcpu;

	pcpu = rms_int_remote_pcpu(rms, cpu);
	if (pcpu->readers != 0) {
		panic("%s: got %d readers on cpu %d\n", __func__,
		    pcpu->readers, cpu);

rms_assert_no_pcpu_readers(struct rmslock *rms)

rms_wlock_switch(struct rmslock *rms)

	struct rmslock_ipi rmsipi;

	MPASS(rms->readers == 0);
	MPASS(rms->writers == 1);
	smp_rendezvous_cpus_retry(all_cpus,
	    smp_no_rendezvous_barrier,
	    rms_action_func,
	    smp_no_rendezvous_barrier,
	    rms_wait_func,
	    &rmsipi.srcra);
rms_wlock(struct rmslock *rms)

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
	MPASS(atomic_load_ptr(&rms->owner) != curthread);

	mtx_lock(&rms->mtx);
	if (rms->writers > 1) {
		msleep(&rms->owner, &rms->mtx, (PUSER - 1),
		    mtx_name(&rms->mtx), 0);
		MPASS(rms->readers == 0);
		KASSERT(rms->owner == RMS_TRANSIENT,
		    ("%s: unexpected owner value %p\n", __func__,
		    rms->owner));

	KASSERT(rms->owner == RMS_NOOWNER,
	    ("%s: unexpected owner value %p\n", __func__, rms->owner));

	rms_wlock_switch(rms);
	rms_assert_no_pcpu_readers(rms);

	if (rms->readers > 0) {
		msleep(&rms->writers, &rms->mtx, (PUSER - 1),
		    mtx_name(&rms->mtx), 0);

	rms->owner = curthread;
	rms_assert_no_pcpu_readers(rms);
	mtx_unlock(&rms->mtx);
	MPASS(rms->readers == 0);
	TD_LOCKS_INC(curthread);
rms_wunlock(struct rmslock *rms)

	mtx_lock(&rms->mtx);
	KASSERT(rms->owner == curthread,
	    ("%s: unexpected owner value %p\n", __func__, rms->owner));
	MPASS(rms->writers >= 1);
	MPASS(rms->readers == 0);
	if (rms->writers > 0) {
		wakeup_one(&rms->owner);
		rms->owner = RMS_TRANSIENT;
		wakeup(&rms->readers);
		rms->owner = RMS_NOOWNER;
	mtx_unlock(&rms->mtx);
	TD_LOCKS_DEC(curthread);

rms_unlock(struct rmslock *rms)

	if (rms_wowned(rms))