/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

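/*
 * Illustrative usage sketch (explanatory comment, not part of the original
 * file): callers declare a struct rmlock and, for each read acquisition, an
 * rm_priotracker on the reader's stack, then use the rm_* macros from
 * <sys/rmlock.h>.  The names foo_lock/foo_data below are hypothetical.
 *
 *	static struct rmlock foo_lock;
 *
 *	rm_init(&foo_lock, "foo");
 *
 *	// Read side: cheap, may run concurrently on many CPUs.
 *	struct rm_priotracker tracker;
 *	rm_rlock(&foo_lock, &tracker);
 *	// ... read foo_data ...
 *	rm_runlock(&foo_lock, &tracker);
 *
 *	// Write side: expensive, revokes every CPU's read token.
 *	rm_wlock(&foo_lock);
 *	// ... modify foo_data ...
 *	rm_wunlock(&foo_lock);
 */
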
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#define	RMPF_ONQUEUE	1
#define	RMPF_SIGNAL	2

/*
 * To support usage of rmlock in CVs and msleep, yet another list for the
 * priority tracker would be needed.  Using this lock for cv and msleep also
 * does not seem very useful.
 */

static void	assert_rm(struct lock_object *lock, int what);
static void	lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#if 0
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

static void
assert_rm(struct lock_object *lock, int what)
{

	panic("assert_rm called");
}

static void
lock_rm(struct lock_object *lock, int how)
{

	panic("lock_rm called");
}

static int
unlock_rm(struct lock_object *lock)
{

	panic("unlock_rm called");
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(struct lock_object *lock, struct thread **owner)
{

	panic("owner_rm called");
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */

static inline void
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

static inline void
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}

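/*
 * Explanatory note on the ordering above (an interpretation of the code, not
 * original commentary): rm_tracker_add() fully links the new entry before
 * publishing it with the final store to pc->pc_rm_queue.rmq_next, and
 * rm_tracker_remove() unlinks with a single store to prev->rmq_next.  An
 * interrupt on the local CPU therefore always observes a consistent forward
 * chain; the rmq_prev links are maintained only for removal and are never
 * relied upon during traversal.
 */
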
static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;
	pc = pcpu_find(curcpu);

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}

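/*
 * Explanatory note: rm_cleanIPI() runs on each CPU whose read token is being
 * revoked (via the smp_rendezvous_cpus() call in _rm_wlock() below).  Any
 * tracker found on the local per-cpu queue belongs to a reader still inside
 * its read section; it is flagged RMPF_ONQUEUE and moved onto
 * rm_activeReaders so the writer can wait for it on the lock's turnstile.
 */
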
CTASSERT((RM_SLEEPABLE & LO_CLASSFLAGS) == RM_SLEEPABLE);

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	int liflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= RM_SLEEPABLE;
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_RECURSE);
	} else
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
	lock_init(&rm->lock_object, &lock_class_rm, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}

void
rm_destroy(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}

int
rm_wowned(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
	struct rm_args *args = arg;

	rm_init(args->ra_rm, args->ra_desc);
}

void
rm_sysinit_flags(void *arg)
{
	struct rm_args_flags *args = arg;

	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}

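/*
 * Explanatory note: rm_sysinit() and rm_sysinit_flags() allow statically
 * declared rmlocks to be initialized from a SYSINIT at boot; see the
 * RM_SYSINIT() convenience wrapper in <sys/rmlock.h>.
 */
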
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;
	struct rm_queue *queue;
	struct rm_priotracker *atracker;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			atracker = (struct rm_priotracker *)queue;
			if ((atracker->rmp_rmlock == rm) &&
			    (atracker->rmp_thread == tracker->rmp_thread)) {
				mtx_lock_spin(&rm_spinlock);
				LIST_INSERT_HEAD(&rm->rm_activeReaders,
				    tracker, rmp_qentry);
				tracker->rmp_flags = RMPF_ONQUEUE;
				mtx_unlock_spin(&rm_spinlock);
				rm_tracker_add(pc, tracker);
				critical_exit();
				return (1);
			}
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & RM_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & RM_SLEEPABLE)
			sx_xlock(&rm->rm_lock_sx);
		else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}

int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	if (SCHEDULER_STOPPED())
		return (1);

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	td->td_critnest++;	/* critical_enter(); */
	__compiler_membar();
	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
	rm_tracker_add(pc, tracker);
	sched_pin();
	__compiler_membar();
	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return _rm_rlock_hard(rm, tracker, trylock);
}

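/*
 * Explanatory note on the fast path above: a read acquisition normally
 * touches only per-cpu state.  The tracker is enqueued inside a hand-rolled
 * critical section (td_critnest is bumped directly to avoid a function
 * call), and a single conditional decides whether the slow path is needed:
 * either a writer has revoked this CPU's read token (rm_writecpus), or the
 * open-coded critical_exit() still owes a preemption (td_owepreempt).
 */
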
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}

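/*
 * Explanatory note: _rm_unlock_hard() is reached only when an IPI or a
 * blocked writer marked this tracker.  If RMPF_SIGNAL is set, a writer is
 * sleeping on the lock's turnstile and the departing reader wakes it; a
 * plain RMPF_ONQUEUE tracker is merely delisted.  The td_owepreempt dance
 * re-enters a real critical section so critical_exit() can perform the
 * deferred preemption.
 */
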
void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}

void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (SCHEDULER_STOPPED())
		return;

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendevous_barrier,
		    rm_cleanIPI,
		    smp_no_rendevous_barrier,
		    rm);
#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}

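/*
 * Writer protocol summary (explanatory note): writers first serialize on
 * rm_lock_mtx (or rm_lock_sx for RM_SLEEPABLE locks).  _rm_wlock() then
 * marks every CPU in rm_writecpus to revoke the per-cpu read tokens, IPIs
 * the CPUs that held a token so in-flight readers register themselves in
 * rm_activeReaders, and sleeps on the turnstile until the last such reader
 * drains through _rm_unlock_hard().
 */
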
void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}

#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		WITNESS_LOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
		    file, line);
	else
		WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

	curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	curthread->td_locks--;
	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
		WITNESS_UNLOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
		    file, line);
	else
		WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	if (!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE))
		WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER,
		    file, line, NULL);
	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);

	if (_rm_rlock(rm, tracker, trylock)) {
		LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);
		curthread->td_locks++;
		return (1);
	}

	return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	curthread->td_locks--;
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return _rm_rlock(rm, tracker, trylock);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif