/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */
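
/*
 * Illustrative consumer sketch (not part of this file): an rmlock is
 * normally used through the rmlock(9) wrappers, with each read section
 * supplying an on-stack rm_priotracker.  The names foo_lock and foo_value
 * below are hypothetical.
 *
 *	static struct rmlock foo_lock;
 *	static int foo_value;
 *
 *	rm_init(&foo_lock, "foo");
 *
 * Read side (cheap, common case):
 *
 *	struct rm_priotracker tracker;
 *	int v;
 *
 *	rm_rlock(&foo_lock, &tracker);
 *	v = foo_value;
 *	rm_runlock(&foo_lock, &tracker);
 *
 * Write side (expensive, rare case):
 *
 *	rm_wlock(&foo_lock);
 *	foo_value++;
 *	rm_wunlock(&foo_lock);
 */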

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define RM_DESTROYED    ((void *)0xdead)

#define rm_destroyed(rm)                                                \
        (LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)

#define RMPF_ONQUEUE    1
#define RMPF_SIGNAL     2

#ifndef INVARIANTS
#define _rm_assert(c, what, file, line)
#endif

static void     assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void     db_show_rm(const struct lock_object *lock);
#endif
static void     lock_rm(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int      owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
        .lc_name = "rm",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
        .lc_assert = assert_rm,
#ifdef DDB
        .lc_ddb_show = db_show_rm,
#endif
        .lc_lock = lock_rm,
        .lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_rm,
#endif
};

struct lock_class lock_class_rm_sleepable = {
        .lc_name = "sleepable rm",
        .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
        .lc_assert = assert_rm,
#ifdef DDB
        .lc_ddb_show = db_show_rm,
#endif
        .lc_lock = lock_rm,
        .lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_rm,
#endif
};

static void
assert_rm(const struct lock_object *lock, int what)
{

        rm_assert((const struct rmlock *)lock, what);
}

static void
lock_rm(struct lock_object *lock, uintptr_t how)
{
        struct rmlock *rm;
        struct rm_priotracker *tracker;

        rm = (struct rmlock *)lock;
        if (how == 0)
                rm_wlock(rm);
        else {
                tracker = (struct rm_priotracker *)how;
                rm_rlock(rm, tracker);
        }
}

static uintptr_t
unlock_rm(struct lock_object *lock)
{
        struct thread *td;
        struct pcpu *pc;
        struct rmlock *rm;
        struct rm_queue *queue;
        struct rm_priotracker *tracker;
        uintptr_t how;

        rm = (struct rmlock *)lock;
        tracker = NULL;
        how = 0;
        rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
        if (rm_wowned(rm))
                rm_wunlock(rm);
        else {
                /*
                 * Find the right rm_priotracker structure for curthread.
                 * The guarantee about its uniqueness is given by the fact
                 * we already asserted the lock wasn't recursively acquired.
                 */
                critical_enter();
                td = curthread;
                pc = get_pcpu();
                for (queue = pc->pc_rm_queue.rmq_next;
                    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
                        tracker = (struct rm_priotracker *)queue;
                        if ((tracker->rmp_rmlock == rm) &&
                            (tracker->rmp_thread == td)) {
                                how = (uintptr_t)tracker;
                                break;
                        }
                }
                KASSERT(tracker != NULL,
                    ("rm_priotracker is NULL when lock held in read mode"));
                critical_exit();
                rm_runlock(rm, tracker);
        }
        return (how);
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
        const struct rmlock *rm;
        struct lock_class *lc;

        rm = (const struct rmlock *)lock;
        lc = LOCK_CLASS(&rm->rm_wlock_object);
        return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
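
/*
 * Interrupt-side sketch (illustration only): code running in interrupt
 * context on this CPU may walk the list forward while rm_tracker_add() or
 * rm_tracker_remove() runs, roughly as rm_cleanIPI() below does:
 *
 *	struct rm_queue *q;
 *
 *	for (q = pc->pc_rm_queue.rmq_next; q != &pc->pc_rm_queue;
 *	    q = q->rmq_next)
 *		examine((struct rm_priotracker *)q);
 *
 * ("examine" is a hypothetical callback.)  This is why rm_tracker_add()
 * fully initializes the new element before publishing it through
 * pc->pc_rm_queue.rmq_next, and why rmq_prev needs no such care.
 */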
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next;

        /* Initialize all tracker pointers */
        tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
        next = pc->pc_rm_queue.rmq_next;
        tracker->rmp_cpuQueue.rmq_next = next;

        /* rmq_prev is not used during forward traversal. */
        next->rmq_prev = &tracker->rmp_cpuQueue;

        /* Update pointer to first element. */
        pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
        struct rm_queue *queue;
        struct rm_priotracker *tracker;
        int count;

        count = 0;
        for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
            queue = queue->rmq_next) {
                tracker = (struct rm_priotracker *)queue;
                if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
                        count++;
        }
        return (count);
}

static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next, *prev;

        next = tracker->rmp_cpuQueue.rmq_next;
        prev = tracker->rmp_cpuQueue.rmq_prev;

        /* Not used during forward traversal. */
        next->rmq_prev = prev;

        /* Remove from list. */
        prev->rmq_next = next;
}

static void
rm_cleanIPI(void *arg)
{
        struct pcpu *pc;
        struct rmlock *rm = arg;
        struct rm_priotracker *tracker;
        struct rm_queue *queue;

        pc = get_pcpu();
        for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
            queue = queue->rmq_next) {
                tracker = (struct rm_priotracker *)queue;
                if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
                        tracker->rmp_flags = RMPF_ONQUEUE;
                        mtx_lock_spin(&rm_spinlock);
                        LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
                            rmp_qentry);
                        mtx_unlock_spin(&rm_spinlock);
                }
        }
}

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
        struct lock_class *lc;
        int liflags, xflags;

        liflags = 0;
        if (!(opts & RM_NOWITNESS))
                liflags |= LO_WITNESS;
        if (opts & RM_RECURSE)
                liflags |= LO_RECURSABLE;
        if (opts & RM_NEW)
                liflags |= LO_NEW;
        if (opts & RM_DUPOK)
                liflags |= LO_DUPOK;
        rm->rm_writecpus = all_cpus;
        LIST_INIT(&rm->rm_activeReaders);
        if (opts & RM_SLEEPABLE) {
                liflags |= LO_SLEEPABLE;
                lc = &lock_class_rm_sleepable;
                xflags = (opts & RM_NEW ? SX_NEW : 0);
                sx_init_flags(&rm->rm_lock_sx, "rmlock_sx",
                    xflags | SX_NOWITNESS);
        } else {
                lc = &lock_class_rm;
                xflags = (opts & RM_NEW ? MTX_NEW : 0);
                mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx",
                    xflags | MTX_NOWITNESS);
        }
        lock_init(&rm->lock_object, lc, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

        rm_init_flags(rm, name, 0);
}
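
/*
 * Illustrative sketch (not part of this file): rm_init_flags() selects the
 * backing write lock based on RM_SLEEPABLE.  The names foo_cfg_lock and
 * bar_list_lock are hypothetical.
 *
 * A read-mostly lock whose read sections may recurse:
 *
 *	static struct rmlock foo_cfg_lock;
 *	rm_init_flags(&foo_cfg_lock, "foo cfg", RM_RECURSE);
 *
 * A sleepable variant, backed by an sx lock so that writers may sleep
 * while holding it (readers still may not sleep):
 *
 *	static struct rmlock bar_list_lock;
 *	rm_init_flags(&bar_list_lock, "bar list", RM_SLEEPABLE);
 */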

void
rm_destroy(struct rmlock *rm)
{

        rm_assert(rm, RA_UNLOCKED);
        LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_destroy(&rm->rm_lock_sx);
        else
                mtx_destroy(&rm->rm_lock_mtx);
        lock_destroy(&rm->lock_object);
}

int
rm_wowned(const struct rmlock *rm)
{

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                return (sx_xlocked(&rm->rm_lock_sx));
        else
                return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
        struct rm_args *args;

        args = arg;
        rm_init_flags(args->ra_rm, args->ra_desc, args->ra_flags);
}
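
/*
 * Illustrative sketch (not part of this file): rm_sysinit() is the
 * SYSINIT(9) handler behind the RM_SYSINIT() convenience macro declared
 * in sys/sys/rmlock.h, which initializes a lock during boot, e.g.
 * (hypothetical lock name):
 *
 *	static struct rmlock baz_lock;
 *	RM_SYSINIT(baz_lock_init, &baz_lock, "baz");
 */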

static __noinline int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
        struct pcpu *pc;

        critical_enter();
        pc = get_pcpu();

        /* Check if we just need to do a proper critical_exit. */
        if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
                critical_exit();
                return (1);
        }

        /* Remove our tracker from the per-cpu list. */
        rm_tracker_remove(pc, tracker);

        /*
         * Check to see if the IPI granted us the lock after all.  The load of
         * rmp_flags must happen after the tracker is removed from the list.
         */
        __compiler_membar();
        if (tracker->rmp_flags) {
                /* Just add back tracker - we hold the lock. */
                rm_tracker_add(pc, tracker);
                critical_exit();
                return (1);
        }

        /*
         * We allow readers to acquire a lock even if a writer is blocked if
         * the lock is recursive and the reader already holds the lock.
         */
        if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                /*
                 * Just grant the lock if this thread already has a tracker
                 * for this lock on the per-cpu queue.
                 */
                if (rm_trackers_present(pc, rm, curthread) != 0) {
                        mtx_lock_spin(&rm_spinlock);
                        LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
                            rmp_qentry);
                        tracker->rmp_flags = RMPF_ONQUEUE;
                        mtx_unlock_spin(&rm_spinlock);
                        rm_tracker_add(pc, tracker);
                        critical_exit();
                        return (1);
                }
        }

        sched_unpin();
        critical_exit();

        if (trylock) {
                if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
                        if (!sx_try_xlock(&rm->rm_lock_sx))
                                return (0);
                } else {
                        if (!mtx_trylock(&rm->rm_lock_mtx))
                                return (0);
                }
        } else {
                if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
                        THREAD_SLEEPING_OK();
                        sx_xlock(&rm->rm_lock_sx);
                        THREAD_NO_SLEEPING();
                } else
                        mtx_lock(&rm->rm_lock_mtx);
        }

        critical_enter();
        pc = get_pcpu();
        CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
        rm_tracker_add(pc, tracker);
        sched_pin();
        critical_exit();

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_xunlock(&rm->rm_lock_sx);
        else
                mtx_unlock(&rm->rm_lock_mtx);

        return (1);
}

int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
        struct thread *td = curthread;
        struct pcpu *pc;

        if (SCHEDULER_STOPPED())
                return (1);

        tracker->rmp_flags  = 0;
        tracker->rmp_thread = td;
        tracker->rmp_rmlock = rm;

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                THREAD_NO_SLEEPING();

        td->td_critnest++;      /* critical_enter(); */

        __compiler_membar();

        pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

        rm_tracker_add(pc, tracker);

        sched_pin();

        __compiler_membar();

        td->td_critnest--;

        /*
         * Fast path to combine two common conditions into a single
         * conditional jump.
         */
        if (__predict_true(0 == (td->td_owepreempt |
            CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus))))
                return (1);

        /* We do not have a read token and need to acquire one. */
        return (_rm_rlock_hard(rm, tracker, trylock));
}
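
/*
 * Note on the fast path in _rm_rlock() above (illustration only): the
 * bitwise OR lets the compiler emit a single test and branch.  A logically
 * equivalent form, which may compile to two branches, would be:
 *
 *	if (__predict_true(td->td_owepreempt == 0 &&
 *	    !CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
 *		return (1);
 */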

static __noinline void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

        if (td->td_owepreempt) {
                td->td_critnest++;
                critical_exit();
        }

        if (!tracker->rmp_flags)
                return;

        mtx_lock_spin(&rm_spinlock);
        LIST_REMOVE(tracker, rmp_qentry);

        if (tracker->rmp_flags & RMPF_SIGNAL) {
                struct rmlock *rm;
                struct turnstile *ts;

                rm = tracker->rmp_rmlock;

                turnstile_chain_lock(&rm->lock_object);
                mtx_unlock_spin(&rm_spinlock);

                ts = turnstile_lookup(&rm->lock_object);

                turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
                turnstile_unpend(ts);
                turnstile_chain_unlock(&rm->lock_object);
        } else
                mtx_unlock_spin(&rm_spinlock);
}

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
        struct pcpu *pc;
        struct thread *td = tracker->rmp_thread;

        if (SCHEDULER_STOPPED())
                return;

        td->td_critnest++;      /* critical_enter(); */
        pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
        rm_tracker_remove(pc, tracker);
        td->td_critnest--;
        sched_unpin();

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                THREAD_SLEEPING_OK();

        if (__predict_true(0 == (td->td_owepreempt | tracker->rmp_flags)))
                return;

        _rm_unlock_hard(td, tracker);
}

void
_rm_wlock(struct rmlock *rm)
{
        struct rm_priotracker *prio;
        struct turnstile *ts;
        cpuset_t readcpus;

        if (SCHEDULER_STOPPED())
                return;

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_xlock(&rm->rm_lock_sx);
        else
                mtx_lock(&rm->rm_lock_mtx);

        if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
                /* Get all read tokens back */
                readcpus = all_cpus;
                CPU_NAND(&readcpus, &rm->rm_writecpus);
                rm->rm_writecpus = all_cpus;

                /*
                 * Assumes rm->rm_writecpus update is visible on other CPUs
                 * before rm_cleanIPI is called.
                 */
#ifdef SMP
                smp_rendezvous_cpus(readcpus,
                    smp_no_rendezvous_barrier,
                    rm_cleanIPI,
                    smp_no_rendezvous_barrier,
                    rm);

#else
                rm_cleanIPI(rm);
#endif

                mtx_lock_spin(&rm_spinlock);
                while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
                        ts = turnstile_trywait(&rm->lock_object);
                        prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
                        mtx_unlock_spin(&rm_spinlock);
                        turnstile_wait(ts, prio->rmp_thread,
                            TS_EXCLUSIVE_QUEUE);
                        mtx_lock_spin(&rm_spinlock);
                }
                mtx_unlock_spin(&rm_spinlock);
        }
}

void
_rm_wunlock(struct rmlock *rm)
{

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_xunlock(&rm->rm_lock_sx);
        else
                mtx_unlock(&rm->rm_lock_mtx);
}

#if LOCK_DEBUG > 0

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
            curthread, rm->lock_object.lo_name, file, line));
        KASSERT(!rm_destroyed(rm),
            ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
        _rm_assert(rm, RA_UNLOCKED, file, line);

        WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);

        _rm_wlock(rm);

        LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);
        WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
        TD_LOCKS_INC(curthread);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(!rm_destroyed(rm),
            ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
        _rm_assert(rm, RA_WLOCKED, file, line);
        WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_wunlock(rm);
        TD_LOCKS_DEC(curthread);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return (1);

#ifdef INVARIANTS
        if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
                critical_enter();
                KASSERT(rm_trackers_present(get_pcpu(), rm,
                    curthread) == 0,
                    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
                    rm->lock_object.lo_name, file, line));
                critical_exit();
        }
#endif
        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
            curthread, rm->lock_object.lo_name, file, line));
        KASSERT(!rm_destroyed(rm),
            ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
        if (!trylock) {
                KASSERT(!rm_wowned(rm),
                    ("rm_rlock: wlock already held for %s @ %s:%d",
                    rm->lock_object.lo_name, file, line));
                WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
                    NULL);
        }

        if (_rm_rlock(rm, tracker, trylock)) {
                if (trylock)
                        LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
                            line);
                else
                        LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
                            line);
                WITNESS_LOCK(&rm->lock_object, 0, file, line);
                TD_LOCKS_INC(curthread);
                return (1);
        } else if (trylock)
                LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

        return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(!rm_destroyed(rm),
            ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
        _rm_assert(rm, RA_RLOCKED, file, line);
        WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
        LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_runlock(rm, tracker);
        TD_LOCKS_DEC(curthread);
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

        return (_rm_rlock(rm, tracker, trylock));
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        _rm_runlock(rm, tracker);
}

#endif

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rm_assert
#endif

/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
void
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)
{
        int count;

        if (SCHEDULER_STOPPED())
                return;
        switch (what) {
        case RA_LOCKED:
        case RA_LOCKED | RA_RECURSED:
        case RA_LOCKED | RA_NOTRECURSED:
        case RA_RLOCKED:
        case RA_RLOCKED | RA_RECURSED:
        case RA_RLOCKED | RA_NOTRECURSED:
                /*
                 * Handle the write-locked case.  Unlike other
                 * primitives, writers can never recurse.
                 */
                if (rm_wowned(rm)) {
                        if (what & RA_RLOCKED)
                                panic("Lock %s exclusively locked @ %s:%d\n",
                                    rm->lock_object.lo_name, file, line);
                        if (what & RA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    rm->lock_object.lo_name, file, line);
                        break;
                }

                critical_enter();
                count = rm_trackers_present(get_pcpu(), rm, curthread);
                critical_exit();

                if (count == 0)
                        panic("Lock %s not %slocked @ %s:%d\n",
                            rm->lock_object.lo_name, (what & RA_RLOCKED) ?
                            "read " : "", file, line);
                if (count > 1) {
                        if (what & RA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    rm->lock_object.lo_name, file, line);
                } else if (what & RA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);
                break;
        case RA_WLOCKED:
                if (!rm_wowned(rm))
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);
                break;
        case RA_UNLOCKED:
                if (rm_wowned(rm))
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);

                critical_enter();
                count = rm_trackers_present(get_pcpu(), rm, curthread);
                critical_exit();

                if (count != 0)
                        panic("Lock %s read locked @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);
                break;
        default:
                panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
        struct thread *td;

        td = tr->rmp_thread;
        db_printf("   thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
            td->td_proc->p_pid, td->td_name);
        if (tr->rmp_flags & RMPF_ONQUEUE) {
                db_printf("ONQUEUE");
                if (tr->rmp_flags & RMPF_SIGNAL)
                        db_printf(",SIGNAL");
        } else
                db_printf("0");
        db_printf("}\n");
}

static void
db_show_rm(const struct lock_object *lock)
{
        struct rm_priotracker *tr;
        struct rm_queue *queue;
        const struct rmlock *rm;
        struct lock_class *lc;
        struct pcpu *pc;

        rm = (const struct rmlock *)lock;
        db_printf(" writecpus: ");
        ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
        db_printf("\n");
        db_printf(" per-CPU readers:\n");
        STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
                for (queue = pc->pc_rm_queue.rmq_next;
                    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
                        tr = (struct rm_priotracker *)queue;
                        if (tr->rmp_rmlock == rm)
                                print_tracker(tr);
                }
        db_printf(" active readers:\n");
        LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
                print_tracker(tr);
        lc = LOCK_CLASS(&rm->rm_wlock_object);
        db_printf("Backing write-lock (%s):\n", lc->lc_name);
        lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif