/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */
/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 *
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	    testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	    a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	    testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	      religious faith or conviction <the heroic witness to divine
 *	      life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */
/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 *    This rule is less obvious, but is a result of Giant providing the same
 *    semantics as spl().  Basically, when a thread sleeps, it must release
 *    Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence
 *    rule 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 *    This rule is also not quite as obvious.  Giant may be acquired after
 *    a sleepable lock because it is a non-sleepable lock and non-sleepable
 *    locks may always be acquired while holding a sleepable lock.  The second
 *    case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 *    you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 *    acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 *    blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 *    execute.  Thus, acquiring Giant both before and after a sleepable lock
 *    will not result in a lock order reversal.
 */
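/*
 * A minimal illustration of rule 1 (a sketch, not code from this file;
 * "example_mtx" is a hypothetical sleep mutex):
 *
 *	mtx_lock(&Giant);
 *	mtx_lock(&example_mtx);		<-- OK: Giant was acquired first
 *	mtx_unlock(&example_mtx);
 *	mtx_unlock(&Giant);
 *
 *	mtx_lock(&example_mtx);
 *	mtx_lock(&Giant);		<-- reversal; witness complains here
 *	mtx_unlock(&Giant);
 *	mtx_unlock(&example_mtx);
 */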
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_witness.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/stdarg.h>
#if 0
#define KTR_WITNESS	KTR_SUBSYS
#else
#define KTR_WITNESS	0
#endif
/* Easier to stay with the old names. */
#define lo_list		lo_witness_data.lod_list
#define lo_witness	lo_witness_data.lod_witness

/* Define this to check for blessed mutexes */
#undef BLESSING

#define WITNESS_COUNT		1024
#define WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
/*
 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
 * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define LOCK_CHILDCOUNT		((MAXCPU + 1024) * 2)

#define WITNESS_NCHILDREN	6
struct witness_child_list_entry;

struct witness {
        const char      *w_name;
        struct lock_class *w_class;
        STAILQ_ENTRY(witness) w_list;           /* List of all witnesses. */
        STAILQ_ENTRY(witness) w_typelist;       /* Witnesses of a type. */
        struct witness_child_list_entry *w_children;   /* Great evilness... */
        const char      *w_file;
        int             w_line;
        u_int           w_level;
        u_int           w_refcount;
        u_char          w_Giant_squawked:1;
        u_char          w_other_squawked:1;
        u_char          w_same_squawked:1;
        u_char          w_displayed:1;
};

struct witness_child_list_entry {
        struct witness_child_list_entry *wcl_next;
        struct witness *wcl_children[WITNESS_NCHILDREN];
        u_int           wcl_count;
};
STAILQ_HEAD(witness_list, witness);
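/*
 * The children of a witness thus live in a chain of fixed-size arrays:
 * a witness with, say, eight children uses two witness_child_list_entry
 * structures, the first holding WITNESS_NCHILDREN (6) pointers and the
 * second holding the remaining two.
 */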
struct witness_blessed {
        const char      *b_lock1;
        const char      *b_lock2;
};

struct witness_order_list_entry {
        const char      *w_name;
        struct lock_class *w_class;
};
#ifdef BLESSING
static int blessed(struct witness *, struct witness *);
#endif
static int depart(struct witness *w);
static struct witness *enroll(const char *description,
            struct lock_class *lock_class);
static int insertchild(struct witness *parent, struct witness *child);
static int isitmychild(struct witness *parent, struct witness *child);
static int isitmydescendant(struct witness *parent, struct witness *child);
static int itismychild(struct witness *parent, struct witness *child);
static void removechild(struct witness *parent, struct witness *child);
static int sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static const char *fixup_filename(const char *file);
static struct witness *witness_get(void);
static void witness_free(struct witness *m);
static struct witness_child_list_entry *witness_child_get(void);
static void witness_child_free(struct witness_child_list_entry *wcl);
static struct lock_list_entry *witness_lock_list_get(void);
static void witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_instance *find_instance(struct lock_list_entry *lock_list,
            struct lock_object *lock);
static void witness_list_lock(struct lock_instance *instance);
static void witness_leveldescendents(struct witness *parent, int level);
static void witness_levelall(void);
static void witness_displaydescendants(void(*)(const char *fmt, ...),
            struct witness *, int indent);
static void witness_display_list(void(*prnt)(const char *fmt, ...),
            struct witness_list *list);
static void witness_display(void(*)(const char *fmt, ...));
static void witness_list(struct thread *td);
SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");

/*
 * If set to 0, witness is disabled.  If set to a non-zero value, witness
 * performs full lock order checking for all locks.  At runtime, this
 * value may be set to 0 to turn off witness.  witness is not allowed to
 * be turned on once it is turned off, however.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
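/*
 * For example, checking can be disabled at runtime from userland with:
 *
 *	sysctl debug.witness.watch=0
 *
 * Attempts to set it back to a non-zero value are rejected.
 */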
/*
 * When KDB is enabled and witness_kdb is set to 1, it will cause the system
 * to drop into the kernel debugger when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef KDB
int witness_kdb = 1;
#else
int witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");
/*
 * When KDB is enabled and witness_trace is set to 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#ifdef WITNESS_SKIPSPIN
int witness_skipspin = 1;
#else
int witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN,
    &witness_skipspin, 0, "");
static struct mtx w_mtx;
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
static struct witness_child_list_entry *w_child_free = NULL;
static struct lock_list_entry *w_lock_list_free = NULL;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt, w_child_free_cnt, w_child_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_free_cnt, CTLFLAG_RD,
    &w_child_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_cnt, CTLFLAG_RD, &w_child_cnt, 0,
    "");

static struct witness w_data[WITNESS_COUNT];
static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
static struct witness_order_list_entry order_lists[] = {
        /*
         * sx locks
         */
        { "proctree", &lock_class_sx },
        { "allproc", &lock_class_sx },
        { NULL, NULL },
        /*
         * Various mutexes
         */
        { "Giant", &lock_class_mtx_sleep },
        { "filedesc structure", &lock_class_mtx_sleep },
        { "pipe mutex", &lock_class_mtx_sleep },
        { "sigio lock", &lock_class_mtx_sleep },
        { "process group", &lock_class_mtx_sleep },
        { "process lock", &lock_class_mtx_sleep },
        { "session", &lock_class_mtx_sleep },
        { "uidinfo hash", &lock_class_mtx_sleep },
        { "uidinfo struct", &lock_class_mtx_sleep },
        { "allprison", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * Sockets
         */
        { "filedesc structure", &lock_class_mtx_sleep },
        { "accept", &lock_class_mtx_sleep },
        { "so_snd", &lock_class_mtx_sleep },
        { "so_rcv", &lock_class_mtx_sleep },
        { "sellck", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * Routing
         */
        { "so_rcv", &lock_class_mtx_sleep },
        { "radix node head", &lock_class_mtx_sleep },
        { "rtentry", &lock_class_mtx_sleep },
        { "ifaddr", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * Multicast - protocol locks before interface locks, after UDP locks.
         */
        { "udpinp", &lock_class_mtx_sleep },
        { "in_multi_mtx", &lock_class_mtx_sleep },
        { "igmp_mtx", &lock_class_mtx_sleep },
        { "if_addr_mtx", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * UNIX Domain Sockets
         */
        { "unp", &lock_class_mtx_sleep },
        { "so_snd", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * UDP/IP
         */
        { "udp", &lock_class_mtx_sleep },
        { "udpinp", &lock_class_mtx_sleep },
        { "so_snd", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * TCP/IP
         */
        { "tcp", &lock_class_mtx_sleep },
        { "tcpinp", &lock_class_mtx_sleep },
        { "so_snd", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * SLIP
         */
        { "slip_mtx", &lock_class_mtx_sleep },
        { "slip sc_mtx", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * netatalk
         */
        { "ddp_list_mtx", &lock_class_mtx_sleep },
        { "ddp_mtx", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * BPF
         */
        { "bpf global lock", &lock_class_mtx_sleep },
        { "bpf interface lock", &lock_class_mtx_sleep },
        { "bpf cdev lock", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * NFS server
         */
        { "nfsd_mtx", &lock_class_mtx_sleep },
        { "so_snd", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * CDEV
         */
        { "system map", &lock_class_mtx_sleep },
        { "vm page queue mutex", &lock_class_mtx_sleep },
        { "vnode interlock", &lock_class_mtx_sleep },
        { "cdev", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * spin locks
         */
#ifdef SMP
        { "ap boot", &lock_class_mtx_spin },
#endif
        { "rm.mutex_mtx", &lock_class_mtx_spin },
        { "hptlock", &lock_class_mtx_spin },
        { "sio", &lock_class_mtx_spin },
#ifdef __i386__
        { "cy", &lock_class_mtx_spin },
#endif
        { "uart_hwmtx", &lock_class_mtx_spin },
        { "sabtty", &lock_class_mtx_spin },
        { "zstty", &lock_class_mtx_spin },
        { "ng_node", &lock_class_mtx_spin },
        { "ng_worklist", &lock_class_mtx_spin },
        { "taskqueue_fast", &lock_class_mtx_spin },
        { "intr table", &lock_class_mtx_spin },
        { "sleepq chain", &lock_class_mtx_spin },
        { "sched lock", &lock_class_mtx_spin },
        { "turnstile chain", &lock_class_mtx_spin },
        { "td_contested", &lock_class_mtx_spin },
        { "callout", &lock_class_mtx_spin },
        { "entropy harvest mutex", &lock_class_mtx_spin },
        /*
         * leaf locks
         */
        { "allpmaps", &lock_class_mtx_spin },
        { "vm page queue free mutex", &lock_class_mtx_spin },
        { "icu", &lock_class_mtx_spin },
#ifdef SMP
        { "smp rendezvous", &lock_class_mtx_spin },
#if defined(__i386__) || defined(__amd64__)
        { "tlb", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
        { "ipi", &lock_class_mtx_spin },
        { "rtc_mtx", &lock_class_mtx_spin },
#endif
#endif
        { "clk", &lock_class_mtx_spin },
        { "mutex profiling lock", &lock_class_mtx_spin },
        { "kse zombie lock", &lock_class_mtx_spin },
        { "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
        { "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
        { "pcicfg", &lock_class_mtx_spin },
        { "NDIS thread lock", &lock_class_mtx_spin },
#endif
        { "tw_osl_io_lock", &lock_class_mtx_spin },
        { "tw_osl_q_lock", &lock_class_mtx_spin },
        { "tw_cl_io_lock", &lock_class_mtx_spin },
        { "tw_cl_intr_lock", &lock_class_mtx_spin },
        { "tw_cl_gen_lock", &lock_class_mtx_spin },
        { NULL, NULL },
        { NULL, NULL }
};
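/*
 * Each run of names above is one ordering chain, terminated by a
 * { NULL, NULL } entry; the table itself ends with two consecutive
 * NULL entries.  A new static order would be taught by appending a
 * (hypothetical) entry such as
 *
 *	{ "mydriver softc", &lock_class_mtx_sleep },
 *
 * to the appropriate chain above.
 */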
/*
 * Pairs of locks which have been blessed.
 * Don't complain about order problems with blessed locks.
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
    sizeof(blessed_list) / sizeof(struct witness_blessed);
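/*
 * A hypothetical blessed pair would look like this (blessed() matches
 * the two names in either order):
 *
 *	static struct witness_blessed blessed_list[] = {
 *		{ "some lock", "another lock" },
 *	};
 */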
/*
 * List of locks initialized prior to witness being initialized whose
 * enrollment is currently deferred.
 */
STAILQ_HEAD(, lock_object) pending_locks =
    STAILQ_HEAD_INITIALIZER(pending_locks);

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;
/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code does
 * assume that the early boot is single-threaded at least until after this
 * routine is completed.
 */
static void
witness_initialize(void *dummy __unused)
{
        struct lock_object *lock;
        struct witness_order_list_entry *order;
        struct witness *w, *w1;
        int i;

        /*
         * We have to release Giant before initializing its witness
         * structure so that WITNESS doesn't get confused.
         */
        mtx_unlock(&Giant);
        mtx_assert(&Giant, MA_NOTOWNED);

        CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
        mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
            MTX_NOWITNESS);
        for (i = 0; i < WITNESS_COUNT; i++)
                witness_free(&w_data[i]);
        for (i = 0; i < WITNESS_CHILDCOUNT; i++)
                witness_child_free(&w_childdata[i]);
        for (i = 0; i < LOCK_CHILDCOUNT; i++)
                witness_lock_list_free(&w_locklistdata[i]);

        /* First add in all the specified order lists. */
        for (order = order_lists; order->w_name != NULL; order++) {
                w = enroll(order->w_name, order->w_class);
                if (w == NULL)
                        continue;
                w->w_file = "order list";
                for (order++; order->w_name != NULL; order++) {
                        w1 = enroll(order->w_name, order->w_class);
                        if (w1 == NULL)
                                continue;
                        w1->w_file = "order list";
                        if (!itismychild(w, w1))
                                panic("Not enough memory for static orders!");
                        w = w1;
                }
        }
        witness_spin_warn = 1;

        /* Iterate through all locks and add them to witness. */
        while (!STAILQ_EMPTY(&pending_locks)) {
                lock = STAILQ_FIRST(&pending_locks);
                STAILQ_REMOVE_HEAD(&pending_locks, lo_list);
                KASSERT(lock->lo_flags & LO_WITNESS,
                    ("%s: lock %s is on pending list but not LO_WITNESS",
                    __func__, lock->lo_name));
                lock->lo_witness = enroll(lock->lo_type, LOCK_CLASS(lock));
                lock->lo_flags &= ~LO_ENROLLPEND;
        }

        /* Mark the witness code as being ready for use. */
        witness_cold = 0;

        mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)
static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
        int error, value;

        value = witness_watch;
        error = sysctl_handle_int(oidp, &value, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        error = suser(req->td);
        if (error != 0)
                return (error);
        if (value == witness_watch)
                return (0);
        if (value != 0)
                return (EINVAL);
        witness_watch = 0;
        return (0);
}
void
witness_init(struct lock_object *lock)
{
        struct lock_class *class;

        /* Various sanity checks. */
        class = LOCK_CLASS(lock);
        if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
            (class->lc_flags & LC_RECURSABLE) == 0)
                panic("%s: lock (%s) %s can not be recursable", __func__,
                    class->lc_name, lock->lo_name);
        if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
            (class->lc_flags & LC_SLEEPABLE) == 0)
                panic("%s: lock (%s) %s can not be sleepable", __func__,
                    class->lc_name, lock->lo_name);
        if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
            (class->lc_flags & LC_UPGRADABLE) == 0)
                panic("%s: lock (%s) %s can not be upgradable", __func__,
                    class->lc_name, lock->lo_name);

        /*
         * If we shouldn't watch this lock, then just clear lo_witness.
         * Otherwise, if witness_cold is set, then it is too early to
         * enroll this lock, so defer it to witness_initialize() by adding
         * it to the pending_locks list.  If it is not too early, then enroll
         * the lock now.
         */
        if (witness_watch == 0 || panicstr != NULL ||
            (lock->lo_flags & LO_WITNESS) == 0)
                lock->lo_witness = NULL;
        else if (witness_cold) {
                STAILQ_INSERT_TAIL(&pending_locks, lock, lo_list);
                lock->lo_flags |= LO_ENROLLPEND;
        } else
                lock->lo_witness = enroll(lock->lo_type, class);
}
void
witness_destroy(struct lock_object *lock)
{
        struct lock_class *class;
        struct witness *w;

        class = LOCK_CLASS(lock);
        if (witness_cold)
                panic("lock (%s) %s destroyed while witness_cold",
                    class->lc_name, lock->lo_name);

        /* XXX: need to verify that no one holds the lock */
        if ((lock->lo_flags & (LO_WITNESS | LO_ENROLLPEND)) == LO_WITNESS &&
            lock->lo_witness != NULL) {
                w = lock->lo_witness;
                mtx_lock_spin(&w_mtx);
                MPASS(w->w_refcount > 0);
                w->w_refcount--;

                /*
                 * Lock is already released if we have an allocation failure
                 * and depart() fails.
                 */
                if (w->w_refcount != 0 || depart(w))
                        mtx_unlock_spin(&w_mtx);
        }

        /*
         * If this lock is destroyed before witness is up and running,
         * remove it from the pending list.
         */
        if (lock->lo_flags & LO_ENROLLPEND) {
                STAILQ_REMOVE(&pending_locks, lock, lock_object, lo_list);
                lock->lo_flags &= ~LO_ENROLLPEND;
        }
}
static void
witness_levelall(void)
{
        struct witness_list *list;
        struct witness *w, *w1;

        /*
         * First clear all levels.
         */
        STAILQ_FOREACH(w, &w_all, w_list) {
                w->w_level = 0;
        }

        /*
         * Look for locks with no parent and level all their descendants.
         */
        STAILQ_FOREACH(w, &w_all, w_list) {
                /*
                 * This is just an optimization, technically we could get
                 * away just walking the all list each time.
                 */
                if (w->w_class->lc_flags & LC_SLEEPLOCK)
                        list = &w_sleep;
                else
                        list = &w_spin;
                STAILQ_FOREACH(w1, list, w_typelist) {
                        if (isitmychild(w1, w))
                                goto skip;
                }
                witness_leveldescendents(w, 0);
        skip:
                ;       /* silence GCC 3.x */
        }
}
static void
witness_leveldescendents(struct witness *parent, int level)
{
        struct witness_child_list_entry *wcl;
        int i;

        if (parent->w_level < level)
                parent->w_level = level;
        level++;
        for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
                for (i = 0; i < wcl->wcl_count; i++)
                        witness_leveldescendents(wcl->wcl_children[i], level);
}
static void
witness_displaydescendants(void(*prnt)(const char *fmt, ...),
    struct witness *parent, int indent)
{
        struct witness_child_list_entry *wcl;
        int i, level;

        level = parent->w_level;
        prnt("%-2d", level);
        for (i = 0; i < indent; i++)
                prnt(" ");
        if (parent->w_refcount > 0)
                prnt("%s", parent->w_name);
        else
                prnt("(dead)");
        if (parent->w_displayed) {
                prnt(" -- (already displayed)\n");
                return;
        }
        parent->w_displayed = 1;
        if (parent->w_refcount > 0) {
                if (parent->w_file != NULL)
                        prnt(" -- last acquired @ %s:%d", parent->w_file,
                            parent->w_line);
        }
        prnt("\n");
        for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
                for (i = 0; i < wcl->wcl_count; i++)
                        witness_displaydescendants(prnt,
                            wcl->wcl_children[i], indent + 1);
}
static void
witness_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
        struct witness *w;

        STAILQ_FOREACH(w, list, w_typelist) {
                if (w->w_file == NULL || w->w_level > 0)
                        continue;
                /*
                 * This lock has no ancestors, display its descendants.
                 */
                witness_displaydescendants(prnt, w, 0);
        }
}
static void
witness_display(void(*prnt)(const char *fmt, ...))
{
        struct witness *w;

        KASSERT(!witness_cold, ("%s: witness_cold", __func__));
        witness_levelall();

        /* Clear all the displayed flags. */
        STAILQ_FOREACH(w, &w_all, w_list) {
                w->w_displayed = 0;
        }

        /*
         * First, handle sleep locks which have been acquired at least
         * once.
         */
        prnt("Sleep locks:\n");
        witness_display_list(prnt, &w_sleep);

        /*
         * Now do spin locks which have been acquired at least once.
         */
        prnt("\nSpin locks:\n");
        witness_display_list(prnt, &w_spin);

        /*
         * Finally, any locks which have not been acquired yet.
         */
        prnt("\nLocks which were never acquired:\n");
        STAILQ_FOREACH(w, &w_all, w_list) {
                if (w->w_file != NULL || w->w_refcount == 0)
                        continue;
                prnt("%s\n", w->w_name);
        }
}
/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

        if (file == NULL)
                return (NULL);
        while (strncmp(file, "../", 3) == 0)
                file += 3;
        return (file);
}
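/*
 * For example, a file name recorded as "../../../kern/kern_mutex.c"
 * (a hypothetical relative path) would be reported as
 * "kern/kern_mutex.c".
 */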
int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

        if (witness_watch == 0 || panicstr != NULL)
                return (0);

        /* Require locks that witness knows about. */
        if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
            lock2->lo_witness == NULL)
                return (EINVAL);

        MPASS(!mtx_owned(&w_mtx));
        mtx_lock_spin(&w_mtx);

        /*
         * If we already have either an explicit or implied lock order that
         * is the other way around, then return an error.
         */
        if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
                mtx_unlock_spin(&w_mtx);
                return (EDOOFUS);
        }

        /* Try to add the new order. */
        CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
            lock2->lo_type, lock1->lo_type);
        if (!itismychild(lock1->lo_witness, lock2->lo_witness))
                return (ENOMEM);
        mtx_unlock_spin(&w_mtx);
        return (0);
}
void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line)
{
        struct lock_list_entry **lock_list, *lle;
        struct lock_instance *lock1, *lock2;
        struct lock_class *class;
        struct witness *w, *w1;
        struct thread *td;
        int i, j;

        if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
            panicstr != NULL)
                return;
        /*
         * Try locks do not block if they fail to acquire the lock, thus
         * there is no danger of deadlocks or of switching while holding a
         * spin lock if we acquire a lock via a try operation.  This
         * function shouldn't even be called for try locks, so panic if
         * that happens.
         */
        if (flags & LOP_TRYLOCK)
                panic("%s should not be called for try lock operations",
                    __func__);

        w = lock->lo_witness;
        class = LOCK_CLASS(lock);
        td = curthread;
        file = fixup_filename(file);
        if (class->lc_flags & LC_SLEEPLOCK) {
                /*
                 * Since spin locks include a critical section, this check
                 * implicitly enforces a lock order of all sleep locks before
                 * spin locks.
                 */
                if (td->td_critnest != 0 && !kdb_active)
                        panic("blockable sleep lock (%s) %s @ %s:%d",
                            class->lc_name, lock->lo_name, file, line);

                /*
                 * If this is the first lock acquired then just return as
                 * no order checking is needed.
                 */
                if (td->td_sleeplocks == NULL)
                        return;
                lock_list = &td->td_sleeplocks;
        } else {
                /*
                 * If this is the first lock, just return as no order
                 * checking is needed.  We check this in both if clauses
                 * here as unifying the check would require us to use a
                 * critical section to ensure we don't migrate while doing
                 * the check.  Note that if this is not the first lock, we
                 * are already in a critical section and are safe for the
                 * rest of the check.
                 */
                if (PCPU_GET(spinlocks) == NULL)
                        return;
                lock_list = PCPU_PTR(spinlocks);
        }
        /*
         * Check to see if we are recursing on a lock we already own.  If
         * so, make sure that we don't mismatch exclusive and shared lock
         * acquires.
         */
        lock1 = find_instance(*lock_list, lock);
        if (lock1 != NULL) {
                if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
                    (flags & LOP_EXCLUSIVE) == 0) {
                        printf("shared lock of (%s) %s @ %s:%d\n",
                            class->lc_name, lock->lo_name, file, line);
                        printf("while exclusively locked from %s:%d\n",
                            lock1->li_file, lock1->li_line);
                        panic("share->excl");
                }
                if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
                    (flags & LOP_EXCLUSIVE) != 0) {
                        printf("exclusive lock of (%s) %s @ %s:%d\n",
                            class->lc_name, lock->lo_name, file, line);
                        printf("while share locked from %s:%d\n",
                            lock1->li_file, lock1->li_line);
                        panic("excl->share");
                }
                return;
        }

        /*
         * Try locks do not block if they fail to acquire the lock, thus
         * there is no danger of deadlocks or of switching while holding a
         * spin lock if we acquire a lock via a try operation.
         */
        if (flags & LOP_TRYLOCK)
                return;

        /*
         * Check for duplicate locks of the same type.  Note that we only
         * have to check for this on the last lock we just acquired.  Any
         * other cases will be caught as lock order violations.
         */
        lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
        w1 = lock1->li_lock->lo_witness;
        if (w1 == w) {
                if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK) ||
                    (flags & LOP_DUPOK))
                        return;
                w->w_same_squawked = 1;
                printf("acquiring duplicate lock of same type: \"%s\"\n",
                    lock->lo_type);
                printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
                    lock1->li_file, lock1->li_line);
                printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef KDB
                goto debugger;
#else
                return;
#endif
        }
        MPASS(!mtx_owned(&w_mtx));
        mtx_lock_spin(&w_mtx);
        /*
         * If we know that the lock we are acquiring comes after
         * the lock we most recently acquired in the lock order tree,
         * then there is no need for any further checks.
         */
        if (isitmychild(w1, w)) {
                mtx_unlock_spin(&w_mtx);
                return;
        }
        for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
                for (i = lle->ll_count - 1; i >= 0; i--, j++) {

                        MPASS(j < WITNESS_COUNT);
                        lock1 = &lle->ll_children[i];
                        w1 = lock1->li_lock->lo_witness;

                        /*
                         * If this lock doesn't undergo witness checking,
                         * then skip it.
                         */
                        if (w1 == NULL) {
                                KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
                                    ("lock missing witness structure"));
                                continue;
                        }
                        /*
                         * If we are locking Giant and this is a sleepable
                         * lock, then skip it.
                         */
                        if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
                            lock == &Giant.mtx_object)
                                continue;
                        /*
                         * If we are locking a sleepable lock and this lock
                         * is Giant, then skip it.
                         */
                        if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
                            lock1->li_lock == &Giant.mtx_object)
                                continue;
                        /*
                         * If we are locking a sleepable lock and this lock
                         * isn't sleepable, we want to treat it as a lock
                         * order violation to enforce a general lock order of
                         * sleepable locks before non-sleepable locks.
                         */
                        if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
                            (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
                                goto reversal;
                        /*
                         * If we are locking Giant and this is a non-sleepable
                         * lock, then treat it as a reversal.
                         */
                        if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
                            lock == &Giant.mtx_object)
                                goto reversal;
                        /*
                         * Check the lock order hierarchy for a reversal.
                         */
                        if (!isitmydescendant(w, w1))
                                continue;
                reversal:
                        /*
                         * We have a lock order violation, check to see if it
                         * is allowed or has already been yelled about.
                         */
                        mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
                        /*
                         * If the lock order is blessed, just bail.  We don't
                         * look for other lock order violations though, which
                         * may be a bug.
                         */
                        if (blessed(w, w1))
                                return;
#endif
                        if (lock1->li_lock == &Giant.mtx_object) {
                                if (w1->w_Giant_squawked)
                                        return;
                                else
                                        w1->w_Giant_squawked = 1;
                        } else {
                                if (w1->w_other_squawked)
                                        return;
                                else
                                        w1->w_other_squawked = 1;
                        }
                        /*
                         * Ok, yell about it.
                         */
                        if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
                            (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
                                printf(
                "lock order reversal: (sleepable after non-sleepable)\n");
                        else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
                            && lock == &Giant.mtx_object)
                                printf(
                "lock order reversal: (Giant after non-sleepable)\n");
                        else
                                printf("lock order reversal:\n");
                        /*
                         * Try to locate an earlier lock with
                         * witness w in our list.
                         */
                        do {
                                lock2 = &lle->ll_children[i];
                                MPASS(lock2->li_lock != NULL);
                                if (lock2->li_lock->lo_witness == w)
                                        break;
                                if (i == 0 && lle->ll_next != NULL) {
                                        lle = lle->ll_next;
                                        i = lle->ll_count - 1;
                                        MPASS(i >= 0 && i < LOCK_NCHILDREN);
                                } else
                                        i--;
                        } while (i >= 0);
                        if (i < 0) {
                                printf(" 1st %p %s (%s) @ %s:%d\n",
                                    lock1->li_lock, lock1->li_lock->lo_name,
                                    lock1->li_lock->lo_type, lock1->li_file,
                                    lock1->li_line);
                                printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
                                    lock->lo_name, lock->lo_type, file, line);
                        } else {
                                printf(" 1st %p %s (%s) @ %s:%d\n",
                                    lock2->li_lock, lock2->li_lock->lo_name,
                                    lock2->li_lock->lo_type, lock2->li_file,
                                    lock2->li_line);
                                printf(" 2nd %p %s (%s) @ %s:%d\n",
                                    lock1->li_lock, lock1->li_lock->lo_name,
                                    lock1->li_lock->lo_type, lock1->li_file,
                                    lock1->li_line);
                                printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
                                    lock->lo_name, lock->lo_type, file, line);
                        }
#ifdef KDB
                        goto debugger;
#else
                        return;
#endif
                }
        }
        lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
        /*
         * If requested, build a new lock order.  However, don't build a new
         * relationship between a sleepable lock and Giant if it is in the
         * wrong direction.  The correct lock order is that sleepable locks
         * always come before Giant.
         */
        if (flags & LOP_NEWORDER &&
            !(lock1->li_lock == &Giant.mtx_object &&
            (lock->lo_flags & LO_SLEEPABLE) != 0)) {
                CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
                    lock->lo_type, lock1->li_lock->lo_type);
                if (!itismychild(lock1->li_lock->lo_witness, w))
                        /* Witness is dead. */
                        return;
        }
        mtx_unlock_spin(&w_mtx);
        return;

#ifdef KDB
debugger:
        if (witness_trace)
                kdb_backtrace();
        if (witness_kdb)
                kdb_enter(__func__);
#endif
}
void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
        struct lock_list_entry **lock_list, *lle;
        struct lock_instance *instance;
        struct witness *w;
        struct thread *td;

        if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
            panicstr != NULL)
                return;
        w = lock->lo_witness;
        td = curthread;
        file = fixup_filename(file);

        /* Determine lock list for this lock. */
        if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
                lock_list = &td->td_sleeplocks;
        else
                lock_list = PCPU_PTR(spinlocks);

        /* Check to see if we are recursing on a lock we already own. */
        instance = find_instance(*lock_list, lock);
        if (instance != NULL) {
                instance->li_flags++;
                CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
                    td->td_proc->p_pid, lock->lo_name,
                    instance->li_flags & LI_RECURSEMASK);
                instance->li_file = file;
                instance->li_line = line;
                return;
        }

        /* Update per-witness last file and line acquire. */
        w->w_file = file;
        w->w_line = line;

        /* Find the next open lock instance in the list and fill it. */
        lle = *lock_list;
        if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
                lle = witness_lock_list_get();
                if (lle == NULL)
                        return;
                lle->ll_next = *lock_list;
                CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
                    td->td_proc->p_pid, lle);
                *lock_list = lle;
        }
        instance = &lle->ll_children[lle->ll_count++];
        instance->li_lock = lock;
        instance->li_line = line;
        instance->li_file = file;
        if ((flags & LOP_EXCLUSIVE) != 0)
                instance->li_flags = LI_EXCLUSIVE;
        else
                instance->li_flags = 0;
        CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
            td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}
void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
        struct lock_instance *instance;
        struct lock_class *class;

        KASSERT(!witness_cold, ("%s: witness_cold", __func__));
        if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
                return;
        class = LOCK_CLASS(lock);
        file = fixup_filename(file);
        if ((lock->lo_flags & LO_UPGRADABLE) == 0)
                panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        if ((flags & LOP_TRYLOCK) == 0)
                panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
                    lock->lo_name, file, line);
        if ((class->lc_flags & LC_SLEEPLOCK) == 0)
                panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        instance = find_instance(curthread->td_sleeplocks, lock);
        if (instance == NULL)
                panic("upgrade of unlocked lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        if ((instance->li_flags & LI_EXCLUSIVE) != 0)
                panic("upgrade of exclusive lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        if ((instance->li_flags & LI_RECURSEMASK) != 0)
                panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
                    class->lc_name, lock->lo_name,
                    instance->li_flags & LI_RECURSEMASK, file, line);
        instance->li_flags |= LI_EXCLUSIVE;
}
void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
        struct lock_instance *instance;
        struct lock_class *class;

        KASSERT(!witness_cold, ("%s: witness_cold", __func__));
        if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
                return;
        class = LOCK_CLASS(lock);
        file = fixup_filename(file);
        if ((lock->lo_flags & LO_UPGRADABLE) == 0)
                panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        if ((class->lc_flags & LC_SLEEPLOCK) == 0)
                panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        instance = find_instance(curthread->td_sleeplocks, lock);
        if (instance == NULL)
                panic("downgrade of unlocked lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        if ((instance->li_flags & LI_EXCLUSIVE) == 0)
                panic("downgrade of shared lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        if ((instance->li_flags & LI_RECURSEMASK) != 0)
                panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
                    class->lc_name, lock->lo_name,
                    instance->li_flags & LI_RECURSEMASK, file, line);
        instance->li_flags &= ~LI_EXCLUSIVE;
}
void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
        struct lock_list_entry **lock_list, *lle;
        struct lock_instance *instance;
        struct lock_class *class;
        struct thread *td;
        int i, j;

        if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
            panicstr != NULL)
                return;
        td = curthread;
        class = LOCK_CLASS(lock);
        file = fixup_filename(file);

        /* Find lock instance associated with this lock. */
        if (class->lc_flags & LC_SLEEPLOCK)
                lock_list = &td->td_sleeplocks;
        else
                lock_list = PCPU_PTR(spinlocks);
        for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
                for (i = 0; i < (*lock_list)->ll_count; i++) {
                        instance = &(*lock_list)->ll_children[i];
                        if (instance->li_lock == lock)
                                goto found;
                }
        panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
            file, line);
found:

        /* First, check for shared/exclusive mismatches. */
        if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
            (flags & LOP_EXCLUSIVE) == 0) {
                printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
                    lock->lo_name, file, line);
                printf("while exclusively locked from %s:%d\n",
                    instance->li_file, instance->li_line);
                panic("excl->ushare");
        }
        if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
            (flags & LOP_EXCLUSIVE) != 0) {
                printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
                    lock->lo_name, file, line);
                printf("while share locked from %s:%d\n", instance->li_file,
                    instance->li_line);
                panic("share->uexcl");
        }

        /* If we are recursed, unrecurse. */
        if ((instance->li_flags & LI_RECURSEMASK) > 0) {
                CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
                    td->td_proc->p_pid, instance->li_lock->lo_name,
                    instance->li_flags);
                instance->li_flags--;
                return;
        }

        /* Otherwise, remove this item from the list. */
        CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
            td->td_proc->p_pid, instance->li_lock->lo_name,
            (*lock_list)->ll_count - 1);
        for (j = i; j < (*lock_list)->ll_count - 1; j++)
                (*lock_list)->ll_children[j] =
                    (*lock_list)->ll_children[j + 1];
        (*lock_list)->ll_count--;

        /* If this lock list entry is now empty, free it. */
        if ((*lock_list)->ll_count == 0) {
                lle = *lock_list;
                *lock_list = lle->ll_next;
                CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
                    td->td_proc->p_pid, lle);
                witness_lock_list_free(lle);
        }
}
/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
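/*
 * A sketch of a typical call (the message text is illustrative only):
 *
 *	witness_warn(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 *	    "sleeping with locks held");
 */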
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
        struct lock_list_entry *lle;
        struct lock_instance *lock1;
        struct thread *td;
        va_list ap;
        int i, n;

        if (witness_cold || witness_watch == 0 || panicstr != NULL)
                return (0);
        n = 0;
        td = curthread;
        for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
                for (i = lle->ll_count - 1; i >= 0; i--) {
                        lock1 = &lle->ll_children[i];
                        if (lock1->li_lock == lock)
                                continue;
                        if (flags & WARN_GIANTOK &&
                            lock1->li_lock == &Giant.mtx_object)
                                continue;
                        if (flags & WARN_SLEEPOK &&
                            (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
                                continue;
                        if (n == 0) {
                                va_start(ap, fmt);
                                vprintf(fmt, ap);
                                va_end(ap);
                                printf(" with the following");
                                if (flags & WARN_SLEEPOK)
                                        printf(" non-sleepable");
                                printf(" locks held:\n");
                        }
                        n++;
                        witness_list_lock(lock1);
                }
        if (PCPU_GET(spinlocks) != NULL) {
                /*
                 * Since we already hold a spinlock preemption is
                 * not an issue.
                 */
                if (n == 0) {
                        va_start(ap, fmt);
                        vprintf(fmt, ap);
                        va_end(ap);
                        printf(" with the following");
                        if (flags & WARN_SLEEPOK)
                                printf(" non-sleepable");
                        printf(" locks held:\n");
                }
                n += witness_list_locks(PCPU_PTR(spinlocks));
        }
        if (flags & WARN_PANIC && n)
                panic("witness_warn");
#ifdef KDB
        else if (witness_kdb && n)
                kdb_enter(__func__);
        else if (witness_trace && n)
                kdb_backtrace();
#endif
        return (n);
}
const char *
witness_file(struct lock_object *lock)
{
        struct witness *w;

        if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
                return ("?");
        w = lock->lo_witness;
        return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
        struct witness *w;

        if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
                return (0);
        w = lock->lo_witness;
        return (w->w_line);
}
static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
        struct witness *w;

        if (witness_watch == 0 || panicstr != NULL)
                return (NULL);
        if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
                return (NULL);
        mtx_lock_spin(&w_mtx);
        STAILQ_FOREACH(w, &w_all, w_list) {
                if (w->w_name == description || (w->w_refcount > 0 &&
                    strcmp(description, w->w_name) == 0)) {
                        w->w_refcount++;
                        mtx_unlock_spin(&w_mtx);
                        if (lock_class != w->w_class)
                                panic(
                                "lock (%s) %s does not match earlier (%s) lock",
                                    description, lock_class->lc_name,
                                    w->w_class->lc_name);
                        return (w);
                }
        }
        if ((w = witness_get()) == NULL)
                return (NULL);
        w->w_name = description;
        w->w_class = lock_class;
        w->w_refcount = 1;
        STAILQ_INSERT_HEAD(&w_all, w, w_list);
        if (lock_class->lc_flags & LC_SPINLOCK) {
                STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
                w_spin_cnt++;
        } else if (lock_class->lc_flags & LC_SLEEPLOCK) {
                STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
                w_sleep_cnt++;
        } else {
                mtx_unlock_spin(&w_mtx);
                panic("lock class %s is not sleep or spin",
                    lock_class->lc_name);
        }
        mtx_unlock_spin(&w_mtx);

        /*
         * We issue a warning for any spin locks not defined in the static
         * order list as a way to discourage their use (folks should really
         * be using non-spin mutexes most of the time).  However, several
         * 3rd-party device drivers use spin locks because that is all they
         * have available on Windows and Linux and they think that normal
         * mutexes are insufficient.
         */
        if ((lock_class->lc_flags & LC_SPINLOCK) && witness_spin_warn)
                printf("WITNESS: spin lock %s not in order list\n",
                    w->w_name);
        return (w);
}
/* Don't let the door bang you on the way out... */
static int
depart(struct witness *w)
{
        struct witness_child_list_entry *wcl, *nwcl;
        struct witness_list *list;
        struct witness *parent;

        MPASS(w->w_refcount == 0);
        if (w->w_class->lc_flags & LC_SLEEPLOCK) {
                list = &w_sleep;
                w_sleep_cnt--;
        } else {
                list = &w_spin;
                w_spin_cnt--;
        }
        /*
         * First, we run through the entire tree looking for any
         * witnesses that the outgoing witness is a child of.  For
         * each parent that we find, we reparent all the direct
         * children of the outgoing witness to its parent.
         */
        STAILQ_FOREACH(parent, list, w_typelist) {
                if (!isitmychild(parent, w))
                        continue;
                removechild(parent, w);
        }

        /*
         * Now we go through and free up the child list of the
         * outgoing witness.
         */
        for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
                nwcl = wcl->wcl_next;
                w_child_cnt--;
                witness_child_free(wcl);
        }

        /*
         * Detach from various lists and free.
         */
        STAILQ_REMOVE(list, w, witness, w_typelist);
        STAILQ_REMOVE(&w_all, w, witness, w_list);
        witness_free(w);

        return (1);
}
/*
 * Add "child" as a direct child of "parent".  Returns false if
 * we fail due to out of memory.
 */
static int
insertchild(struct witness *parent, struct witness *child)
{
        struct witness_child_list_entry **wcl;

        MPASS(child != NULL && parent != NULL);

        /*
         * Insert "child" after "parent"
         */
        wcl = &parent->w_children;
        while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
                wcl = &(*wcl)->wcl_next;
        if (*wcl == NULL) {
                *wcl = witness_child_get();
                if (*wcl == NULL)
                        return (0);
                w_child_cnt++;
        }
        (*wcl)->wcl_children[(*wcl)->wcl_count++] = child;

        return (1);
}
static int
itismychild(struct witness *parent, struct witness *child)
{
        struct witness_list *list;

        MPASS(child != NULL && parent != NULL);
        if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
            (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
                panic(
                "%s: parent (%s) and child (%s) are not the same lock type",
                    __func__, parent->w_class->lc_name,
                    child->w_class->lc_name);

        if (!insertchild(parent, child))
                return (0);

        if (parent->w_class->lc_flags & LC_SLEEPLOCK)
                list = &w_sleep;
        else
                list = &w_spin;
        return (1);
}
static void
removechild(struct witness *parent, struct witness *child)
{
        struct witness_child_list_entry **wcl, *wcl1;
        int i;

        for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
                for (i = 0; i < (*wcl)->wcl_count; i++)
                        if ((*wcl)->wcl_children[i] == child)
                                goto found;
        return;
found:
        (*wcl)->wcl_count--;
        if ((*wcl)->wcl_count > i)
                (*wcl)->wcl_children[i] =
                    (*wcl)->wcl_children[(*wcl)->wcl_count];
        MPASS((*wcl)->wcl_children[i] != NULL);
        if ((*wcl)->wcl_count != 0)
                return;
        wcl1 = *wcl;
        *wcl = wcl1->wcl_next;
        w_child_cnt--;
        witness_child_free(wcl1);
}
static int
isitmychild(struct witness *parent, struct witness *child)
{
        struct witness_child_list_entry *wcl;
        int i;

        for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
                for (i = 0; i < wcl->wcl_count; i++) {
                        if (wcl->wcl_children[i] == child)
                                return (1);
                }
        }
        return (0);
}

static int
isitmydescendant(struct witness *parent, struct witness *child)
{
        struct witness_child_list_entry *wcl;
        int i;

        if (isitmychild(parent, child))
                return (1);

        for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
                for (i = 0; i < wcl->wcl_count; i++) {
                        if (isitmydescendant(wcl->wcl_children[i], child))
                                return (1);
                }
        }
        return (0);
}
#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
        int i;
        struct witness_blessed *b;

        for (i = 0; i < blessed_count; i++) {
                b = &blessed_list[i];
                if (strcmp(w1->w_name, b->b_lock1) == 0) {
                        if (strcmp(w2->w_name, b->b_lock2) == 0)
                                return (1);
                        continue;
                }
                if (strcmp(w1->w_name, b->b_lock2) == 0)
                        if (strcmp(w2->w_name, b->b_lock1) == 0)
                                return (1);
        }
        return (0);
}
#endif
static struct witness *
witness_get(void)
{
        struct witness *w;

        if (witness_watch == 0) {
                mtx_unlock_spin(&w_mtx);
                return (NULL);
        }
        if (STAILQ_EMPTY(&w_free)) {
                witness_watch = 0;
                mtx_unlock_spin(&w_mtx);
                printf("%s: witness exhausted\n", __func__);
                return (NULL);
        }
        w = STAILQ_FIRST(&w_free);
        STAILQ_REMOVE_HEAD(&w_free, w_list);
        w_free_cnt--;
        bzero(w, sizeof(*w));
        return (w);
}

static void
witness_free(struct witness *w)
{

        STAILQ_INSERT_HEAD(&w_free, w, w_list);
        w_free_cnt++;
}
static struct witness_child_list_entry *
witness_child_get(void)
{
        struct witness_child_list_entry *wcl;

        if (witness_watch == 0) {
                mtx_unlock_spin(&w_mtx);
                return (NULL);
        }
        wcl = w_child_free;
        if (wcl == NULL) {
                witness_watch = 0;
                mtx_unlock_spin(&w_mtx);
                printf("%s: witness exhausted\n", __func__);
                return (NULL);
        }
        w_child_free = wcl->wcl_next;
        w_child_free_cnt--;
        bzero(wcl, sizeof(*wcl));
        return (wcl);
}

static void
witness_child_free(struct witness_child_list_entry *wcl)
{

        wcl->wcl_next = w_child_free;
        w_child_free = wcl;
        w_child_free_cnt++;
}
static struct lock_list_entry *
witness_lock_list_get(void)
{
        struct lock_list_entry *lle;

        if (witness_watch == 0)
                return (NULL);
        mtx_lock_spin(&w_mtx);
        lle = w_lock_list_free;
        if (lle == NULL) {
                witness_watch = 0;
                mtx_unlock_spin(&w_mtx);
                printf("%s: witness exhausted\n", __func__);
                return (NULL);
        }
        w_lock_list_free = lle->ll_next;
        mtx_unlock_spin(&w_mtx);
        bzero(lle, sizeof(*lle));
        return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

        mtx_lock_spin(&w_mtx);
        lle->ll_next = w_lock_list_free;
        w_lock_list_free = lle;
        mtx_unlock_spin(&w_mtx);
}
static struct lock_instance *
find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
{
        struct lock_list_entry *lle;
        struct lock_instance *instance;
        int i;

        for (lle = lock_list; lle != NULL; lle = lle->ll_next)
                for (i = lle->ll_count - 1; i >= 0; i--) {
                        instance = &lle->ll_children[i];
                        if (instance->li_lock == lock)
                                return (instance);
                }
        return (NULL);
}
static void
witness_list_lock(struct lock_instance *instance)
{
        struct lock_object *lock;

        lock = instance->li_lock;
        printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
            "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
        if (lock->lo_type != lock->lo_name)
                printf(" (%s)", lock->lo_type);
        printf(" r = %d (%p) locked @ %s:%d\n",
            instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
            instance->li_line);
}
int
witness_thread_has_locks(struct thread *td)
{

        return (td->td_sleeplocks != NULL);
}

int
witness_proc_has_locks(struct proc *p)
{
        struct thread *td;

        FOREACH_THREAD_IN_PROC(p, td) {
                if (witness_thread_has_locks(td))
                        return (1);
        }
        return (0);
}
int
witness_list_locks(struct lock_list_entry **lock_list)
{
        struct lock_list_entry *lle;
        int i, nheld;

        nheld = 0;
        for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
                for (i = lle->ll_count - 1; i >= 0; i--) {
                        witness_list_lock(&lle->ll_children[i]);
                        nheld++;
                }
        return (nheld);
}
/*
 * This is a bit risky at best.  We call this function when we have timed
 * out acquiring a spin lock, and we assume that the other CPU is stuck
 * with this lock held.  So, we go groveling around in the other CPU's
 * per-cpu data to try to find the lock instance for this spin lock to
 * see when it was last acquired.
 */
void
witness_display_spinlock(struct lock_object *lock, struct thread *owner)
{
        struct lock_instance *instance;
        struct pcpu *pc;

        if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
                return;
        pc = pcpu_find(owner->td_oncpu);
        instance = find_instance(pc->pc_spinlocks, lock);
        if (instance != NULL)
                witness_list_lock(instance);
}
void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
        struct lock_list_entry *lock_list;
        struct lock_instance *instance;
        struct lock_class *class;

        KASSERT(!witness_cold, ("%s: witness_cold", __func__));
        if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
                return;
        class = LOCK_CLASS(lock);
        if (class->lc_flags & LC_SLEEPLOCK)
                lock_list = curthread->td_sleeplocks;
        else {
                if (witness_skipspin)
                        return;
                lock_list = PCPU_GET(spinlocks);
        }
        instance = find_instance(lock_list, lock);
        if (instance == NULL)
                panic("%s: lock (%s) %s not locked", __func__,
                    class->lc_name, lock->lo_name);
        *filep = instance->li_file;
        *linep = instance->li_line;
}
void
witness_restore(struct lock_object *lock, const char *file, int line)
{
        struct lock_list_entry *lock_list;
        struct lock_instance *instance;
        struct lock_class *class;

        KASSERT(!witness_cold, ("%s: witness_cold", __func__));
        if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
                return;
        class = LOCK_CLASS(lock);
        if (class->lc_flags & LC_SLEEPLOCK)
                lock_list = curthread->td_sleeplocks;
        else {
                if (witness_skipspin)
                        return;
                lock_list = PCPU_GET(spinlocks);
        }
        instance = find_instance(lock_list, lock);
        if (instance == NULL)
                panic("%s: lock (%s) %s not locked", __func__,
                    class->lc_name, lock->lo_name);
        lock->lo_witness->w_file = file;
        lock->lo_witness->w_line = line;
        instance->li_file = file;
        instance->li_line = line;
}
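/*
 * witness_save() and witness_restore() are meant to be used as a pair
 * around code that temporarily drops and re-acquires a lock, so that the
 * recorded acquisition point survives the round trip.  A sketch, where
 * "lo" is a hypothetical lock object:
 *
 *	witness_save(lo, &file, &line);
 *	... drop the lock, sleep, re-acquire the lock ...
 *	witness_restore(lo, file, line);
 */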
void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
        struct lock_instance *instance;
        struct lock_class *class;

        if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
                return;
        class = LOCK_CLASS(lock);
        if ((class->lc_flags & LC_SLEEPLOCK) != 0)
                instance = find_instance(curthread->td_sleeplocks, lock);
        else if ((class->lc_flags & LC_SPINLOCK) != 0)
                instance = find_instance(PCPU_GET(spinlocks), lock);
        else
                panic("Lock (%s) %s is not sleep or spin!",
                    class->lc_name, lock->lo_name);
        file = fixup_filename(file);
        switch (flags) {
        case LA_UNLOCKED:
                if (instance != NULL)
                        panic("Lock (%s) %s locked @ %s:%d.",
                            class->lc_name, lock->lo_name, file, line);
                break;
        case LA_LOCKED:
        case LA_LOCKED | LA_RECURSED:
        case LA_LOCKED | LA_NOTRECURSED:
        case LA_SLOCKED:
        case LA_SLOCKED | LA_RECURSED:
        case LA_SLOCKED | LA_NOTRECURSED:
        case LA_XLOCKED:
        case LA_XLOCKED | LA_RECURSED:
        case LA_XLOCKED | LA_NOTRECURSED:
                if (instance == NULL) {
                        panic("Lock (%s) %s not locked @ %s:%d.",
                            class->lc_name, lock->lo_name, file, line);
                        break;
                }
                if ((flags & LA_XLOCKED) != 0 &&
                    (instance->li_flags & LI_EXCLUSIVE) == 0)
                        panic("Lock (%s) %s not exclusively locked @ %s:%d.",
                            class->lc_name, lock->lo_name, file, line);
                if ((flags & LA_SLOCKED) != 0 &&
                    (instance->li_flags & LI_EXCLUSIVE) != 0)
                        panic("Lock (%s) %s exclusively locked @ %s:%d.",
                            class->lc_name, lock->lo_name, file, line);
                if ((flags & LA_RECURSED) != 0 &&
                    (instance->li_flags & LI_RECURSEMASK) == 0)
                        panic("Lock (%s) %s not recursed @ %s:%d.",
                            class->lc_name, lock->lo_name, file, line);
                if ((flags & LA_NOTRECURSED) != 0 &&
                    (instance->li_flags & LI_RECURSEMASK) != 0)
                        panic("Lock (%s) %s recursed @ %s:%d.",
                            class->lc_name, lock->lo_name, file, line);
                break;
        default:
                panic("Invalid lock assertion at %s:%d.", file, line);

        }
#endif  /* INVARIANT_SUPPORT */
}
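/*
 * A sketch of a caller-side assertion (the LA_* flags come from
 * sys/lock.h; "sc" is a hypothetical softc):
 *
 *	witness_assert(&sc->sc_mtx.mtx_object, LA_XLOCKED,
 *	    __FILE__, __LINE__);
 */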
#ifdef DDB

static void
witness_list(struct thread *td)
{

        KASSERT(!witness_cold, ("%s: witness_cold", __func__));
        KASSERT(kdb_active, ("%s: not in the debugger", __func__));

        if (witness_watch == 0)
                return;

        witness_list_locks(&td->td_sleeplocks);

        /*
         * We only handle spinlocks if td == curthread.  This is somewhat broken
         * if td is currently executing on some other CPU and holds spin locks
         * as we won't display those locks.  If we had a MI way of getting
         * the per-cpu data for a given cpu then we could use
         * td->td_oncpu to get the list of spinlocks for this thread
         * and "fix" this.
         *
         * That still wouldn't really fix this unless we locked sched_lock
         * or stopped the other CPU to make sure it wasn't changing the list
         * out from under us.  It is probably best to just not try to handle
         * threads on other CPU's for now.
         */
        if (td == curthread && PCPU_GET(spinlocks) != NULL)
                witness_list_locks(PCPU_PTR(spinlocks));
}
DB_SHOW_COMMAND(locks, db_witness_list)
{
        struct thread *td;
        pid_t pid;
        struct proc *p;

        if (have_addr) {
                pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
                    ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
                    ((addr >> 16) % 16) * 10000;
                /* sx_slock(&allproc_lock); */
                FOREACH_PROC_IN_SYSTEM(p) {
                        if (p->p_pid == pid)
                                break;
                }
                /* sx_sunlock(&allproc_lock); */
                if (p == NULL) {
                        db_printf("pid %d not found\n", pid);
                        return;
                }
                FOREACH_THREAD_IN_PROC(p, td) {
                        witness_list(td);
                }
        } else {
                td = curthread;
                witness_list(td);
        }
}
DB_SHOW_COMMAND(alllocks, db_witness_list_all)
{
        struct thread *td;
        struct proc *p;

        /*
         * It would be nice to list only threads and processes that actually
         * held sleep locks, but that information is currently not exported
         * by WITNESS.
         */
        FOREACH_PROC_IN_SYSTEM(p) {
                if (!witness_proc_has_locks(p))
                        continue;
                FOREACH_THREAD_IN_PROC(p, td) {
                        if (!witness_thread_has_locks(td))
                                continue;
                        db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
                            p->p_comm, td, td->td_tid);
                        witness_list(td);
                }
        }
}
DB_SHOW_COMMAND(witness, db_witness_display)
{

        witness_display(db_printf);
}
#endif  /* DDB */