 * Copyright (c) 2008 Isilon Systems, Inc.
 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
 * Copyright (c) 1998 Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 *
 * Pronunciation: 'wit-n&s
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *    religious faith or conviction <the heroic witness to divine
 * 6 capitalized : a member of the Jehovah's Witnesses
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 *    This rule is less obvious, but is a result of Giant providing the same
 *    semantics as spl().  Basically, when a thread sleeps, it must release
 *    Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 *    2.
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 *    This rule is also not quite as obvious.  Giant may be acquired after
 *    a sleepable lock because it is a non-sleepable lock and non-sleepable
 *    locks may always be acquired while holding a sleepable lock.  The second
 *    case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 *    you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 *    acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 *    blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 *    execute.  Thus, acquiring Giant both before and after a sleepable lock
 *    will not result in a lock order reversal.
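 *
 * An added illustrative sketch (not part of the original commentary; the
 * sx(9) lock name is hypothetical) of why rule 3 is safe, given rule 2:
 *
 *	Thread A:			Thread B:
 *	sx_xlock(&example_sx);		mtx_lock(&Giant);
 *	mtx_lock(&Giant);		sx_xlock(&example_sx);
 *
 * If B blocks on the sx lock, it sleeps and therefore drops Giant (rule 2),
 * which lets A acquire Giant and eventually release the sx lock.  No
 * deadlock is possible, so witness need not treat this as a reversal.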
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_stack.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/stdarg.h>

#if !defined(DDB) && !defined(STACK)
#error "DDB or STACK options are required for WITNESS"
#endif

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif
#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
#define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
#define	WITNESS_PENDLIST	768

/* Allocate 256 KB of stack data space */
#define	WITNESS_LO_DATA_COUNT	2048

/* Prime, gives load factor of ~2 at full load */
#define	WITNESS_LO_HASH_SIZE	1021

/*
 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
 * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_NCHILDREN	5
#define	LOCK_CHILDCOUNT	2048
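/*
 * Added note for illustration: with the values above, the static pool of
 * lock list entries can track at most LOCK_CHILDCOUNT * LOCK_NCHILDREN =
 * 2048 * 5 = 10240 concurrently held lock instances system-wide.
 */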
#define	MAX_W_NAME	64

#define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
#define	FULLGRAPH_SBUF_SIZE	512
/*
 * These flags go in the witness relationship matrix and describe the
 * relationship between any two struct witness objects.
 */
#define	WITNESS_UNRELATED	 0x00	/* No lock order relation. */
#define	WITNESS_PARENT		 0x01	/* Parent, aka direct ancestor. */
#define	WITNESS_ANCESTOR	 0x02	/* Direct or indirect ancestor. */
#define	WITNESS_CHILD		 0x04	/* Child, aka direct descendant. */
#define	WITNESS_DESCENDANT	 0x08	/* Direct or indirect descendant. */
#define	WITNESS_ANCESTOR_MASK	 (WITNESS_PARENT | WITNESS_ANCESTOR)
#define	WITNESS_DESCENDANT_MASK	 (WITNESS_CHILD | WITNESS_DESCENDANT)
#define	WITNESS_RELATED_MASK						\
	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define	WITNESS_REVERSAL	 0x10	/* A lock order reversal has been
					 * observed. */
#define	WITNESS_RESERVED1	 0x20	/* Unused flag, reserved. */
#define	WITNESS_RESERVED2	 0x40	/* Unused flag, reserved. */
#define	WITNESS_LOCK_ORDER_KNOWN 0x80	/* This lock order is known. */

/* Descendant to ancestor flags */
#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)

/* Ancestor to descendant flags */
#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
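/*
 * Added compile-time sanity checks (not in the original source): the
 * ancestor-to-descendant and descendant-to-ancestor conversions must map
 * each direct flag onto its mirror image.
 */
CTASSERT(WITNESS_ATOD(WITNESS_PARENT) == WITNESS_CHILD);
CTASSERT(WITNESS_ATOD(WITNESS_ANCESTOR) == WITNESS_DESCENDANT);
CTASSERT(WITNESS_DTOA(WITNESS_CHILD) == WITNESS_PARENT);
CTASSERT(WITNESS_DTOA(WITNESS_DESCENDANT) == WITNESS_ANCESTOR);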
#define	WITNESS_INDEX_ASSERT(i)						\
	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)

MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");
/*
 * Lock instances.  A lock instance is the data associated with a lock while
 * it is held by witness.  For example, a lock instance will hold the
 * recursion count of a lock.  Lock instances are held in lists.  Spin locks
 * are held in a per-cpu list while sleep locks are held in a per-thread list.
 */
struct lock_instance {
	struct lock_object	*li_lock;
	const char		*li_file;
	int			li_line;
	u_int			li_flags;
};

/*
 * A simple list type used to build the list of locks held by a thread
 * or CPU.  We can't simply embed the list in struct lock_object since a
 * lock may be held by more than one thread if it is a shared lock.  Locks
 * are added to the head of the list, so we fill up each list entry from
 * "the back" logically.  To ease some of the arithmetic, we actually fill
 * in each list entry the normal way (children[0] then children[1], etc.) but
 * when we traverse the list we read children[count-1] as the first entry
 * down to children[0] as the final entry.
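 *
 * Added illustration: with LOCK_NCHILDREN == 5, a thread holding seven
 * locks (lock1 acquired first, lock7 last) is stored as
 *
 *	head lle:  ll_children[1] = lock7, ll_children[0] = lock6
 *	  ll_next: ll_children[4] = lock5, ..., ll_children[0] = lock1
 *
 * so the traversal described above visits lock7, lock6, ..., lock1,
 * i.e. most recently acquired first.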
 */
struct lock_list_entry {
	struct lock_list_entry	*ll_next;
	struct lock_instance	ll_children[LOCK_NCHILDREN];
	u_int			ll_count;
};
/*
 * The main witness structure. One of these per named lock type in the system
 * (for example, "vnode interlock").
 */
struct witness {
	char			w_name[MAX_W_NAME];
	uint32_t		w_index;	/* Index in the relationship matrix */
	struct lock_class	*w_class;
	STAILQ_ENTRY(witness)	w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness)	w_typelist;	/* Witnesses of a type. */
	struct witness		*w_hash_next;	/* Linked list in hash buckets. */
	const char		*w_file;	/* File where last acquired */
	uint32_t		w_line;		/* Line where last acquired */
	uint32_t		w_refcount;
	uint16_t		w_num_ancestors; /* direct/indirect
						  * ancestor count */
	uint16_t		w_num_descendants; /* direct/indirect
						    * descendant count */
	int16_t			w_ddb_level;
	unsigned		w_displayed:1;
	unsigned		w_reversed:1;
};
STAILQ_HEAD(witness_list, witness);

/*
 * The witness hash table. Keys are witness names (const char *), elements are
 * witness objects (struct witness *).
 */
struct witness_hash {
	struct witness	*wh_array[WITNESS_HASH_SIZE];
};

/*
 * Key type for the lock order data hash table.
 */
struct witness_lock_order_key {
	uint16_t	from;
	uint16_t	to;
};

struct witness_lock_order_data {
	struct stack			wlod_stack;
	struct witness_lock_order_key	wlod_key;
	struct witness_lock_order_data	*wlod_next;
};

/*
 * The witness lock order data hash table. Keys are witness index tuples
 * (struct witness_lock_order_key), elements are lock order data objects
 * (struct witness_lock_order_data).
 */
struct witness_lock_order_hash {
	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
};

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_pendhelp {
	const char		*wh_type;
	struct lock_object	*wh_lock;
};

struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};
/*
 * Returns 0 if one of the locks is a spin lock and the other is not.
 * Returns 1 otherwise.
 */
static __inline int
witness_lock_type_equal(struct witness *w1, struct witness *w2)
{

	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
	    (w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
}

static __inline int
witness_lock_order_key_empty(const struct witness_lock_order_key *key)
{

	return (key->from == 0 && key->to == 0);
}

static __inline int
witness_lock_order_key_equal(const struct witness_lock_order_key *a,
    const struct witness_lock_order_key *b)
{

	return (a->from == b->from && a->to == b->to);
}
static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
		    const char *fname);
static void	_witness_debugger(int cond, const char *msg);
static void	adopt(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static void	depart(struct witness *w);
static struct witness	*enroll(const char *description,
			    struct lock_class *lock_class);
static struct lock_instance	*find_instance(struct lock_list_entry *list,
				    struct lock_object *lock);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static void	itismychild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
static void	witness_ddb_compute_levels(void);
static void	witness_ddb_display(int(*)(const char *fmt, ...));
static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_ddb_level_descendants(struct witness *parent, int l);
static void	witness_ddb_list(struct thread *td);
static void	witness_free(struct witness *m);
static struct witness	*witness_get(void);
static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
static struct witness	*witness_hash_get(const char *key);
static void	witness_hash_put(struct witness *w);
static void	witness_init_hash_tables(void);
static void	witness_increment_graph_generation(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_list_entry	*witness_lock_list_get(void);
static int	witness_lock_order_add(struct witness *parent,
		    struct witness *child);
static int	witness_lock_order_check(struct witness *parent,
		    struct witness *child);
static struct witness_lock_order_data	*witness_lock_order_get(
					    struct witness *parent,
					    struct witness *child);
static void	witness_list_lock(struct lock_instance *instance,
		    int (*prnt)(const char *fmt, ...));
static void	witness_setflag(struct lock_object *lock, int flag, int set);
#ifdef KDB
#define	witness_debugger(c)	_witness_debugger(c, __func__)
#else
#define	witness_debugger(c)
#endif
SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL, "Witness Locking");

/*
 * If set to 0, lock order checking is disabled.  If set to -1,
 * witness is completely disabled.  Otherwise witness performs full
 * lock order checking for all locks.  At runtime, lock order checking
 * may be toggled.  However, witness cannot be reenabled once it is
 * completely disabled.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
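/*
 * Added usage note: the policy above can be changed at runtime with
 * standard sysctl(8) syntax, e.g.:
 *
 *	sysctl debug.witness.watch=0	(disable order checking)
 *	sysctl debug.witness.watch=-1	(turn witness off for good)
 */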
/*
 * When KDB is enabled and witness_kdb is 1, it will cause the system
 * to drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
    0, "");
/*
 * Call this to print out the relations between locks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");

/*
 * Call this to print out the witness faulty stacks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");
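/*
 * Added usage note: both reports are plain text and can be fetched from
 * userland, e.g.:
 *
 *	sysctl debug.witness.fullgraph
 *	sysctl debug.witness.badstacks
 */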
static struct mtx w_mtx;

/* w_list */
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);

/* w_typelist */
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);

/* lock list */
static struct lock_list_entry *w_lock_list_free = NULL;
static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
static u_int pending_cnt;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");
static struct witness *w_data;
static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
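/*
 * Added illustration: the relationship between two witnesses is read by
 * indexing w_rmatrix with their w_index values, e.g.
 *
 *	w_rmatrix[w1->w_index][w2->w_index] & WITNESS_ANCESTOR_MASK
 *
 * is non-zero when locks of type w1 are acquired before locks of type w2.
 * Row and column 0 are intentionally unused so that a zeroed index shows
 * up as a bug.
 */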
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
static struct witness_hash w_hash;	/* The witness hash table. */

/* The lock order data hash */
static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
static struct witness_lock_order_data *w_lofree = NULL;
static struct witness_lock_order_hash w_lohash;
static int w_max_used_index = 0;
static unsigned int w_generation = 0;
static const char w_notrunning[] = "Witness not running\n";
static const char w_stillcold[] = "Witness is still cold\n";
static struct witness_order_list_entry order_lists[] = {
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "allprison", &lock_class_sx },
	{ "Giant", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_rw },
	{ "pmc-sleep", &lock_class_mtx_sleep },
	{ "time lock", &lock_class_mtx_sleep },
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_rw },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	/*
	 * IPv4 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	/*
	 * IPv6 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in6_multi_mtx", &lock_class_mtx_sleep },
	{ "mld_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp_global_rwlock", &lock_class_rw },
	{ "unp_list_lock", &lock_class_mtx_sleep },
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "udp", &lock_class_rw },
	{ "udpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "tcp", &lock_class_rw },
	{ "tcpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_mtx_sleep },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "802.11 com lock", &lock_class_mtx_sleep },
	{ "network driver", &lock_class_mtx_sleep },
	{ "ng_node", &lock_class_mtx_sleep },
	{ "ng_worklist", &lock_class_mtx_sleep },
	{ "system map", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ "vm object", &lock_class_mtx_sleep },
	{ "page lock", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "pmap", &lock_class_mtx_sleep },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "dn->dn_mtx", &lock_class_sx },
	{ "dr->dt.di.dr_mtx", &lock_class_sx },
	{ "db->db_mtx", &lock_class_sx },
	/*
	 * spin locks
	 */
	{ "ap boot", &lock_class_mtx_spin },
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
	{ "scrlock", &lock_class_mtx_spin },
	{ "cy", &lock_class_mtx_spin },
	{ "pcib_mtx", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
	{ "pmc-per-proc", &lock_class_mtx_spin },
	{ "process slock", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "umtx lock", &lock_class_mtx_spin },
	{ "rm_spinlock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "turnstile lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
	{ "smp rendezvous", &lock_class_mtx_spin },
	{ "tlb0", &lock_class_mtx_spin },
	/*
	 * leaf locks
	 */
	{ "intrcnt", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#if defined(SMP) && defined(__sparc64__)
	{ "ipi", &lock_class_mtx_spin },
#endif
	{ "allpmaps", &lock_class_mtx_spin },
	{ "descriptor tables", &lock_class_mtx_spin },
	{ "clk", &lock_class_mtx_spin },
	{ "cpuset", &lock_class_mtx_spin },
	{ "mprof lock", &lock_class_mtx_spin },
	{ "zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
	{ "MCA spin lock", &lock_class_mtx_spin },
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
	{ "pmc-leaf", &lock_class_mtx_spin },
	{ "blocked lock", &lock_class_mtx_spin },
};
#ifdef BLESSING
/*
 * Pairs of locks which have been blessed.  Witness does not complain about
 * order problems with blessed lock pairs.
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif
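/*
 * Added example: the list above ships empty; a hypothetical entry such as
 *
 *	{ "lockA", "lockB" },
 *
 * would tell witness never to report lock order reversals between
 * witnesses named "lockA" and "lockB" (see blessed() below).
 */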
/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}
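/*
 * Added example: a path such as "../../../kern/subr_witness.c" is
 * reported as "kern/subr_witness.c".
 */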
/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code does
 * assume that the early boot is single-threaded at least until after this
 * routine is completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	w_data = malloc(sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
	    M_NOWAIT | M_ZERO);

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
		w = &w_data[i];
		memset(w, 0, sizeof(*w));
		w_data[i].w_index = i;	/* Witness index never changes. */
		witness_free(w);
	}
	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
	    ("%s: Invalid list of free witness objects", __func__));

	/* Witness with index 0 is not used to aid in debugging. */
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;

	memset(w_rmatrix, 0,
	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));

	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);
	witness_init_hash_tables();

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
		lock = pending_locks[i].wh_lock;
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(pending_locks[i].wh_type,
		    LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
    NULL);
void
witness_init(struct lock_object *lock, const char *type)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch < 1 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		pending_locks[pending_cnt].wh_lock = lock;
		pending_locks[pending_cnt++].wh_type = type;
		if (pending_cnt > WITNESS_PENDLIST)
			panic("%s: pending locks list is too small, bump it\n",
			    __func__);
	} else
		lock->lo_witness = enroll(type, class);
}
void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);
	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
		return;
	w = lock->lo_witness;

	mtx_lock_spin(&w_mtx);
	MPASS(w->w_refcount > 0);
	w->w_refcount--;

	if (w->w_refcount == 0)
		depart(w);
	mtx_unlock_spin(&w_mtx);
}
#ifdef DDB
static void
witness_ddb_compute_levels(void)
{
	struct witness *w;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_ddb_level = -1;

	/*
	 * Look for locks with no parents and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {

		/* If the witness has ancestors (is not a root), skip it. */
		if (w->w_num_ancestors > 0)
			continue;
		witness_ddb_level_descendants(w, 0);
	}
}

static void
witness_ddb_level_descendants(struct witness *w, int l)
{
	int i;

	if (w->w_ddb_level >= l)
		return;

	w->w_ddb_level = l;
	l++;
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_level_descendants(&w_data[i], l);
	}
}
static void
witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
    struct witness *w, int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		prnt(" ");
	prnt("%s (type: %s, depth: %d, active refs: %d)",
	    w->w_name, w->w_class->lc_name,
	    w->w_ddb_level, w->w_refcount);
	if (w->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	w->w_displayed = 1;
	if (w->w_file != NULL && w->w_line != 0)
		prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
		    w->w_line);
	else
		prnt(" -- never acquired\n");
	indent++;
	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_display_descendants(prnt, &w_data[i],
			    indent);
	}
}

static void
witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_ddb_level > 0)
			continue;

		/* This lock has no ancestors - display its descendants. */
		witness_ddb_display_descendants(prnt, w, 0);
	}
}
static void
witness_ddb_display(int(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	witness_ddb_compute_levels();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_ddb_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_ddb_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s (type: %s, depth: %d)\n", w->w_name,
		    w->w_class->lc_name, w->w_ddb_level);
	}
}
#endif /* DDB */
int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == -1 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	mtx_assert(&w_mtx, MA_NOTOWNED);
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (witness_watch &&
	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
	itismychild(lock1->lo_witness, lock2->lo_witness);
	mtx_unlock_spin(&w_mtx);
	return (0);
}
void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line, struct lock_object *interlock)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1, *lock2, *plock;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;

	if (class->lc_flags & LC_SLEEPLOCK) {

		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		lock_list = td->td_sleeplocks;
		if (lock_list == NULL || lock_list->ll_count == 0)
			return;
	} else {

		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  Avoid problems with thread
		 * migration pinning the thread while checking if
		 * spinlocks are held.  If at least one spinlock is held
		 * the thread is in a safe path and it is allowed to
		 * unpin it.
		 */
		sched_pin();
		lock_list = PCPU_GET(spinlocks);
		if (lock_list == NULL || lock_list->ll_count == 0) {
			sched_unpin();
			return;
		}
		sched_unpin();
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			printf("while exclusively locked from %s:%d\n",
			    fixup_filename(lock1->li_file), lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			printf("while share locked from %s:%d\n",
			    fixup_filename(lock1->li_file), lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Find the previously acquired lock, but ignore interlocks.
	 */
	plock = &lock_list->ll_children[lock_list->ll_count - 1];
	if (interlock != NULL && plock->li_lock == interlock) {
		if (lock_list->ll_count > 1)
			plock =
			    &lock_list->ll_children[lock_list->ll_count - 2];
		else {
			lle = lock_list->ll_next;

			/*
			 * The interlock is the only lock we hold, so
			 * simply return.
			 */
			if (lle == NULL)
				return;
			plock = &lle->ll_children[lle->ll_count - 1];
		}
	}

	/*
	 * Try to perform most checks without a lock.  If this succeeds we
	 * can skip acquiring the lock and return success.
	 */
	w1 = plock->li_lock->lo_witness;
	if (witness_lock_order_check(w1, w))
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	mtx_lock_spin(&w_mtx);
	witness_lock_order_add(w1, w);
	if (w1 == w) {
		i = w->w_index;
		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
			w_rmatrix[i][i] |= WITNESS_REVERSAL;
			w->w_reversed = 1;
			mtx_unlock_spin(&w_mtx);
			printf(
			    "acquiring duplicate lock of same type: \"%s\"\n",
			    w->w_name);
			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
			    fixup_filename(plock->li_file), plock->li_line);
			printf(" 2nd %s @ %s:%d\n", lock->lo_name,
			    fixup_filename(file), line);
			witness_debugger(1);
		} else
			mtx_unlock_spin(&w_mtx);
		return;
	}
	mtx_assert(&w_mtx, MA_OWNED);

	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w))
		goto out;

	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];

			/*
			 * Ignore the interlock the first time we see it.
			 */
			if (interlock != NULL && interlock == lock1->li_lock) {
				interlock = NULL;
				continue;
			}

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			w1 = lock1->li_lock->lo_witness;
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}

			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;

			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.lock_object)
				goto reversal;

			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:

			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
#ifdef BLESSING

			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				goto out;
#endif

			/* Bail if this violation is known */
			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
				goto out;

			/* Record this as a violation */
			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
			w->w_reversed = w1->w_reversed = 1;
			witness_increment_graph_generation();
			mtx_unlock_spin(&w_mtx);

			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.lock_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");

			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, fixup_filename(lock1->li_file),
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name,
				    fixup_filename(file), line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_witness->w_name,
				    fixup_filename(lock2->li_file),
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, fixup_filename(lock1->li_file),
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name,
				    fixup_filename(file), line);
			}
			witness_debugger(1);
			return;
		}
	}

	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(plock->li_lock == &Giant.lock_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    w->w_name, plock->li_lock->lo_witness->w_name);
		itismychild(plock->li_lock->lo_witness, w);
	}
out:
	mtx_unlock_spin(&w_mtx);
}
void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}
void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name,
		    fixup_filename(file), line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("upgrade of exclusive lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK,
			    fixup_filename(file), line);
	}
	instance->li_flags |= LI_EXCLUSIVE;
}
void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name,
		    fixup_filename(file), line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("downgrade of shared lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK,
			    fixup_filename(file), line);
	}
	instance->li_flags &= ~LI_EXCLUSIVE;
}
void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	int i, j;

	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	lle = *lock_list;
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}

	/*
	 * When disabling WITNESS through witness_watch we could end up with
	 * registered locks still in the td_sleeplocks queue.  Make sure the
	 * queues are flushed by searching for and removing any such leftover
	 * registered locks.
	 */
	if (witness_watch > 0)
		panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
	else
		return;
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		printf("while exclusively locked from %s:%d\n",
		    fixup_filename(instance->li_file), instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		printf("while share locked from %s:%d\n",
		    fixup_filename(instance->li_file),
		    instance->li_line);
		panic("share->uexcl");
	}

	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}

	/* The lock is now being dropped, check for NORELEASE flag */
	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
		printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		panic("lock marked norelease");
	}

	/* Otherwise, remove this item from the list. */
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;

	/*
	 * To reduce contention on w_mtx, we always keep a head object in the
	 * list so that frequent allocation from the free witness pool (and
	 * the subsequent locking) is avoided.  To keep the code simple, a
	 * fully drained head object also means that there are no further
	 * objects in the list, so list ownership must be handed over to
	 * another object if the current head needs to be freed.
	 */
	if ((*lock_list)->ll_count == 0) {
		if (*lock_list == lle) {
			if (lle->ll_next == NULL)
				return;
		} else
			lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}
void
witness_thread_exit(struct thread *td)
{
	struct lock_list_entry *lle;
	int i, n;

	lle = td->td_sleeplocks;
	if (lle == NULL || panicstr != NULL)
		return;
	if (lle->ll_count != 0) {
		for (n = 0; lle != NULL; lle = lle->ll_next)
			for (i = lle->ll_count - 1; i >= 0; i--) {
				if (n == 0)
					printf("Thread %p exiting with the following locks held:\n",
					    td);
				n++;
				witness_list_lock(&lle->ll_children[i],
				    printf);
			}
		panic("Thread %p cannot exit while holding sleeplocks\n", td);
	}
	witness_lock_list_free(lle);
}
/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch < 1 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.lock_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1, printf);
		}

	/*
	 * Pin the thread to avoid problems with thread migration.  Once
	 * all checks on spinlock ownership have passed, the thread is on
	 * a safe path and can be unpinned.
	 */
	sched_pin();
	lock_list = PCPU_GET(spinlocks);
	if (lock_list != NULL && lock_list->ll_count != 0) {
		sched_unpin();

		/*
		 * We should only have one spinlock and as long as
		 * the flags cannot match for this locks class,
		 * check whether the first spinlock is the one curthread
		 * should be holding.
		 */
		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
		    lock1->li_lock == lock && n == 0)
			return (0);

		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
		printf(" with the following");
		if (flags & WARN_SLEEPOK)
			printf(" non-sleepable");
		printf(" locks held:\n");
		n += witness_list_locks(&lock_list, printf);
	} else
		sched_unpin();
	if (flags & WARN_PANIC && n)
		panic("%s", __func__);
	else
		witness_debugger(n);
	return (n);
}
const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}
static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;
	struct witness_list *typelist;

	MPASS(description != NULL);

	if (witness_watch == -1 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK)) {
		if (witness_skipspin)
			return (NULL);
		else
			typelist = &w_spin;
	} else if ((lock_class->lc_flags & LC_SLEEPLOCK))
		typelist = &w_sleep;
	else
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);

	mtx_lock_spin(&w_mtx);
	w = witness_hash_get(description);
	if (w)
		goto found;
	if ((w = witness_get()) == NULL)
		return (NULL);
	MPASS(strlen(description) < MAX_W_NAME);
	strcpy(w->w_name, description);
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	}

	/* Insert new witness into the hash */
	witness_hash_put(w);
	witness_increment_graph_generation();
	mtx_unlock_spin(&w_mtx);
	return (w);
found:
	w->w_refcount++;
	mtx_unlock_spin(&w_mtx);
	if (lock_class != w->w_class)
		panic(
		    "lock (%s) %s does not match earlier (%s) lock",
		    description, lock_class->lc_name,
		    w->w_class->lc_name);
	return (w);
}
static void
depart(struct witness *w)
{
	struct witness_list *list;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
		list = &w_sleep;
		w_sleep_cnt--;
	} else {
		list = &w_spin;
		w_spin_cnt--;
	}

	/*
	 * Set file to NULL as it may point into a loadable module.
	 */
	w->w_file = NULL;
	w->w_line = 0;
	witness_increment_graph_generation();
}
static void
adopt(struct witness *parent, struct witness *child)
{
	int pi, ci, i, j;

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	/* If the relationship is already known, there's no work to be done. */
	if (isitmychild(parent, child))
		return;

	/* When the structure of the graph changes, bump up the generation. */
	witness_increment_graph_generation();

	/*
	 * The hard part ... create the direct relationship, then propagate all
	 * indirect relationships.
	 */
	pi = parent->w_index;
	ci = child->w_index;
	WITNESS_INDEX_ASSERT(pi);
	WITNESS_INDEX_ASSERT(ci);
	w_rmatrix[pi][ci] |= WITNESS_PARENT;
	w_rmatrix[ci][pi] |= WITNESS_CHILD;

	/*
	 * If parent was not already an ancestor of child,
	 * then we increment the descendant and ancestor counters.
	 */
	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
		parent->w_num_descendants++;
		child->w_num_ancestors++;
	}

	/*
	 * Find each ancestor of 'pi'.  Note that 'pi' itself is counted as
	 * an ancestor of 'pi' during this loop.
	 */
	for (i = 1; i <= w_max_used_index; i++) {
		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
		    (i != pi))
			continue;

		/* Find each descendant of 'i' and mark it as a descendant. */
		for (j = 1; j <= w_max_used_index; j++) {

			/*
			 * Skip children that are already marked as
			 * descendants of 'i'.
			 */
			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
				continue;

			/*
			 * We are only interested in descendants of 'ci'.  Note
			 * that 'ci' itself is counted as a descendant of 'ci'.
			 */
			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
			    (j != ci))
				continue;
			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
			w_data[i].w_num_descendants++;
			w_data[j].w_num_ancestors++;

			/*
			 * Make sure we aren't marking a node as both an
			 * ancestor and descendant.  We should have caught
			 * this as a lock order reversal earlier.
			 */
			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
				printf("witness rmatrix paradox! [%d][%d]=%d "
				    "both ancestor and descendant\n",
				    i, j, w_rmatrix[i][j]);
				kdb_backtrace();
				printf("Witness disabled.\n");
				witness_watch = -1;
			}
			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
				printf("witness rmatrix paradox! [%d][%d]=%d "
				    "both ancestor and descendant\n",
				    j, i, w_rmatrix[j][i]);
				kdb_backtrace();
				printf("Witness disabled.\n");
				witness_watch = -1;
			}
		}
	}
}
static void
itismychild(struct witness *parent, struct witness *child)
{

	MPASS(child != NULL && parent != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	if (!witness_lock_type_equal(parent, child)) {
		if (witness_cold == 0)
			mtx_unlock_spin(&w_mtx);
		panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
		    "the same lock type", __func__, parent->w_name,
		    parent->w_class->lc_name, child->w_name,
		    child->w_class->lc_name);
	}
	adopt(parent, child);
}
/*
 * Generic code for the isitmy*() functions.  The rmask parameter is the
 * expected relationship of w1 to w2.
 */
static int
_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
{
	unsigned char r1, r2;
	int i1, i2;

	i1 = w1->w_index;
	i2 = w2->w_index;
	WITNESS_INDEX_ASSERT(i1);
	WITNESS_INDEX_ASSERT(i2);
	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;

	/* The flags on one better be the inverse of the flags on the other */
	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
	    (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
		printf("%s: rmatrix mismatch between %s (index %d) and %s "
		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
		    "w_rmatrix[%d][%d] == %hhx\n",
		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
		    i2, i1, r2);
		kdb_backtrace();
		printf("Witness disabled.\n");
		witness_watch = -1;
	}
	return (r1 & rmask);
}
/*
 * Checks if @child is a direct child of @parent.
 */
static int
isitmychild(struct witness *parent, struct witness *child)
{

	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
}

/*
 * Checks if @descendant is a direct or indirect descendant of @ancestor.
 */
static int
isitmydescendant(struct witness *ancestor, struct witness *descendant)
{

	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
	    __func__));
}
#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
#endif
1999 if (witness_cold == 0)
2000 mtx_assert(&w_mtx, MA_OWNED);
2002 if (witness_watch == -1) {
2003 mtx_unlock_spin(&w_mtx);
2006 if (STAILQ_EMPTY(&w_free)) {
2008 mtx_unlock_spin(&w_mtx);
2009 printf("WITNESS: unable to allocate a new witness object\n");
2012 w = STAILQ_FIRST(&w_free);
2013 STAILQ_REMOVE_HEAD(&w_free, w_list);
2016 MPASS(index > 0 && index == w_max_used_index+1 &&
2017 index < WITNESS_COUNT);
2018 bzero(w, sizeof(*w));
2020 if (index > w_max_used_index)
2021 w_max_used_index = index;
static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
	w_free_cnt++;
}
static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_watch == -1)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_watch = -1;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}
static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}
static struct lock_instance *
find_instance(struct lock_list_entry *list, struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}
static void
witness_list_lock(struct lock_instance *instance,
    int (*prnt)(const char *fmt, ...))
{
	struct lock_object *lock;

	lock = instance->li_lock;
	prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
	if (lock->lo_witness->w_name != lock->lo_name)
		prnt(" (%s)", lock->lo_witness->w_name);
	prnt(" r = %d (%p) locked @ %s:%d\n",
	    instance->li_flags & LI_RECURSEMASK, lock,
	    fixup_filename(instance->li_file), instance->li_line);
}
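/*
 * Added note: the output produced above looks like, e.g. (values are
 * hypothetical):
 *
 *	exclusive sleep mutex process lock r = 0 (0xc46369b0) locked @
 *	    kern/kern_fork.c:1000
 *
 * with the witness name appended in parentheses when it differs from the
 * lock's own name.
 */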
static int
witness_thread_has_locks(struct thread *td)
{

	if (td->td_sleeplocks == NULL)
		return (0);
	return (td->td_sleeplocks->ll_count != 0);
}

static int
witness_proc_has_locks(struct proc *p)
{
	struct thread *td;

	FOREACH_THREAD_IN_PROC(p, td) {
		if (witness_thread_has_locks(td))
			return (1);
	}
	return (0);
}
int
witness_list_locks(struct lock_list_entry **lock_list,
    int (*prnt)(const char *fmt, ...))
{
	struct lock_list_entry *lle;
	int i, nheld;

	nheld = 0;
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			witness_list_lock(&lle->ll_children[i], prnt);
			nheld++;
		}
	return (nheld);
}
/*
 * This is a bit risky at best.  We call this function when we have timed
 * out acquiring a spin lock, and we assume that the other CPU is stuck
 * with this lock held.  So, we go groveling around in the other CPU's
 * per-cpu data to try to find the lock instance for this spin lock to
 * see when it was last acquired.
 */
void
witness_display_spinlock(struct lock_object *lock, struct thread *owner,
    int (*prnt)(const char *fmt, ...))
{
	struct lock_instance *instance;
	struct pcpu *pc;

	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
		return;
	pc = pcpu_find(owner->td_oncpu);
	instance = find_instance(pc->pc_spinlocks, lock);
	if (instance != NULL)
		witness_list_lock(instance, prnt);
}
void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	/*
	 * This function is used independently in locking code to deal with
	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
	 * Giant is dismantled.
	 */
	if (SCHEDULER_STOPPED())
		return;
	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	*filep = instance->li_file;
	*linep = instance->li_line;
}
void
witness_restore(struct lock_object *lock, const char *file, int line)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	/*
	 * This function is used independently in locking code to deal with
	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
	 * Giant is handled by the global lock order check.
	 */
	if (SCHEDULER_STOPPED())
		return;
	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	lock->lo_witness->w_file = file;
	lock->lo_witness->w_line = line;
	instance->li_file = file;
	instance->li_line = line;
}
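/*
 * A sketch of the intended witness_save()/witness_restore() pairing: they
 * preserve the recorded acquisition file/line across a drop-and-reacquire
 * of a held lock.  This example uses Giant; the locals are hypothetical:
 */
#if 0
	const char *file;
	int line;

	witness_save(&Giant.lock_object, &file, &line);
	mtx_unlock(&Giant);
	/* ... block or sleep without holding Giant ... */
	mtx_lock(&Giant);
	witness_restore(&Giant.lock_object, file, line);
#endif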
void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else
		panic("Lock (%s) %s is not sleep or spin!",
		    class->lc_name, lock->lo_name);
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.",
		    fixup_filename(file), line);
	}
#endif	/* INVARIANT_SUPPORT */
}
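/*
 * A sketch of typical witness_assert() usage: assert that a mutex 'm'
 * (hypothetical) is held exclusively and not recursed at this point:
 */
#if 0
	witness_assert(&m->lock_object, LA_XLOCKED | LA_NOTRECURSED,
	    __FILE__, __LINE__);
#endif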
static void
witness_setflag(struct lock_object *lock, int flag, int set)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);

	if (set)
		instance->li_flags |= flag;
	else
		instance->li_flags &= ~flag;
}
void
witness_norelease(struct lock_object *lock)
{

	witness_setflag(lock, LI_NORELEASE, 1);
}

void
witness_releaseok(struct lock_object *lock)
{

	witness_setflag(lock, LI_NORELEASE, 0);
}
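/*
 * A sketch of how the pair above brackets a region in which releasing the
 * lock should be flagged as a bug ('m' is a hypothetical mutex):
 */
#if 0
	witness_norelease(&m->lock_object);
	/* ... an unlock of m in here would trip the LI_NORELEASE check ... */
	witness_releaseok(&m->lock_object);
#endif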
#ifdef DDB
static void
witness_ddb_list(struct thread *td)
{

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	KASSERT(kdb_active, ("%s: not in the debugger", __func__));

	if (witness_watch < 1)
		return;

	witness_list_locks(&td->td_sleeplocks, db_printf);

	/*
	 * We only handle spinlocks if td == curthread.  This is somewhat broken
	 * if td is currently executing on some other CPU and holds spin locks
	 * as we won't display those locks.  If we had a MI way of getting
	 * the per-cpu data for a given cpu then we could use
	 * td->td_oncpu to get the list of spinlocks for this thread and "fix"
	 * this.
	 *
	 * That still wouldn't really fix this unless we locked the scheduler
	 * lock or stopped the other CPU to make sure it wasn't changing the
	 * list out from under us.  It is probably best to just not try to
	 * handle threads on other CPU's for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		witness_list_locks(PCPU_PTR(spinlocks), db_printf);
}
DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;

	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = kdb_thread;
	witness_ddb_list(td);
}

DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
{
	struct thread *td;
	struct proc *p;

	/*
	 * It would be nice to list only threads and processes that actually
	 * held sleep locks, but that information is currently not exported
	 * by WITNESS.
	 */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!witness_proc_has_locks(p))
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (!witness_thread_has_locks(td))
				continue;
			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
			    p->p_comm, td, td->td_tid);
			witness_ddb_list(td);
		}
	}
}
DB_SHOW_ALIAS(alllocks, db_witness_list_all)

DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_ddb_display(db_printf);
}
#endif	/* DDB */
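/*
 * For reference, the commands defined above are reached from the ddb
 * prompt as:
 *
 *   db> show locks        (locks held by a thread, default curthread)
 *   db> show alllocks     (all threads holding sleep locks)
 *   db> show witness      (display the witness order graph)
 */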
static int
sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
{
	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
	struct sbuf *sb;
	u_int w_rmatrix1, w_rmatrix2;
	int error, generation, i, j;

	if (witness_watch < 1) {
		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
		return (error);
	}
	if (witness_cold) {
		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
		return (error);
	}
	error = 0;
	sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
	if (sb == NULL)
		return (ENOMEM);

	/* Allocate and init temporary storage space. */
	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
	    M_WAITOK | M_ZERO);
	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
	    M_WAITOK | M_ZERO);
	stack_zero(&tmp_data1->wlod_stack);
	stack_zero(&tmp_data2->wlod_stack);

restart:
	mtx_lock_spin(&w_mtx);
	generation = w_generation;
	mtx_unlock_spin(&w_mtx);
	sbuf_printf(sb, "Number of known direct relationships is %d\n",
	    w_lohash.wloh_count);
	for (i = 1; i < w_max_used_index; i++) {
		mtx_lock_spin(&w_mtx);
		if (generation != w_generation) {
			mtx_unlock_spin(&w_mtx);

			/* The graph has changed, try again. */
			req->oldidx = 0;
			sbuf_clear(sb);
			goto restart;
		}

		w1 = &w_data[i];
		if (w1->w_reversed == 0) {
			mtx_unlock_spin(&w_mtx);
			continue;
		}

		/* Copy w1 locally so we can release the spin lock. */
		*tmp_w1 = *w1;
		mtx_unlock_spin(&w_mtx);

		if (tmp_w1->w_reversed == 0)
			continue;
		for (j = 1; j < w_max_used_index; j++) {
			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
				continue;

			mtx_lock_spin(&w_mtx);
			if (generation != w_generation) {
				mtx_unlock_spin(&w_mtx);

				/* The graph has changed, try again. */
				req->oldidx = 0;
				sbuf_clear(sb);
				goto restart;
			}

			w2 = &w_data[j];
			data1 = witness_lock_order_get(w1, w2);
			data2 = witness_lock_order_get(w2, w1);

			/*
			 * Copy information locally so we can release the
			 * spin lock.
			 */
			*tmp_w2 = *w2;
			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];

			if (data1) {
				stack_zero(&tmp_data1->wlod_stack);
				stack_copy(&data1->wlod_stack,
				    &tmp_data1->wlod_stack);
			}
			if (data2 && data2 != data1) {
				stack_zero(&tmp_data2->wlod_stack);
				stack_copy(&data2->wlod_stack,
				    &tmp_data2->wlod_stack);
			}
			mtx_unlock_spin(&w_mtx);

			sbuf_printf(sb,
	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
			sbuf_printf(sb,
		    "w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
			    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
			    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
			if (data1) {
				sbuf_printf(sb,
		    "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
				sbuf_printf(sb, "\n");
			}
			if (data2 && data2 != data1) {
				sbuf_printf(sb,
		    "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
				sbuf_printf(sb, "\n");
			}
		}
	}
	mtx_lock_spin(&w_mtx);
	if (generation != w_generation) {
		mtx_unlock_spin(&w_mtx);

		/*
		 * The graph changed while we were printing stack data,
		 * try again.
		 */
		req->oldidx = 0;
		sbuf_clear(sb);
		goto restart;
	}
	mtx_unlock_spin(&w_mtx);

	/* Free temporary storage space. */
	free(tmp_data1, M_TEMP);
	free(tmp_data2, M_TEMP);
	free(tmp_w1, M_TEMP);
	free(tmp_w2, M_TEMP);

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);

	return (error);
}
static int
sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
{
	struct witness *w;
	struct sbuf *sb;
	int error;

	if (witness_watch < 1) {
		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
		return (error);
	}
	if (witness_cold) {
		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
		return (error);
	}

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
	if (sb == NULL)
		return (ENOMEM);
	sbuf_printf(sb, "\n");

	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;
	STAILQ_FOREACH(w, &w_all, w_list)
		witness_add_fullgraph(sb, w);
	mtx_unlock_spin(&w_mtx);

	/*
	 * Close the sbuf and return to userland.
	 */
	error = sbuf_finish(sb);
	sbuf_delete(sb);

	return (error);
}
static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value > 1 || value < -1 ||
	    (witness_watch == -1 && value != witness_watch))
		return (EINVAL);
	witness_watch = value;
	return (0);
}
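/*
 * For reference, this handler backs the debug.witness.watch sysctl.  The
 * checks above accept any value in [-1, 1], except that once witness has
 * been turned off with -1 the value can never be changed again, e.g.
 * (shell session, illustrative):
 *
 *   # sysctl debug.witness.watch=0     (stop order checking)
 *   # sysctl debug.witness.watch=-1    (disable witness permanently)
 */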
static void
witness_add_fullgraph(struct sbuf *sb, struct witness *w)
{
	int i;

	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
		return;
	w->w_displayed = 1;

	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
			    w_data[i].w_name);
			witness_add_fullgraph(sb, &w_data[i]);
		}
	}
}
/*
 * A simple hash function.  Takes a key pointer and a key size.  If size == 0,
 * interprets the key as a string and reads until the null terminator.
 * Otherwise, reads the first size bytes.  Returns an unsigned 32-bit hash
 * value computed from the key.
 */
static uint32_t
witness_hash_djb2(const uint8_t *key, uint32_t size)
{
	unsigned int hash = 5381;
	int i;

	/* hash = hash * 33 + key[i] */
	if (size)
		for (i = 0; i < size; i++)
			hash = ((hash << 5) + hash) + (unsigned int)key[i];
	else
		for (i = 0; key[i] != 0; i++)
			hash = ((hash << 5) + hash) + (unsigned int)key[i];

	return (hash);
}
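/*
 * A worked example of the recurrence above (hash = hash * 33 + c), for a
 * call witness_hash_djb2((const uint8_t *)"ab", 0):
 *
 *   hash = 5381
 *   hash = 5381   * 33 + 'a' (97) = 177670
 *   hash = 177670 * 33 + 'b' (98) = 5863208   (the returned value)
 */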
/*
 * Initializes the two witness hash tables.  Called exactly once from
 * witness_initialize().
 */
static void
witness_init_hash_tables(void)
{
	int i;

	MPASS(witness_cold);

	/* Initialize the hash tables. */
	for (i = 0; i < WITNESS_HASH_SIZE; i++)
		w_hash.wh_array[i] = NULL;

	w_hash.wh_size = WITNESS_HASH_SIZE;
	w_hash.wh_count = 0;

	/* Initialize the lock order data hash. */
	w_lofree = NULL;
	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
		w_lodata[i].wlod_next = w_lofree;
		w_lofree = &w_lodata[i];
	}
	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
	w_lohash.wloh_count = 0;
	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
		w_lohash.wloh_array[i] = NULL;
}
static struct witness *
witness_hash_get(const char *key)
{
	struct witness *w;
	uint32_t hash;

	MPASS(key != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	hash = witness_hash_djb2((const uint8_t *)key, 0) % w_hash.wh_size;
	w = w_hash.wh_array[hash];
	while (w != NULL) {
		if (strcmp(w->w_name, key) == 0)
			break;
		w = w->w_hash_next;
	}
	return (w);
}

static void
witness_hash_put(struct witness *w)
{
	uint32_t hash;

	MPASS(w != NULL);
	MPASS(w->w_name != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	KASSERT(witness_hash_get(w->w_name) == NULL,
	    ("%s: trying to add a hash entry that already exists!", __func__));
	KASSERT(w->w_hash_next == NULL,
	    ("%s: w->w_hash_next != NULL", __func__));

	hash = witness_hash_djb2((const uint8_t *)w->w_name, 0) %
	    w_hash.wh_size;
	w->w_hash_next = w_hash.wh_array[hash];
	w_hash.wh_array[hash] = w;
	w_hash.wh_count++;
}
static struct witness_lock_order_data *
witness_lock_order_get(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data = NULL;
	struct witness_lock_order_key key;
	unsigned int hash;

	MPASS(parent != NULL && child != NULL);
	key.from = parent->w_index;
	key.to = child->w_index;
	WITNESS_INDEX_ASSERT(key.from);
	WITNESS_INDEX_ASSERT(key.to);
	if ((w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
		return (NULL);

	hash = witness_hash_djb2((const uint8_t *)&key,
	    sizeof(key)) % w_lohash.wloh_size;
	data = w_lohash.wloh_array[hash];
	while (data != NULL) {
		if (witness_lock_order_key_equal(&data->wlod_key, &key))
			break;
		data = data->wlod_next;
	}
	return (data);
}
/*
 * Verify that parent and child have a known relationship, are not the same,
 * and child is actually a child of parent.  This is done without w_mtx
 * to avoid contention in the common case.
 */
static int
witness_lock_order_check(struct witness *parent, struct witness *child)
{

	if (parent != child &&
	    w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN &&
	    isitmychild(parent, child))
		return (1);

	return (0);
}
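/*
 * A sketch of how the check/add pair below can be used together: record
 * the first stack trace for an ordering only when it is not already known
 * ('w1' and 'w' are hypothetical witness pointers, not this file's actual
 * call sites):
 */
#if 0
	if (witness_lock_order_check(w1, w) == 0)
		witness_lock_order_add(w1, w);
#endif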
static int
witness_lock_order_add(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data = NULL;
	struct witness_lock_order_key key;
	unsigned int hash;

	MPASS(parent != NULL && child != NULL);
	key.from = parent->w_index;
	key.to = child->w_index;
	WITNESS_INDEX_ASSERT(key.from);
	WITNESS_INDEX_ASSERT(key.to);
	if (w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN)
		return (1);

	hash = witness_hash_djb2((const uint8_t *)&key,
	    sizeof(key)) % w_lohash.wloh_size;
	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
	data = w_lofree;
	if (data == NULL)
		return (0);
	w_lofree = data->wlod_next;
	data->wlod_next = w_lohash.wloh_array[hash];
	data->wlod_key = key;
	w_lohash.wloh_array[hash] = data;
	w_lohash.wloh_count++;
	stack_zero(&data->wlod_stack);
	stack_save(&data->wlod_stack);
	return (1);
}
/* Call this whenever the structure of the witness graph changes. */
static void
witness_increment_graph_generation(void)
{

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	w_generation++;
}
#ifdef KDB
static void
_witness_debugger(int cond, const char *msg)
{

	if (witness_trace && cond)
		kdb_backtrace();
	if (witness_kdb && cond)
		kdb_enter(KDB_WHY_WITNESS, msg);
}
#endif