/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2008 Isilon Systems, Inc.
 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
 * Copyright (c) 1998 Berkeley Software Design, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */
/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 *
 * Pronunciation: 'wit-n&s
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *      religious faith or conviction <the heroic witness to divine
 * 6 capitalized : a member of the Jehovah's Witnesses
 *
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 *    This rule is less obvious, but is a result of Giant providing the same
 *    semantics as spl().  Basically, when a thread sleeps, it must release
 *    Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 *    2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 *    This rule is also not quite as obvious.  Giant may be acquired after
 *    a sleepable lock because it is a non-sleepable lock and non-sleepable
 *    locks may always be acquired while holding a sleepable lock.  The second
 *    case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 *    you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 *    acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 *    blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 *    execute.  Thus, acquiring Giant both before and after a sleepable lock
 *    will not result in a lock order reversal.
 */
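
/*
 * As a rough userspace analogue of rule 2 (illustrative only -- POSIX
 * threads, not kernel code): an outer "giant" mutex is dropped before
 * blocking, so a thread that holds X and wants "giant" can run and
 * release X, which is exactly why the T1/T2 scenario above cannot
 * deadlock.
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER; /* plays Giant */
static pthread_mutex_t xmtx  = PTHREAD_MUTEX_INITIALIZER; /* guards x_held */
static pthread_cond_t  xcond = PTHREAD_COND_INITIALIZER;
static bool x_held;			/* plays the sleepable lock X */

static void
lock_x_with_giant(void)
{
        pthread_mutex_lock(&giant);
        pthread_mutex_lock(&xmtx);
        while (x_held) {
                /* Rule 2: release "Giant" before going to sleep. */
                pthread_mutex_unlock(&giant);
                pthread_cond_wait(&xcond, &xmtx);
                /* Reacquire in the proper order: Giant, then the X state. */
                pthread_mutex_unlock(&xmtx);
                pthread_mutex_lock(&giant);
                pthread_mutex_lock(&xmtx);
        }
        x_held = true;
        pthread_mutex_unlock(&xmtx);
}
#endif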
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_stack.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/stdarg.h>

#if !defined(DDB) && !defined(STACK)
#error "DDB or STACK options are required for WITNESS"
#endif
/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif
#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
#define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */
#define	LI_SLEEPABLE	0x00040000	/* Lock may be held while sleeping. */

#ifndef WITNESS_COUNT
#define	WITNESS_COUNT		1536
#endif

#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
#define	WITNESS_PENDLIST	(512 + (MAXCPU * 4))

/* Allocate 256 KB of stack data space */
#define	WITNESS_LO_DATA_COUNT	2048

/* Prime, gives load factor of ~2 at full load */
#define	WITNESS_LO_HASH_SIZE	1021
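
/*
 * A rough sanity check of the two constants above: 256 KB across 2048
 * entries budgets about 128 bytes per struct witness_lock_order_data
 * (saved stack, key, and hash link), and 2048 entries over 1021 buckets
 * is the ~2 load factor mentioned in the comment.
 */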
/*
 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
 * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_NCHILDREN	5
#define	LOCK_CHILDCOUNT	2048
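
/*
 * Worked out: LOCK_CHILDCOUNT (2048) list entries of LOCK_NCHILDREN (5)
 * instances each cap the tracker at 10240 concurrently held lock
 * instances before witness_lock_list_get() reports exhaustion below.
 */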
#define	MAX_W_NAME	64

#define	FULLGRAPH_SBUF_SIZE	512
/*
 * These flags go in the witness relationship matrix and describe the
 * relationship between any two struct witness objects.
 */
#define	WITNESS_UNRELATED        0x00    /* No lock order relation. */
#define	WITNESS_PARENT           0x01    /* Parent, aka direct ancestor. */
#define	WITNESS_ANCESTOR         0x02    /* Direct or indirect ancestor. */
#define	WITNESS_CHILD            0x04    /* Child, aka direct descendant. */
#define	WITNESS_DESCENDANT       0x08    /* Direct or indirect descendant. */
#define	WITNESS_ANCESTOR_MASK    (WITNESS_PARENT | WITNESS_ANCESTOR)
#define	WITNESS_DESCENDANT_MASK  (WITNESS_CHILD | WITNESS_DESCENDANT)
#define	WITNESS_RELATED_MASK						\
	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define	WITNESS_REVERSAL         0x10    /* A lock order reversal has been
					  * observed. */
#define	WITNESS_RESERVED1        0x20    /* Unused flag, reserved. */
#define	WITNESS_RESERVED2        0x40    /* Unused flag, reserved. */
#define	WITNESS_LOCK_ORDER_KNOWN 0x80    /* This lock order is known. */

/* Descendant to ancestor flags */
#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)

/* Ancestor to descendant flags */
#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)

#define	WITNESS_INDEX_ASSERT(i)						\
	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < witness_count)
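
/*
 * The shift pairs above make one side of the matrix derivable from the
 * other: _isitmyx() below depends on WITNESS_ATOD(r1) == r2 (and its
 * mirror) holding for every consistent pair of entries.  A minimal
 * standalone check of that identity (illustrative only):
 */
#if 0
#include <assert.h>

int
main(void)
{
        /* Same encoding as the flags above. */
        unsigned parent = 0x01, ancestor = 0x02, child = 0x04, descendant = 0x08;

        /* If w_rmatrix[p][c] says PARENT, w_rmatrix[c][p] must say CHILD. */
        assert((parent << 2) == child && (child >> 2) == parent);
        assert((ancestor << 2) == descendant && (descendant >> 2) == ancestor);
        return (0);
}
#endif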
static MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");

/*
 * Lock instances.  A lock instance is the data associated with a lock while
 * it is held by witness.  For example, a lock instance will hold the
 * recursion count of a lock.  Lock instances are held in lists.  Spin locks
 * are held in a per-cpu list while sleep locks are held in a per-thread list.
 */
struct lock_instance {
	struct lock_object	*li_lock;
	const char		*li_file;
	int			li_line;
	u_int			li_flags;
};

/*
 * A simple list type used to build the list of locks held by a thread
 * or CPU.  We can't simply embed the list in struct lock_object since a
 * lock may be held by more than one thread if it is a shared lock.  Locks
 * are added to the head of the list, so we fill up each list entry from
 * "the back" logically.  To ease some of the arithmetic, we actually fill
 * in each list entry the normal way (children[0] then children[1], etc.) but
 * when we traverse the list we read children[count-1] as the first entry
 * down to children[0] as the final entry.
 */
struct lock_list_entry {
	struct lock_list_entry	*ll_next;
	struct lock_instance	ll_children[LOCK_NCHILDREN];
	int			ll_count;
};
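
/*
 * A minimal sketch of the bookkeeping described above (illustrative,
 * using a stub instance type): acquires append at ll_children[ll_count],
 * and traversal walks from ll_children[ll_count - 1] down to [0] so the
 * most recent acquisition is visited first.
 */
#if 0
#include <stdio.h>

struct stub_instance { const char *name; };
struct stub_lle {
        struct stub_lle         *ll_next;
        struct stub_instance    ll_children[LOCK_NCHILDREN];
        int                     ll_count;
};

int
main(void)
{
        struct stub_lle lle = { .ll_next = NULL, .ll_count = 0 };
        const char *held[] = { "Giant", "vnode interlock", "sleepq chain" };
        int i;

        /* Acquire: fill the normal way, children[0], then children[1], ... */
        for (i = 0; i < 3; i++)
                lle.ll_children[lle.ll_count++].name = held[i];

        /* Traverse: newest first, children[count - 1] down to children[0]. */
        for (i = lle.ll_count - 1; i >= 0; i--)
                printf("%s\n", lle.ll_children[i].name);
        return (0);
}
#endif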
/*
 * The main witness structure.  One of these per named lock type in the system
 * (for example, "vnode interlock").
 */
struct witness {
	char			w_name[MAX_W_NAME];
	uint32_t		w_index;	/* Index in the relationship matrix */
	struct lock_class	*w_class;
	STAILQ_ENTRY(witness)	w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness)	w_typelist;	/* Witnesses of a type. */
	struct witness		*w_hash_next;	/* Linked list in hash buckets. */
	const char		*w_file;	/* File where last acquired */
	uint32_t		w_line;		/* Line where last acquired */
	uint32_t		w_refcount;
	uint16_t		w_num_ancestors; /* direct/indirect
						  * ancestor count */
	uint16_t		w_num_descendants; /* direct/indirect
						    * descendant count */
	int8_t			w_ddb_level;
	unsigned		w_displayed:1;
	unsigned		w_reversed:1;
};

STAILQ_HEAD(witness_list, witness);

/*
 * The witness hash table.  Keys are witness names (const char *), elements are
 * witness objects (struct witness *).
 */
struct witness_hash {
	struct witness	*wh_array[WITNESS_HASH_SIZE];
};
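
/*
 * witness_hash_djb2() (declared below) maps a name to a bucket in this
 * table.  As a sketch of the classic djb2 string hash it is named for --
 * assuming here, as an illustration, the usual hash * 33 + c form -- a
 * standalone rendering looks like this:
 */
#if 0
#include <stdint.h>

static uint32_t
djb2_bucket(const char *key, uint32_t nbuckets)
{
        uint32_t hash = 5381;
        const char *p;

        for (p = key; *p != '\0'; p++)
                hash = ((hash << 5) + hash) + (uint8_t)*p; /* hash * 33 + c */
        return (hash % nbuckets);       /* e.g. nbuckets == WITNESS_HASH_SIZE */
}
#endif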
/*
 * Key type for the lock order data hash table.
 */
struct witness_lock_order_key {
	uint16_t	from;
	uint16_t	to;
};

struct witness_lock_order_data {
	struct stack			wlod_stack;
	struct witness_lock_order_key	wlod_key;
	struct witness_lock_order_data	*wlod_next;
};

/*
 * The witness lock order data hash table.  Keys are witness index tuples
 * (struct witness_lock_order_key), elements are lock order data objects
 * (struct witness_lock_order_data).
 */
struct witness_lock_order_hash {
	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
};

struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};

struct witness_pendhelp {
	const char		*wh_type;
	struct lock_object	*wh_lock;
};

struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};
/*
 * Returns 0 if one of the locks is a spin lock and the other is not.
 * Returns 1 otherwise.
 */
static int
witness_lock_type_equal(struct witness *w1, struct witness *w2)
{

	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
	    (w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
}

static int
witness_lock_order_key_equal(const struct witness_lock_order_key *a,
    const struct witness_lock_order_key *b)
{

	return (a->from == b->from && a->to == b->to);
}
static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
		    const char *fname);
static void	adopt(struct witness *parent, struct witness *child);
static int	blessed(struct witness *, struct witness *);
static void	depart(struct witness *w);
static struct witness	*enroll(const char *description,
			    struct lock_class *lock_class);
static struct lock_instance	*find_instance(struct lock_list_entry *list,
				    const struct lock_object *lock);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static void	itismychild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_channel(SYSCTL_HANDLER_ARGS);
static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
static void	witness_ddb_compute_levels(void);
static void	witness_ddb_display(int(*)(const char *fmt, ...));
static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_ddb_level_descendants(struct witness *parent, int l);
static void	witness_ddb_list(struct thread *td);
static void	witness_debugger(int cond, const char *msg);
static void	witness_free(struct witness *m);
static struct witness	*witness_get(void);
static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
static struct witness	*witness_hash_get(const char *key);
static void	witness_hash_put(struct witness *w);
static void	witness_init_hash_tables(void);
static void	witness_increment_graph_generation(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_list_entry	*witness_lock_list_get(void);
static int	witness_lock_order_add(struct witness *parent,
		    struct witness *child);
static int	witness_lock_order_check(struct witness *parent,
		    struct witness *child);
static struct witness_lock_order_data	*witness_lock_order_get(
					    struct witness *parent,
					    struct witness *child);
static void	witness_list_lock(struct lock_instance *instance,
		    int (*prnt)(const char *fmt, ...));
static int	witness_output(const char *fmt, ...) __printflike(1, 2);
static int	witness_voutput(const char *fmt, va_list ap) __printflike(1, 0);
static void	witness_setflag(struct lock_object *lock, int flag, int set);
static SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    "Witness Locking");

/*
 * If set to 0, lock order checking is disabled.  If set to -1,
 * witness is completely disabled.  Otherwise witness performs full
 * lock order checking for all locks.  At runtime, lock order checking
 * may be toggled.  However, witness cannot be reenabled once it is
 * completely disabled.
 */
static int witness_watch = 1;
SYSCTL_PROC(_debug_witness, OID_AUTO, watch,
    CTLFLAG_RWTUN | CTLTYPE_INT | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_debug_witness_watch, "I",
    "witness is watching lock operations");
/*
 * When KDB is enabled and witness_kdb is 1, it will cause the system
 * to drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RWTUN, &witness_kdb, 0, "");

#if defined(DDB) || defined(KDB)
/*
 * When DDB or KDB is enabled and witness_trace is 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RWTUN, &witness_trace, 0, "");
#endif /* DDB || KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin, 0, "");
int	badstack_sbuf_size;

int	witness_count = WITNESS_COUNT;
SYSCTL_INT(_debug_witness, OID_AUTO, witness_count, CTLFLAG_RDTUN,
    &witness_count, 0, "");

/*
 * Output channel for witness messages.  By default we print to the console.
 */
enum witness_channel {
	WITNESS_CONSOLE,
	WITNESS_LOG,
	WITNESS_NONE,
};

static enum witness_channel witness_channel = WITNESS_CONSOLE;
SYSCTL_PROC(_debug_witness, OID_AUTO, output_channel,
    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_debug_witness_channel, "A",
    "Output channel for warnings");

/*
 * Call this to print out the relations between locks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_debug_witness_fullgraph, "A",
    "Show locks relation graphs");

/*
 * Call this to print out the stacks recorded for witness lock order
 * violations.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_debug_witness_badstacks, "A",
    "Show bad witness stacks");
static struct mtx w_mtx;

static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);

static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);

static struct lock_list_entry *w_lock_list_free = NULL;
static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
static u_int pending_cnt;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");

static struct witness *w_data;
static uint8_t **w_rmatrix;
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
static struct witness_hash w_hash;	/* The witness hash table. */

/* The lock order data hash */
static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
static struct witness_lock_order_data *w_lofree = NULL;
static struct witness_lock_order_hash w_lohash;
static int w_max_used_index = 0;
static unsigned int w_generation = 0;
static const char w_notrunning[] = "Witness not running\n";
static const char w_stillcold[] = "Witness is still cold\n";
static const char w_notallowed[] = "The sysctl is disabled on the arch\n";
static struct witness_order_list_entry order_lists[] = {
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "allprison", &lock_class_sx },
	{ "Giant", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "pmc-sleep", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_rw },
	{ "time lock", &lock_class_mtx_sleep },
	{ "umtx lock", &lock_class_mtx_sleep },
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_rm },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	/* protocol locks before interface locks, after UDP locks. */
	{ "in_multi_sx", &lock_class_sx },
	{ "udpinp", &lock_class_rw },
	{ "in_multi_list_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "ifnet_rw", &lock_class_rw },
	{ "if_addr_lock", &lock_class_mtx_sleep },
	/* protocol locks before interface locks, after UDP locks. */
	{ "in6_multi_sx", &lock_class_sx },
	{ "udpinp", &lock_class_rw },
	{ "in6_multi_list_mtx", &lock_class_mtx_sleep },
	{ "mld_mtx", &lock_class_mtx_sleep },
	{ "ifnet_rw", &lock_class_rw },
	{ "if_addr_lock", &lock_class_mtx_sleep },
	/* UNIX Domain Sockets */
	{ "unp_link_rwlock", &lock_class_rw },
	{ "unp_list_lock", &lock_class_mtx_sleep },
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "udp", &lock_class_mtx_sleep },
	{ "udpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "tcp", &lock_class_mtx_sleep },
	{ "tcpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "bpf global lock", &lock_class_sx },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "802.11 com lock", &lock_class_mtx_sleep},
	{ "network driver", &lock_class_mtx_sleep},
	{ "ng_node", &lock_class_mtx_sleep },
	{ "ng_worklist", &lock_class_mtx_sleep },
	{ "vm map (system)", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ "devthrd", &lock_class_mtx_sleep },
	{ "vm map (user)", &lock_class_sx },
	{ "vm object", &lock_class_rw },
	{ "vm page", &lock_class_mtx_sleep },
	{ "pmap pv global", &lock_class_rw },
	{ "pmap", &lock_class_mtx_sleep },
	{ "pmap pv list", &lock_class_rw },
	{ "vm page free queue", &lock_class_mtx_sleep },
	{ "vm pagequeue", &lock_class_mtx_sleep },
	/* kqueue/VFS interaction */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "ncvn", &lock_class_mtx_sleep },
	{ "ncbuc", &lock_class_rw },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "ncneg", &lock_class_mtx_sleep },
	{ "dn->dn_mtx", &lock_class_sx },
	{ "dr->dt.di.dr_mtx", &lock_class_sx },
	{ "db->db_mtx", &lock_class_sx },
	{ "TCP ID tree", &lock_class_rw },
	{ "tcp log id bucket", &lock_class_mtx_sleep },
	{ "tcpinp", &lock_class_rw },
	{ "TCP log expireq", &lock_class_mtx_sleep },
	{ "ap boot", &lock_class_mtx_spin },
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
	{ "cy", &lock_class_mtx_spin },
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
	{ "process slock", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "rm_spinlock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "turnstile lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "smp rendezvous", &lock_class_mtx_spin },
	{ "tlb0", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "pmc-per-proc", &lock_class_mtx_spin },
	{ "intrcnt", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
	{ "allpmaps", &lock_class_mtx_spin },
	{ "descriptor tables", &lock_class_mtx_spin },
	{ "clk", &lock_class_mtx_spin },
	{ "cpuset", &lock_class_mtx_spin },
	{ "mprof lock", &lock_class_mtx_spin },
	{ "zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
	{ "pmc-leaf", &lock_class_mtx_spin },
	{ "blocked lock", &lock_class_mtx_spin },
};
/*
 * Pairs of locks which have been blessed.  Witness does not complain about
 * order problems with blessed lock pairs.  Please do not add an entry to the
 * table without an explanatory comment.
 */
static struct witness_blessed blessed_list[] = {
	/*
	 * See the comment in ufs_dirhash.c.  Basically, a vnode lock serializes
	 * both lock orders, so a deadlock cannot happen as a result of this
	 * LOR.
	 */
	{ "dirhash", "bufwait" },

	/*
	 * A UFS vnode may be locked in vget() while a buffer belonging to the
	 * parent directory vnode is locked.
	 */
	{ "ufs", "bufwait" },
};
/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;
/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}

/*
 * Calculate the size of early witness structures.
 */
size_t
witness_startup_count(void)
{
	size_t sz;

	sz = sizeof(struct witness) * witness_count;
	sz += sizeof(*w_rmatrix) * (witness_count + 1);
	sz += sizeof(*w_rmatrix[0]) * (witness_count + 1) *
	    (witness_count + 1);

	return (sz);
}
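
/*
 * Worked through for the default WITNESS_COUNT of 1536: the dominant term
 * is the relationship matrix, (1536 + 1) * (1536 + 1) = 2,362,369 one-byte
 * entries (a bit over 2.25 MB), plus 1536 struct witness slots and 1537
 * row pointers.
 */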
/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code does
 * assume that the early boot is single-threaded at least until after this
 * routine is completed.
 */
void
witness_startup(void *mem)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	uintptr_t p;
	int i;

	p = (uintptr_t)mem;
	w_data = (void *)p;
	p += sizeof(struct witness) * witness_count;

	w_rmatrix = (void *)p;
	p += sizeof(*w_rmatrix) * (witness_count + 1);

	for (i = 0; i < witness_count + 1; i++) {
		w_rmatrix[i] = (void *)p;
		p += sizeof(*w_rmatrix[i]) * (witness_count + 1);
	}
	badstack_sbuf_size = witness_count * 256;

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = witness_count - 1; i >= 0; i--) {
		w = &w_data[i];
		memset(w, 0, sizeof(*w));
		w_data[i].w_index = i;	/* Witness index never changes. */
		witness_free(w);
	}
	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
	    ("%s: Invalid list of free witness objects", __func__));

	/* Witness with index 0 is not used to aid in debugging. */
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;

	for (i = 0; i < witness_count; i++) {
		memset(w_rmatrix[i], 0, sizeof(*w_rmatrix[i]) *
		    (witness_count + 1));
	}

	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);
	witness_init_hash_tables();

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
		lock = pending_locks[i].wh_lock;
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(pending_locks[i].wh_type,
		    LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
void
witness_init(struct lock_object *lock, const char *type)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		kassert_panic("%s: lock (%s) %s can not be recursable",
		    __func__, class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		kassert_panic("%s: lock (%s) %s can not be sleepable",
		    __func__, class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		kassert_panic("%s: lock (%s) %s can not be upgradable",
		    __func__, class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_startup() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch < 1 || KERNEL_PANICKED() ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		pending_locks[pending_cnt].wh_lock = lock;
		pending_locks[pending_cnt++].wh_type = type;
		if (pending_cnt > WITNESS_PENDLIST)
			panic("%s: pending locks list is too small, "
			    "increase WITNESS_PENDLIST\n",
			    __func__);
	} else
		lock->lo_witness = enroll(type, class);
}
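
/*
 * For reference, a lock opts out of checking by not passing LO_WITNESS;
 * for mutexes that is the MTX_NOWITNESS init flag, exactly how w_mtx
 * itself is created in witness_startup() above.  A hypothetical example
 * (names invented):
 */
#if 0
	struct mtx example_mtx;

	/* witness_init() will leave lo_witness NULL for this lock. */
	mtx_init(&example_mtx, "example", NULL, MTX_DEF | MTX_NOWITNESS);
#endif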
void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
		return;
	w = lock->lo_witness;

	mtx_lock_spin(&w_mtx);
	MPASS(w->w_refcount > 0);
	w->w_refcount--;

	if (w->w_refcount == 0)
		depart(w);
	mtx_unlock_spin(&w_mtx);
}
static void
witness_ddb_compute_levels(void)
{
	struct witness *w;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_ddb_level = -1;

	/*
	 * Look for locks with no parents and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		/* If the witness has ancestors (is not a root), skip it. */
		if (w->w_num_ancestors > 0)
			continue;
		witness_ddb_level_descendants(w, 0);
	}
}

static void
witness_ddb_level_descendants(struct witness *w, int l)
{
	int i;

	if (w->w_ddb_level >= l)
		return;

	w->w_ddb_level = l;
	l++;

	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_level_descendants(&w_data[i], l);
	}
}
static void
witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
    struct witness *w, int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		prnt(" ");
	prnt("%s (type: %s, depth: %d, active refs: %d)",
	    w->w_name, w->w_class->lc_name,
	    w->w_ddb_level, w->w_refcount);
	if (w->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	w->w_displayed = 1;
	if (w->w_file != NULL && w->w_line != 0)
		prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
		    w->w_line);
	else
		prnt(" -- never acquired\n");
	indent++;
	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_display_descendants(prnt, &w_data[i],
			    indent);
	}
}

static void
witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_ddb_level > 0)
			continue;

		/* This lock has no ancestors - display its descendants. */
		witness_ddb_display_descendants(prnt, w, 0);
	}
}
static void
witness_ddb_display(int(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	witness_ddb_compute_levels();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_ddb_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_ddb_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s (type: %s, depth: %d)\n", w->w_name,
		    w->w_class->lc_name, w->w_ddb_level);
	}
}
int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == -1 || KERNEL_PANICKED())
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	mtx_assert(&w_mtx, MA_NOTOWNED);
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (witness_watch &&
	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EINVAL);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
	itismychild(lock1->lo_witness, lock2->lo_witness);
	mtx_unlock_spin(&w_mtx);
	return (0);
}
void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line, struct lock_object *interlock)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1, *lock2, *plock;
	struct lock_class *class, *iclass;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
	    KERNEL_PANICKED())
		return;

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;

	if (class->lc_flags & LC_SLEEPLOCK) {
		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			kassert_panic("acquiring blockable sleep lock with "
			    "spinlock or critical section held (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		lock_list = td->td_sleeplocks;
		if (lock_list == NULL || lock_list->ll_count == 0)
			return;
	} else {
		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  Avoid problems with thread
		 * migration pinning the thread while checking if
		 * spinlocks are held.  If at least one spinlock is held
		 * the thread is in a safe path and it is allowed to
		 * unpin it.
		 */
		sched_pin();
		lock_list = PCPU_GET(spinlocks);
		if (lock_list == NULL || lock_list->ll_count == 0) {
			sched_unpin();
			return;
		}
		sched_unpin();
	}
	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			witness_output("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			witness_output("while exclusively locked from %s:%d\n",
			    fixup_filename(lock1->li_file), lock1->li_line);
			kassert_panic("excl->share");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			witness_output("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			witness_output("while share locked from %s:%d\n",
			    fixup_filename(lock1->li_file), lock1->li_line);
			kassert_panic("share->excl");
		}
		return;
	}

	/* Warn if the interlock is not locked exactly once. */
	if (interlock != NULL) {
		iclass = LOCK_CLASS(interlock);
		lock1 = find_instance(lock_list, interlock);
		if (lock1 == NULL)
			kassert_panic("interlock (%s) %s not locked @ %s:%d",
			    iclass->lc_name, interlock->lo_name,
			    fixup_filename(file), line);
		else if ((lock1->li_flags & LI_RECURSEMASK) != 0)
			kassert_panic("interlock (%s) %s recursed @ %s:%d",
			    iclass->lc_name, interlock->lo_name,
			    fixup_filename(file), line);
	}

	/*
	 * Find the previously acquired lock, but ignore interlocks.
	 */
	plock = &lock_list->ll_children[lock_list->ll_count - 1];
	if (interlock != NULL && plock->li_lock == interlock) {
		if (lock_list->ll_count > 1)
			plock =
			    &lock_list->ll_children[lock_list->ll_count - 2];
		else {
			lle = lock_list->ll_next;

			/*
			 * The interlock is the only lock we hold, so
			 * simply return.
			 */
			if (lle == NULL)
				return;
			plock = &lle->ll_children[lle->ll_count - 1];
		}
	}
	/*
	 * Try to perform most checks without a lock.  If this succeeds we
	 * can skip acquiring the lock and return success.  Otherwise we redo
	 * the check with the lock held to handle races with concurrent updates.
	 */
	w1 = plock->li_lock->lo_witness;
	if (witness_lock_order_check(w1, w))
		return;

	mtx_lock_spin(&w_mtx);
	if (witness_lock_order_check(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		return;
	}
	witness_lock_order_add(w1, w);

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	if (w1 == w) {
		i = w->w_index;
		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
			w_rmatrix[i][i] |= WITNESS_REVERSAL;
			w->w_reversed = 1;
			mtx_unlock_spin(&w_mtx);
			witness_output(
			    "acquiring duplicate lock of same type: \"%s\"\n",
			    w->w_name);
			witness_output(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
			    fixup_filename(plock->li_file), plock->li_line);
			witness_output(" 2nd %s @ %s:%d\n", lock->lo_name,
			    fixup_filename(file), line);
			witness_debugger(1, __func__);
		} else
			mtx_unlock_spin(&w_mtx);
		return;
	}
	mtx_assert(&w_mtx, MA_OWNED);

	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w))
		goto out;
	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {
			MPASS(j < LOCK_CHILDCOUNT * LOCK_NCHILDREN);
			lock1 = &lle->ll_children[i];

			/*
			 * Ignore the interlock.
			 */
			if (interlock == lock1->li_lock)
				continue;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			w1 = lock1->li_lock->lo_witness;
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}

			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_flags & LI_SLEEPABLE) != 0 &&
			    lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (flags & LOP_NOSLEEP) == 0 &&
			    lock1->li_lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (flags & LOP_NOSLEEP) == 0 &&
			    (lock1->li_flags & LI_SLEEPABLE) == 0)
				goto reversal;

			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_flags & LI_SLEEPABLE) == 0 &&
			    lock == &Giant.lock_object)
				goto reversal;

			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:

			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */

			/* Bail if this violation is known */
			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
				goto out;

			/* Record this as a violation */
			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
			w->w_reversed = w1->w_reversed = 1;
			witness_increment_graph_generation();

			/*
			 * If the lock order is blessed, bail before logging
			 * anything.  We don't look for other lock order
			 * violations though, which may be a bug.
			 */
			if (blessed(w, w1))
				goto out;
			mtx_unlock_spin(&w_mtx);
#ifdef WITNESS_NO_VNODE
			/*
			 * There are known LORs between VNODE locks. They are
			 * not an indication of a bug. VNODE locks are flagged
			 * as such (LO_IS_VNODE) and we don't yell if the LOR
			 * is between 2 VNODE locks.
			 */
			if ((lock->lo_flags & LO_IS_VNODE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_IS_VNODE) != 0)
				return;
#endif

			/*
			 * Ok, yell about it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (flags & LOP_NOSLEEP) == 0 &&
			    (lock1->li_flags & LI_SLEEPABLE) == 0)
				witness_output(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_flags & LI_SLEEPABLE) == 0
			    && lock == &Giant.lock_object)
				witness_output(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				witness_output("lock order reversal:\n");

			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				witness_output(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, fixup_filename(lock1->li_file),
				    lock1->li_line);
				witness_output(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name,
				    fixup_filename(file), line);
			} else {
				witness_output(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_witness->w_name,
				    fixup_filename(lock2->li_file),
				    lock2->li_line);
				witness_output(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, fixup_filename(lock1->li_file),
				    lock1->li_line);
				witness_output(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name,
				    fixup_filename(file), line);
			}
			witness_debugger(1, __func__);
			return;
		}
	}

	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(plock->li_lock == &Giant.lock_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (flags & LOP_NOSLEEP) == 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    w->w_name, plock->li_lock->lo_witness->w_name);
		itismychild(plock->li_lock->lo_witness, w);
	}
out:
	mtx_unlock_spin(&w_mtx);
}
void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
	    KERNEL_PANICKED())
		return;
	w = lock->lo_witness;
	td = curthread;

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	instance->li_flags = 0;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags |= LI_EXCLUSIVE;
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 && (flags & LOP_NOSLEEP) == 0)
		instance->li_flags |= LI_SLEEPABLE;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}
void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
		return;
	class = LOCK_CLASS(lock);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			kassert_panic(
			    "upgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			kassert_panic(
			    "upgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL) {
		kassert_panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name,
		    fixup_filename(file), line);
		return;
	}
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
			kassert_panic(
			    "upgrade of exclusive lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			kassert_panic(
			    "upgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK,
			    fixup_filename(file), line);
	}
	instance->li_flags |= LI_EXCLUSIVE;
}
void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
		return;
	class = LOCK_CLASS(lock);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			kassert_panic(
			    "downgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			kassert_panic(
			    "downgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL) {
		kassert_panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name,
		    fixup_filename(file), line);
		return;
	}
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
			kassert_panic(
			    "downgrade of shared lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			kassert_panic(
			    "downgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK,
			    fixup_filename(file), line);
	}
	instance->li_flags &= ~LI_EXCLUSIVE;
}
void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	int i, j;

	if (witness_cold || lock->lo_witness == NULL || KERNEL_PANICKED())
		return;
	td = curthread;
	class = LOCK_CLASS(lock);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	lle = *lock_list;
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}

	/*
	 * When disabling WITNESS through witness_watch we could end up
	 * with registered locks still left in the td_sleeplocks queue.
	 * We have to make sure we flush these queues, so just search for
	 * any leftover registered locks and remove them.
	 */
	if (witness_watch > 0) {
		kassert_panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		return;
	}
	return;
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		witness_output("shared unlock of (%s) %s @ %s:%d\n",
		    class->lc_name, lock->lo_name, fixup_filename(file), line);
		witness_output("while exclusively locked from %s:%d\n",
		    fixup_filename(instance->li_file), instance->li_line);
		kassert_panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		witness_output("exclusive unlock of (%s) %s @ %s:%d\n",
		    class->lc_name, lock->lo_name, fixup_filename(file), line);
		witness_output("while share locked from %s:%d\n",
		    fixup_filename(instance->li_file),
		    instance->li_line);
		kassert_panic("share->uexcl");
	}
	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}
	/* The lock is now being dropped, check for NORELEASE flag */
	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
		witness_output("forbidden unlock of (%s) %s @ %s:%d\n",
		    class->lc_name, lock->lo_name, fixup_filename(file), line);
		kassert_panic("lock marked norelease");
		return;
	}

	/* Otherwise, remove this item from the list. */
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;

	/*
	 * In order to reduce contention on w_mtx, we want each list to
	 * keep a head object whenever possible, so that frequent
	 * allocation from the free witness pool (and the subsequent
	 * locking) is avoided.  To keep the code simple, an empty head
	 * object also implies that no further objects follow it in the
	 * list, so ownership of the list must be handed over to the next
	 * entry when the current head needs to be freed.
	 */
	if ((*lock_list)->ll_count == 0) {
		if (*lock_list == lle) {
			if (lle->ll_next == NULL)
				return;
		} else
			lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}
void
witness_thread_exit(struct thread *td)
{
	struct lock_list_entry *lle;
	int i, n;

	lle = td->td_sleeplocks;
	if (lle == NULL || KERNEL_PANICKED())
		return;
	if (lle->ll_count != 0) {
		for (n = 0; lle != NULL; lle = lle->ll_next)
			for (i = lle->ll_count - 1; i >= 0; i--) {
				if (n == 0)
					witness_output(
		    "Thread %p exiting with the following locks held:\n", td);
				n++;
				witness_list_lock(&lle->ll_children[i],
				    witness_output);
			}
		kassert_panic(
		    "Thread %p cannot exit while holding sleeplocks\n", td);
	}
	witness_lock_list_free(lle);
}
/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * output channel along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch < 1 || KERNEL_PANICKED())
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.lock_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_flags & LI_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following %slocks held:\n",
				    (flags & WARN_SLEEPOK) != 0 ?
				    "non-sleepable " : "");
			}
			n++;
			witness_list_lock(lock1, printf);
		}

	/*
	 * Pin the thread in order to avoid problems with thread migration.
	 * Once all the verifications about spinlock ownership have passed,
	 * the thread is in a safe path and it can be unpinned.
	 */
	sched_pin();
	lock_list = PCPU_GET(spinlocks);
	if (lock_list != NULL && lock_list->ll_count != 0) {
		sched_unpin();

		/*
		 * We should only have one spinlock and as long as
		 * the flags cannot match for this lock's class,
		 * check if the first spinlock is the one curthread
		 * should hold.
		 */
		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
		    lock1->li_lock == lock && n == 0)
			return (0);

		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
		printf(" with the following %slocks held:\n",
		    (flags & WARN_SLEEPOK) != 0 ? "non-sleepable " : "");
		n += witness_list_locks(&lock_list, printf);
	} else
		sched_unpin();
	if (flags & WARN_PANIC && n)
		kassert_panic("%s", __func__);
	else
		witness_debugger(n, __func__);
	return (n);
}
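
/*
 * A typical call, for illustration (the flag combination is real, the
 * message string is invented): before voluntarily sleeping, a subsystem
 * can demand that nothing except Giant and sleepable locks is held, and
 * panic otherwise.
 */
#if 0
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
	    "sleeping with locks held");
#endif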
const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}
static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;

	MPASS(description != NULL);

	if (witness_watch == -1 || KERNEL_PANICKED())
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK)) {
		if (witness_skipspin)
			return (NULL);
	} else if ((lock_class->lc_flags & LC_SLEEPLOCK) == 0) {
		kassert_panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);
		return (NULL);
	}

	mtx_lock_spin(&w_mtx);
	w = witness_hash_get(description);
	if (w)
		goto found;
	if ((w = witness_get()) == NULL)
		return (NULL);
	MPASS(strlen(description) < MAX_W_NAME);
	strcpy(w->w_name, description);
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	}

	/* Insert new witness into the hash */
	witness_hash_put(w);
	witness_increment_graph_generation();
	mtx_unlock_spin(&w_mtx);
	return (w);
found:
	w->w_refcount++;
	if (w->w_refcount == 1)
		w->w_class = lock_class;
	mtx_unlock_spin(&w_mtx);
	if (lock_class != w->w_class)
		kassert_panic(
		    "lock (%s) %s does not match earlier (%s) lock",
		    description, lock_class->lc_name,
		    w->w_class->lc_name);
	return (w);
}
1901 depart(struct witness *w)
1904 MPASS(w->w_refcount == 0);
1905 if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1911 * Set file to NULL as it may point into a loadable module.
1915 witness_increment_graph_generation();
static void
adopt(struct witness *parent, struct witness *child)
{
	int pi, ci, i, j;

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	/* If the relationship is already known, there's no work to be done. */
	if (isitmychild(parent, child))
		return;

	/* When the structure of the graph changes, bump up the generation. */
	witness_increment_graph_generation();

	/*
	 * The hard part ... create the direct relationship, then propagate all
	 * indirect relationships.
	 */
	pi = parent->w_index;
	ci = child->w_index;
	WITNESS_INDEX_ASSERT(pi);
	WITNESS_INDEX_ASSERT(ci);

	w_rmatrix[pi][ci] |= WITNESS_PARENT;
	w_rmatrix[ci][pi] |= WITNESS_CHILD;

	/*
	 * If parent was not already an ancestor of child,
	 * then we increment the descendant and ancestor counters.
	 */
	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
		parent->w_num_descendants++;
		child->w_num_ancestors++;
	}

	/*
	 * Find each ancestor of 'pi'.  Note that 'pi' itself is counted as
	 * an ancestor of 'pi' during this loop.
	 */
	for (i = 1; i <= w_max_used_index; i++) {
		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
		    (i != pi))
			continue;

		/* Find each descendant of 'i' and mark it as a descendant. */
		for (j = 1; j <= w_max_used_index; j++) {
			/*
			 * Skip children that are already marked as
			 * descendants of 'i'.
			 */
			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
				continue;

			/*
			 * We are only interested in descendants of 'ci'. Note
			 * that 'ci' itself is counted as a descendant of 'ci'.
			 */
			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
			    (j != ci))
				continue;
			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
			w_data[i].w_num_descendants++;
			w_data[j].w_num_ancestors++;

			/*
			 * Make sure we aren't marking a node as both an
			 * ancestor and descendant.  We should have caught
			 * this as a lock order reversal earlier.
			 */
			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
				printf("witness rmatrix paradox! [%d][%d]=%d "
				    "both ancestor and descendant\n",
				    i, j, w_rmatrix[i][j]);
				kdb_backtrace();
				printf("Witness disabled.\n");
				witness_watch = -1;
			}
			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
				printf("witness rmatrix paradox! [%d][%d]=%d "
				    "both ancestor and descendant\n",
				    j, i, w_rmatrix[j][i]);
				kdb_backtrace();
				printf("Witness disabled.\n");
				witness_watch = -1;
			}
		}
	}
}
static void
itismychild(struct witness *parent, struct witness *child)
{
	int unlocked;

	MPASS(child != NULL && parent != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	if (!witness_lock_type_equal(parent, child)) {
		if (witness_cold == 0) {
			unlocked = 1;
			mtx_unlock_spin(&w_mtx);
		} else
			unlocked = 0;
		kassert_panic(
		    "%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
		    "the same lock type", __func__, parent->w_name,
		    parent->w_class->lc_name, child->w_name,
		    child->w_class->lc_name);
		if (unlocked)
			mtx_lock_spin(&w_mtx);
	}
	adopt(parent, child);
}
/*
 * Generic code for the isitmy*() functions.  The rmask parameter is the
 * expected relationship of w1 to w2.
 */
static int
_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
{
	unsigned char r1, r2;
	int i1, i2;

	i1 = w1->w_index;
	i2 = w2->w_index;
	WITNESS_INDEX_ASSERT(i1);
	WITNESS_INDEX_ASSERT(i2);
	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;

	/* The flags on one better be the inverse of the flags on the other */
	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
	    (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
		/* Don't squawk if we're potentially racing with an update. */
		if (!mtx_owned(&w_mtx))
			return (0);
		printf("%s: rmatrix mismatch between %s (index %d) and %s "
		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
		    "w_rmatrix[%d][%d] == %hhx\n",
		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
		    i2, i1, r2);
		kdb_backtrace();
		printf("Witness disabled.\n");
		witness_watch = -1;
	}
	return (r1 & rmask);
}
/*
 * Checks if @child is a direct child of @parent.
 */
static int
isitmychild(struct witness *parent, struct witness *child)
{

	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
}

/*
 * Checks if @descendant is a direct or indirect descendant of @ancestor.
 */
static int
isitmydescendant(struct witness *ancestor, struct witness *descendant)
{

	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
	    __func__));
}

static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < nitems(blessed_list); i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
static struct witness *
witness_get(void)
{
	struct witness *w;
	int index;

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	if (witness_watch == -1) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	if (STAILQ_EMPTY(&w_free)) {
		witness_watch = -1;
		mtx_unlock_spin(&w_mtx);
		printf("WITNESS: unable to allocate a new witness object\n");
		return (NULL);
	}
	w = STAILQ_FIRST(&w_free);
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;
	index = w->w_index;
	MPASS(index > 0 && index == w_max_used_index+1 &&
	    index < witness_count);
	bzero(w, sizeof(*w));
	w->w_index = index;
	if (index > w_max_used_index)
		w_max_used_index = index;
	return (w);
}

static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
	w_free_cnt++;
}

static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_watch == -1)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_watch = -1;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}

static struct lock_instance *
find_instance(struct lock_list_entry *list, const struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}

static void
witness_list_lock(struct lock_instance *instance,
    int (*prnt)(const char *fmt, ...))
{
	struct lock_object *lock;

	lock = instance->li_lock;
	prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
	if (lock->lo_witness->w_name != lock->lo_name)
		prnt(" (%s)", lock->lo_witness->w_name);
	prnt(" r = %d (%p) locked @ %s:%d\n",
	    instance->li_flags & LI_RECURSEMASK, lock,
	    fixup_filename(instance->li_file), instance->li_line);
}

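/*
 * Given the format strings above, a listed instance comes out roughly
 * as (lock, address and file names hypothetical):
 *
 *	exclusive sleep mutex foo (foo) r = 0 (0xc0ffee00) locked @ kern/foo.c:42
 */
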
static int
witness_output(const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = witness_voutput(fmt, ap);
	va_end(ap);
	return (ret);
}

static int
witness_voutput(const char *fmt, va_list ap)
{
	int ret;

	ret = 0;
	switch (witness_channel) {
	case WITNESS_CONSOLE:
		ret = vprintf(fmt, ap);
		break;
	case WITNESS_LOG:
		vlog(LOG_NOTICE, fmt, ap);
		break;
	case WITNESS_NONE:
		break;
	}
	return (ret);
}

static bool
witness_thread_has_locks(struct thread *td)
{

	if (td->td_sleeplocks == NULL)
		return (false);
	return (td->td_sleeplocks->ll_count != 0);
}

static bool
witness_proc_has_locks(struct proc *p)
{
	struct thread *td;

	FOREACH_THREAD_IN_PROC(p, td) {
		if (witness_thread_has_locks(td))
			return (true);
	}
	return (false);
}

int
witness_list_locks(struct lock_list_entry **lock_list,
    int (*prnt)(const char *fmt, ...))
{
	struct lock_list_entry *lle;
	int i, nheld;

	nheld = 0;
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			witness_list_lock(&lle->ll_children[i], prnt);
			nheld++;
		}
	return (nheld);
}

/*
 * This is a bit risky at best. We call this function when we have timed
 * out acquiring a spin lock, and we assume that the other CPU is stuck
 * with this lock held. So, we go groveling around in the other CPU's
 * per-cpu data to try to find the lock instance for this spin lock to
 * see when it was last acquired.
 */
void
witness_display_spinlock(struct lock_object *lock, struct thread *owner,
    int (*prnt)(const char *fmt, ...))
{
	struct lock_instance *instance;
	struct pcpu *pc;

	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
		return;
	pc = pcpu_find(owner->td_oncpu);
	instance = find_instance(pc->pc_spinlocks, lock);
	if (instance != NULL)
		witness_list_lock(instance, prnt);
}

void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	/*
	 * This function is used independently in locking code to deal with
	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
	 * Giant is moved into the global witness code.
	 */
	if (SCHEDULER_STOPPED())
		return;
	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL) {
		kassert_panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
		return;
	}
	*filep = instance->li_file;
	*linep = instance->li_line;
}

void
witness_restore(struct lock_object *lock, const char *file, int line)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	/*
	 * This function is used independently in locking code to deal with
	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
	 * Giant is moved into the global witness code.
	 */
	if (SCHEDULER_STOPPED())
		return;
	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		kassert_panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	lock->lo_witness->w_file = file;
	lock->lo_witness->w_line = line;
	if (instance == NULL)
		return;
	instance->li_file = file;
	instance->li_line = line;
}

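/*
 * witness_save() and witness_restore() are meant to bracket code that
 * temporarily gives up a lock, so that the original acquisition file
 * and line survive the round trip. A minimal sketch (hypothetical
 * mutex "m"):
 *
 *	const char *file;
 *	int line;
 *
 *	witness_save(&m->lock_object, &file, &line);
 *	... drop, then reacquire the lock ...
 *	witness_restore(&m->lock_object, file, line);
 */
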
void
witness_assert(const struct lock_object *lock, int flags, const char *file,
    int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch < 1 || KERNEL_PANICKED())
		return;
	class = LOCK_CLASS(lock);
	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		kassert_panic("Lock (%s) %s is not sleep or spin!",
		    class->lc_name, lock->lo_name);
		return;
	}
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			kassert_panic("Lock (%s) %s locked @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			kassert_panic("Lock (%s) %s not locked @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			kassert_panic(
			    "Lock (%s) %s not exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			kassert_panic(
			    "Lock (%s) %s exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			kassert_panic("Lock (%s) %s not recursed @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			kassert_panic("Lock (%s) %s recursed @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		break;
	default:
		kassert_panic("Invalid lock assertion at %s:%d.",
		    fixup_filename(file), line);
	}
#endif	/* INVARIANT_SUPPORT */
}

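/*
 * Callers normally reach witness_assert() through per-class assertion
 * macros, but a direct call looks like (hypothetical mutex "m"):
 *
 *	witness_assert(&m->lock_object, LA_XLOCKED, __FILE__, __LINE__);
 *
 * which, under INVARIANT_SUPPORT, panics unless the current thread
 * holds the lock exclusively.
 */
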
static void
witness_setflag(struct lock_object *lock, int flag, int set)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL) {
		kassert_panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
		return;
	}

	if (set)
		instance->li_flags |= flag;
	else
		instance->li_flags &= ~flag;
}

void
witness_norelease(struct lock_object *lock)
{

	witness_setflag(lock, LI_NORELEASE, 1);
}

void
witness_releaseok(struct lock_object *lock)
{

	witness_setflag(lock, LI_NORELEASE, 0);
}

#ifdef DDB
static void
witness_ddb_list(struct thread *td)
{

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	KASSERT(kdb_active, ("%s: not in the debugger", __func__));

	if (witness_watch < 1)
		return;

	witness_list_locks(&td->td_sleeplocks, db_printf);

	/*
	 * We only handle spinlocks if td == curthread. This is somewhat broken
	 * if td is currently executing on some other CPU and holds spin locks
	 * as we won't display those locks. If we had a MI way of getting
	 * the per-cpu data for a given cpu then we could use
	 * td->td_oncpu to get the list of spinlocks for this thread and "fix"
	 * this.
	 *
	 * That still wouldn't really fix this unless we locked the scheduler
	 * lock or stopped the other CPU to make sure it wasn't changing the
	 * list out from under us. It is probably best to just not try to
	 * handle threads on other CPU's for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		witness_list_locks(PCPU_PTR(spinlocks), db_printf);
}

DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;

	if (have_addr)
		td = db_lookup_thread(addr, true);
	else
		td = kdb_thread;
	witness_ddb_list(td);
}

DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
{
	struct thread *td;
	struct proc *p;

	/*
	 * It would be nice to list only threads and processes that actually
	 * held sleep locks, but that information is currently not exported
	 * by WITNESS.
	 */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!witness_proc_has_locks(p))
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (!witness_thread_has_locks(td))
				continue;
			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
			    p->p_comm, td, td->td_tid);
			witness_ddb_list(td);
			if (db_pager_quit)
				return;
		}
	}
}
DB_SHOW_ALIAS(alllocks, db_witness_list_all)

DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_ddb_display(db_printf);
}
#endif

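/*
 * From the ddb prompt, the commands defined above are invoked as
 * "show locks [addr]" for one thread, "show all locks" (also reachable
 * through the "show alllocks" alias) for every thread that holds a
 * sleep lock, and "show witness" for the full order graph.
 */
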
static void
sbuf_print_witness_badstacks(struct sbuf *sb, size_t *oldidx)
{
	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
	int generation, i, j;

	/* Allocate and init temporary storage space. */
	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
	    M_WAITOK | M_ZERO);
	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
	    M_WAITOK | M_ZERO);
	stack_zero(&tmp_data1->wlod_stack);
	stack_zero(&tmp_data2->wlod_stack);

restart:
	mtx_lock_spin(&w_mtx);
	generation = w_generation;
	mtx_unlock_spin(&w_mtx);
	sbuf_printf(sb, "Number of known direct relationships is %d\n",
	    w_lohash.wloh_count);
	for (i = 1; i < w_max_used_index; i++) {
		mtx_lock_spin(&w_mtx);
		if (generation != w_generation) {
			mtx_unlock_spin(&w_mtx);

			/* The graph has changed, try again. */
			*oldidx = 0;
			sbuf_clear(sb);
			goto restart;
		}

		w1 = &w_data[i];
		if (w1->w_reversed == 0) {
			mtx_unlock_spin(&w_mtx);
			continue;
		}

		/* Copy w1 locally so we can release the spin lock. */
		*tmp_w1 = *w1;
		mtx_unlock_spin(&w_mtx);

		if (tmp_w1->w_reversed == 0)
			continue;
		for (j = 1; j < w_max_used_index; j++) {
			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
				continue;
			mtx_lock_spin(&w_mtx);
			if (generation != w_generation) {
				mtx_unlock_spin(&w_mtx);

				/* The graph has changed, try again. */
				*oldidx = 0;
				sbuf_clear(sb);
				goto restart;
			}

			w2 = &w_data[j];
			data1 = witness_lock_order_get(w1, w2);
			data2 = witness_lock_order_get(w2, w1);

			/*
			 * Copy information locally so we can release the
			 * spin lock.
			 */
			*tmp_w2 = *w2;

			if (data1) {
				stack_zero(&tmp_data1->wlod_stack);
				stack_copy(&data1->wlod_stack,
				    &tmp_data1->wlod_stack);
			}
			if (data2 && data2 != data1) {
				stack_zero(&tmp_data2->wlod_stack);
				stack_copy(&data2->wlod_stack,
				    &tmp_data2->wlod_stack);
			}
			mtx_unlock_spin(&w_mtx);

			if (blessed(tmp_w1, tmp_w2))
				continue;

			sbuf_printf(sb,
	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
			if (data1) {
				sbuf_printf(sb,
			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
				sbuf_printf(sb, "\n");
			}
			if (data2 && data2 != data1) {
				sbuf_printf(sb,
			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
				sbuf_printf(sb, "\n");
			}
		}
	}
	mtx_lock_spin(&w_mtx);
	if (generation != w_generation) {
		mtx_unlock_spin(&w_mtx);

		/*
		 * The graph changed while we were printing stack data,
		 * try again.
		 */
		*oldidx = 0;
		sbuf_clear(sb);
		goto restart;
	}
	mtx_unlock_spin(&w_mtx);

	/* Free temporary storage space. */
	free(tmp_data1, M_TEMP);
	free(tmp_data2, M_TEMP);
	free(tmp_w1, M_TEMP);
	free(tmp_w2, M_TEMP);
}

static int
sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error;

	if (witness_watch < 1) {
		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
		return (error);
	}
	if (witness_cold) {
		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
		return (error);
	}

	sb = sbuf_new(NULL, NULL, badstack_sbuf_size, SBUF_AUTOEXTEND);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_print_witness_badstacks(sb, &req->oldidx);

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);

	return (error);
}

#ifdef DDB
static int
sbuf_db_printf_drain(void *arg __unused, const char *data, int len)
{

	return (db_printf("%.*s", len, data));
}

DB_SHOW_COMMAND(badstacks, db_witness_badstacks)
{
	static char buffer[128];
	static struct sbuf sb;
	size_t dummy;

	sbuf_new(&sb, buffer, sizeof(buffer), SBUF_FIXEDLEN);
	sbuf_set_drain(&sb, sbuf_db_printf_drain, NULL);
	sbuf_print_witness_badstacks(&sb, &dummy);
	sbuf_finish(&sb);
}
#endif

static int
sysctl_debug_witness_channel(SYSCTL_HANDLER_ARGS)
{
	static const struct {
		enum witness_channel channel;
		const char *name;
	} channels[] = {
		{ WITNESS_CONSOLE, "console" },
		{ WITNESS_LOG, "log" },
		{ WITNESS_NONE, "none" },
	};
	char buf[16];
	u_int i;
	int error;

	buf[0] = '\0';
	for (i = 0; i < nitems(channels); i++)
		if (witness_channel == channels[i].channel) {
			snprintf(buf, sizeof(buf), "%s", channels[i].name);
			break;
		}

	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	error = EINVAL;
	for (i = 0; i < nitems(channels); i++)
		if (strcmp(channels[i].name, buf) == 0) {
			witness_channel = channels[i].channel;
			error = 0;
			break;
		}

	return (error);
}

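/*
 * The handler above backs a string sysctl; assuming the conventional
 * debug.witness.channel OID, switching the report channel looks like:
 *
 *	sysctl debug.witness.channel=log
 *
 * after which witness_voutput() routes messages to vlog(9) instead of
 * the console.
 */
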
static int
sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
{
	struct witness *w;
	struct sbuf *sb;
	int error;

	if (req->newptr != NULL) {
		error = SYSCTL_OUT(req, w_notallowed, sizeof(w_notallowed));
		return (error);
	}
	if (witness_watch < 1) {
		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
		return (error);
	}
	if (witness_cold) {
		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
		return (error);
	}

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
	if (sb == NULL)
		return (ENOMEM);
	sbuf_printf(sb, "\n");

	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;
	STAILQ_FOREACH(w, &w_all, w_list)
		witness_add_fullgraph(sb, w);
	mtx_unlock_spin(&w_mtx);

	/*
	 * Close the sbuf and return to userland.
	 */
	error = sbuf_finish(sb);
	sbuf_delete(sb);

	return (error);
}

static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value > 1 || value < -1 ||
	    (witness_watch == -1 && value != witness_watch))
		return (EINVAL);
	witness_watch = value;

	return (error);
}

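/*
 * Likewise for the integer handler above (conventionally the
 * debug.witness.watch OID): values from -1 to 1 are accepted, and the
 * extra test rejects any change once witness_watch has been set to -1,
 * so that state is irreversible until reboot.
 */
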
static void
witness_add_fullgraph(struct sbuf *sb, struct witness *w)
{
	int i;

	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
		return;
	w->w_displayed = 1;

	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
			    w_data[i].w_name);
			witness_add_fullgraph(sb, &w_data[i]);
		}
	}
}

/*
 * A simple hash function. Takes a key pointer and a key size. If size == 0,
 * interprets the key as a string and reads until the null terminator.
 * Otherwise, reads the first size bytes. Returns an unsigned 32-bit hash
 * value computed from the key.
 */
static uint32_t
witness_hash_djb2(const uint8_t *key, uint32_t size)
{
	unsigned int hash = 5381;
	int i;

	/* hash = hash * 33 + key[i] */
	if (size)
		for (i = 0; i < size; i++)
			hash = ((hash << 5) + hash) + (unsigned int)key[i];
	else
		for (i = 0; key[i] != 0; i++)
			hash = ((hash << 5) + hash) + (unsigned int)key[i];

	return (hash);
}

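/*
 * Worked example of the recurrence above, for the two-byte string "ab"
 * starting from the djb2 seed 5381:
 *
 *	hash = 5381   * 33 + 'a' (97) = 177670
 *	hash = 177670 * 33 + 'b' (98) = 5863208
 *
 * (hash << 5) + hash is just hash * 33 without a multiply instruction.
 */
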
/*
 * Initializes the two witness hash tables. Called exactly once from
 * witness_initialize().
 */
static void
witness_init_hash_tables(void)
{
	int i;

	MPASS(witness_cold);

	/* Initialize the hash tables. */
	for (i = 0; i < WITNESS_HASH_SIZE; i++)
		w_hash.wh_array[i] = NULL;

	w_hash.wh_size = WITNESS_HASH_SIZE;
	w_hash.wh_count = 0;

	/* Initialize the lock order data hash. */
	w_lofree = NULL;
	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
		w_lodata[i].wlod_next = w_lofree;
		w_lofree = &w_lodata[i];
	}
	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
	w_lohash.wloh_count = 0;
	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
		w_lohash.wloh_array[i] = NULL;
}

static struct witness *
witness_hash_get(const char *key)
{
	struct witness *w;
	uint32_t hash;

	MPASS(key != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	hash = witness_hash_djb2((const uint8_t *)key, 0) % w_hash.wh_size;
	w = w_hash.wh_array[hash];
	while (w != NULL) {
		if (strcmp(w->w_name, key) == 0)
			goto out;
		w = w->w_hash_next;
	}

out:
	return (w);
}

static void
witness_hash_put(struct witness *w)
{
	uint32_t hash;

	MPASS(w != NULL);
	MPASS(w->w_name != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	KASSERT(witness_hash_get(w->w_name) == NULL,
	    ("%s: trying to add a hash entry that already exists!", __func__));
	KASSERT(w->w_hash_next == NULL,
	    ("%s: w->w_hash_next != NULL", __func__));

	hash = witness_hash_djb2((const uint8_t *)w->w_name, 0) %
	    w_hash.wh_size;
	w->w_hash_next = w_hash.wh_array[hash];
	w_hash.wh_array[hash] = w;
	w_hash.wh_count++;
}

static struct witness_lock_order_data *
witness_lock_order_get(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data = NULL;
	struct witness_lock_order_key key;
	unsigned int hash;

	MPASS(parent != NULL && child != NULL);
	key.from = parent->w_index;
	key.to = child->w_index;
	WITNESS_INDEX_ASSERT(key.from);
	WITNESS_INDEX_ASSERT(key.to);
	if ((w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
		goto out;

	hash = witness_hash_djb2((const uint8_t *)&key,
	    sizeof(key)) % w_lohash.wloh_size;
	data = w_lohash.wloh_array[hash];
	while (data != NULL) {
		if (witness_lock_order_key_equal(&data->wlod_key, &key))
			break;
		data = data->wlod_next;
	}

out:
	return (data);
}

/*
 * Verify that parent and child have a known relationship, are not the same,
 * and child is actually a child of parent. This is done without w_mtx
 * to avoid contention in the common case.
 */
static int
witness_lock_order_check(struct witness *parent, struct witness *child)
{

	if (parent != child &&
	    w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN &&
	    isitmychild(parent, child))
		return (1);

	return (0);
}

static int
witness_lock_order_add(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data = NULL;
	struct witness_lock_order_key key;
	unsigned int hash;

	MPASS(parent != NULL && child != NULL);
	key.from = parent->w_index;
	key.to = child->w_index;
	WITNESS_INDEX_ASSERT(key.from);
	WITNESS_INDEX_ASSERT(key.to);
	if (w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN)
		return (1);

	hash = witness_hash_djb2((const uint8_t *)&key,
	    sizeof(key)) % w_lohash.wloh_size;
	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
	data = w_lofree;
	if (data == NULL)
		return (0);
	w_lofree = data->wlod_next;
	data->wlod_next = w_lohash.wloh_array[hash];
	data->wlod_key = key;
	w_lohash.wloh_array[hash] = data;
	w_lohash.wloh_count++;
	stack_zero(&data->wlod_stack);
	stack_save(&data->wlod_stack);
	return (1);
}

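/*
 * Note that the (from, to) key is direction-sensitive: the order
 * A -> B and the order B -> A hash to different entries. That is what
 * lets sbuf_print_witness_badstacks() fetch and print a separate
 * "first seen at" stack trace for each side of a reversal.
 */
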
/* Call this whenever the structure of the witness graph changes. */
static void
witness_increment_graph_generation(void)
{

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	w_generation++;
}

static int
witness_output_drain(void *arg __unused, const char *data, int len)
{

	witness_output("%.*s", len, data);
	return (len);
}

static void
witness_debugger(int cond, const char *msg)
{
	char buf[32];
	struct sbuf sb;
	struct stack st;

	if (!cond)
		return;

	if (witness_trace) {
		sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
		sbuf_set_drain(&sb, witness_output_drain, NULL);

		stack_save(&st);
		witness_output("stack backtrace:\n");
		stack_sbuf_print_ddb(&sb, &st);

		sbuf_finish(&sb);
	}

	if (witness_kdb)
		kdb_enter(KDB_WHY_WITNESS, msg);
}