/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *      and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 *      Main Entry: witness
 *      Pronunciation: 'wit-n&s
 *      Function: noun
 *      Etymology: Middle English witnesse, from Old English witnes knowledge,
 *          testimony, witness, from 2wit
 *      Date: before 12th century
 *      1 : attestation of a fact or event : TESTIMONY
 *      2 : one that gives evidence; specifically : one who testifies in
 *          a cause or before a judicial tribunal
 *      3 : one asked to be present at a transaction so as to be able to
 *          testify to its having taken place
 *      4 : one who has personal knowledge of something
 *      5 a : something serving as evidence or proof : SIGN
 *        b : public affirmation by word or example of usually
 *            religious faith or conviction <the heroic witness to divine
 *            life -- Pilot>
 *      6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
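
/*
 * Illustrative sketch (editorial addition; the sx lock name below is
 * hypothetical): given rule 2), rule 3) means that both of the following
 * orders pass witness:
 *
 *      mtx_lock(&Giant);
 *      sx_xlock(&foo_sx);      <- legal: if this blocks, Giant is released
 *
 *      sx_xlock(&foo_sx);
 *      mtx_lock(&Giant);       <- legal: Giant is a non-sleepable lock
 */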

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <ddb/ddb.h>

#include <machine/stdarg.h>

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define KTR_WITNESS     KTR_SUBSYS
#else
#define KTR_WITNESS     0
#endif

/* Easier to stay with the old names. */
#define lo_list         lo_witness_data.lod_list
#define lo_witness      lo_witness_data.lod_witness

/* Define this to check for blessed mutexes */
#undef BLESSING

#define WITNESS_COUNT 1024
#define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4)
/*
 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
 * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define LOCK_CHILDCOUNT ((MAXCPU + 1024) * 2)

#define WITNESS_NCHILDREN 6

struct witness_child_list_entry;

struct witness {
        const   char *w_name;
        struct  lock_class *w_class;
        STAILQ_ENTRY(witness) w_list;           /* List of all witnesses. */
        STAILQ_ENTRY(witness) w_typelist;       /* Witnesses of a type. */
        struct  witness_child_list_entry *w_children;   /* Great evilness... */
        const   char *w_file;
        int     w_line;
        u_int   w_level;
        u_int   w_refcount;
        u_char  w_Giant_squawked:1;
        u_char  w_other_squawked:1;
        u_char  w_same_squawked:1;
        u_char  w_displayed:1;
};

struct witness_child_list_entry {
        struct  witness_child_list_entry *wcl_next;
        struct  witness *wcl_children[WITNESS_NCHILDREN];
        u_int   wcl_count;
};

STAILQ_HEAD(witness_list, witness);

#ifdef BLESSING
struct witness_blessed {
        const   char *b_lock1;
        const   char *b_lock2;
};
#endif

struct witness_order_list_entry {
        const   char *w_name;
        struct  lock_class *w_class;
};

#ifdef BLESSING
static int      blessed(struct witness *, struct witness *);
#endif
static int      depart(struct witness *w);
static struct   witness *enroll(const char *description,
                                struct lock_class *lock_class);
static int      insertchild(struct witness *parent, struct witness *child);
static int      isitmychild(struct witness *parent, struct witness *child);
static int      isitmydescendant(struct witness *parent, struct witness *child);
static int      itismychild(struct witness *parent, struct witness *child);
static void     removechild(struct witness *parent, struct witness *child);
static int      sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static const char *fixup_filename(const char *file);
static struct   witness *witness_get(void);
static void     witness_free(struct witness *m);
static struct   witness_child_list_entry *witness_child_get(void);
static void     witness_child_free(struct witness_child_list_entry *wcl);
static struct   lock_list_entry *witness_lock_list_get(void);
static void     witness_lock_list_free(struct lock_list_entry *lle);
static struct   lock_instance *find_instance(struct lock_list_entry *lock_list,
                                             struct lock_object *lock);
static void     witness_list_lock(struct lock_instance *instance);
#ifdef DDB
static void     witness_leveldescendents(struct witness *parent, int level);
static void     witness_levelall(void);
static void     witness_displaydescendants(void(*)(const char *fmt, ...),
                                           struct witness *, int indent);
static void     witness_display_list(void(*prnt)(const char *fmt, ...),
                                     struct witness_list *list);
static void     witness_display(void(*)(const char *fmt, ...));
static void     witness_list(struct thread *td);
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");

/*
 * If set to 0, witness is disabled.  If set to a non-zero value, witness
 * performs full lock order checking for all locks.  At runtime, this
 * value may be set to 0 to turn off witness.  However, witness is not
 * allowed to be turned back on once it is turned off.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
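
/*
 * Illustrative userland sketch (editorial addition; disable_witness() is a
 * hypothetical helper): witness can be turned off at runtime but never back
 * on, so writing a non-zero value fails with EINVAL (see
 * sysctl_debug_witness_watch() below).  Shell equivalent:
 * "sysctl debug.witness.watch=0".
 *
 *      #include <sys/types.h>
 *      #include <sys/sysctl.h>
 *
 *      int
 *      disable_witness(void)
 *      {
 *              int zero = 0;
 *
 *              return (sysctlbyname("debug.witness.watch", NULL, NULL,
 *                  &zero, sizeof(zero)));
 *      }
 */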

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is set to 1, it will cause the system
 * to drop into the kernel debugger when:
 *      - a lock hierarchy violation occurs
 *      - locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int     witness_kdb = 1;
#else
int     witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is set to 1, it will cause the system
 * to print a stack trace when:
 *      - a lock hierarchy violation occurs
 *      - locks are held when going to sleep.
 */
int     witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int     witness_skipspin = 1;
#else
int     witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN,
    &witness_skipspin, 0, "");

static struct mtx w_mtx;
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
static struct witness_child_list_entry *w_child_free = NULL;
static struct lock_list_entry *w_lock_list_free = NULL;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt, w_child_free_cnt, w_child_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_free_cnt, CTLFLAG_RD,
    &w_child_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_cnt, CTLFLAG_RD, &w_child_cnt, 0,
    "");

static struct witness w_data[WITNESS_COUNT];
static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];

static struct witness_order_list_entry order_lists[] = {
        /*
         * sx locks
         */
        { "proctree", &lock_class_sx },
        { "allproc", &lock_class_sx },
        { NULL, NULL },
        /*
         * Various mutexes
         */
        { "Giant", &lock_class_mtx_sleep },
        { "filedesc structure", &lock_class_mtx_sleep },
        { "pipe mutex", &lock_class_mtx_sleep },
        { "sigio lock", &lock_class_mtx_sleep },
        { "process group", &lock_class_mtx_sleep },
        { "process lock", &lock_class_mtx_sleep },
        { "session", &lock_class_mtx_sleep },
        { "uidinfo hash", &lock_class_mtx_sleep },
        { "uidinfo struct", &lock_class_mtx_sleep },
        { "allprison", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * Sockets
         */
        { "filedesc structure", &lock_class_mtx_sleep },
        { "accept", &lock_class_mtx_sleep },
        { "so_snd", &lock_class_mtx_sleep },
        { "so_rcv", &lock_class_mtx_sleep },
        { "sellck", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * Routing
         */
        { "so_rcv", &lock_class_mtx_sleep },
        { "radix node head", &lock_class_mtx_sleep },
        { "rtentry", &lock_class_mtx_sleep },
        { "ifaddr", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * Multicast - protocol locks before interface locks, after UDP locks.
         */
        { "udpinp", &lock_class_mtx_sleep },
        { "in_multi_mtx", &lock_class_mtx_sleep },
        { "igmp_mtx", &lock_class_mtx_sleep },
        { "if_addr_mtx", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * UNIX Domain Sockets
         */
        { "unp", &lock_class_mtx_sleep },
        { "so_snd", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * UDP/IP
         */
        { "udp", &lock_class_mtx_sleep },
        { "udpinp", &lock_class_mtx_sleep },
        { "so_snd", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * TCP/IP
         */
        { "tcp", &lock_class_mtx_sleep },
        { "tcpinp", &lock_class_mtx_sleep },
        { "so_snd", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * SLIP
         */
        { "slip_mtx", &lock_class_mtx_sleep },
        { "slip sc_mtx", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * netatalk
         */
        { "ddp_list_mtx", &lock_class_mtx_sleep },
        { "ddp_mtx", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * BPF
         */
        { "bpf global lock", &lock_class_mtx_sleep },
        { "bpf interface lock", &lock_class_mtx_sleep },
        { "bpf cdev lock", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * NFS server
         */
        { "nfsd_mtx", &lock_class_mtx_sleep },
        { "so_snd", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * CDEV
         */
        { "system map", &lock_class_mtx_sleep },
        { "vm page queue mutex", &lock_class_mtx_sleep },
        { "vnode interlock", &lock_class_mtx_sleep },
        { "cdev", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * spin locks
         */
#ifdef SMP
        { "ap boot", &lock_class_mtx_spin },
#endif
        { "rm.mutex_mtx", &lock_class_mtx_spin },
        { "hptlock", &lock_class_mtx_spin },
        { "sio", &lock_class_mtx_spin },
#ifdef __i386__
        { "cy", &lock_class_mtx_spin },
#endif
        { "uart_hwmtx", &lock_class_mtx_spin },
        { "sabtty", &lock_class_mtx_spin },
        { "zstty", &lock_class_mtx_spin },
        { "ng_node", &lock_class_mtx_spin },
        { "ng_worklist", &lock_class_mtx_spin },
        { "taskqueue_fast", &lock_class_mtx_spin },
        { "intr table", &lock_class_mtx_spin },
        { "sleepq chain", &lock_class_mtx_spin },
        { "sched lock", &lock_class_mtx_spin },
        { "turnstile chain", &lock_class_mtx_spin },
        { "td_contested", &lock_class_mtx_spin },
        { "callout", &lock_class_mtx_spin },
        { "entropy harvest mutex", &lock_class_mtx_spin },
        /*
         * leaf locks
         */
        { "allpmaps", &lock_class_mtx_spin },
        { "vm page queue free mutex", &lock_class_mtx_spin },
        { "icu", &lock_class_mtx_spin },
#ifdef SMP
        { "smp rendezvous", &lock_class_mtx_spin },
#if defined(__i386__) || defined(__amd64__)
        { "tlb", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
        { "ipi", &lock_class_mtx_spin },
        { "rtc_mtx", &lock_class_mtx_spin },
#endif
#endif
        { "clk", &lock_class_mtx_spin },
        { "mutex profiling lock", &lock_class_mtx_spin },
        { "kse zombie lock", &lock_class_mtx_spin },
        { "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
        { "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
        { "pcicfg", &lock_class_mtx_spin },
        { "NDIS thread lock", &lock_class_mtx_spin },
#endif
        { "tw_osl_io_lock", &lock_class_mtx_spin },
        { "tw_osl_q_lock", &lock_class_mtx_spin },
        { "tw_cl_io_lock", &lock_class_mtx_spin },
        { "tw_cl_intr_lock", &lock_class_mtx_spin },
        { "tw_cl_gen_lock", &lock_class_mtx_spin },
        { NULL, NULL },
        { NULL, NULL }
};
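
/*
 * Editorial note on reading the table above: within each group terminated
 * by { NULL, NULL }, each entry is enrolled as a child of the entry before
 * it, so a hypothetical group
 *
 *      { "A", &lock_class_mtx_sleep },
 *      { "B", &lock_class_mtx_sleep },
 *      { NULL, NULL },
 *
 * records that "A" must always be acquired before "B".  See the enrollment
 * loop in witness_initialize() below.
 */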

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed.  Witness does not complain about
 * order problems with blessed lock pairs.
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
        sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif
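
/*
 * Illustrative sketch (editorial addition; the lock names are hypothetical):
 * with BLESSING defined, a pair of locks whose order reversals are known to
 * be harmless would be listed as
 *
 *      static struct witness_blessed blessed_list[] = {
 *              { "vnode interlock", "cdev" },
 *      };
 */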

/*
 * List of locks initialized prior to witness being initialized whose
 * enrollment is currently deferred.
 */
STAILQ_HEAD(, lock_object) pending_locks =
    STAILQ_HEAD_INITIALIZER(pending_locks);

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code assumes
 * that early boot is single-threaded, at least until after this routine is
 * completed.
 */
static void
witness_initialize(void *dummy __unused)
{
        struct lock_object *lock;
        struct witness_order_list_entry *order;
        struct witness *w, *w1;
        int i;

        /*
         * We have to release Giant before initializing its witness
         * structure so that WITNESS doesn't get confused.
         */
        mtx_unlock(&Giant);
        mtx_assert(&Giant, MA_NOTOWNED);

        CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
        mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
            MTX_NOWITNESS);
        for (i = 0; i < WITNESS_COUNT; i++)
                witness_free(&w_data[i]);
        for (i = 0; i < WITNESS_CHILDCOUNT; i++)
                witness_child_free(&w_childdata[i]);
        for (i = 0; i < LOCK_CHILDCOUNT; i++)
                witness_lock_list_free(&w_locklistdata[i]);

        /* First add in all the specified order lists. */
        for (order = order_lists; order->w_name != NULL; order++) {
                w = enroll(order->w_name, order->w_class);
                if (w == NULL)
                        continue;
                w->w_file = "order list";
                for (order++; order->w_name != NULL; order++) {
                        w1 = enroll(order->w_name, order->w_class);
                        if (w1 == NULL)
                                continue;
                        w1->w_file = "order list";
                        if (!itismychild(w, w1))
                                panic("Not enough memory for static orders!");
                        w = w1;
                }
        }
        witness_spin_warn = 1;

        /* Iterate through all locks and add them to witness. */
        while (!STAILQ_EMPTY(&pending_locks)) {
                lock = STAILQ_FIRST(&pending_locks);
                STAILQ_REMOVE_HEAD(&pending_locks, lo_list);
                KASSERT(lock->lo_flags & LO_WITNESS,
                    ("%s: lock %s is on pending list but not LO_WITNESS",
                    __func__, lock->lo_name));
                lock->lo_witness = enroll(lock->lo_type, LOCK_CLASS(lock));
        }

        /* Mark the witness code as being ready for use. */
        witness_cold = 0;

        mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL);

static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
        int error, value;

        value = witness_watch;
        error = sysctl_handle_int(oidp, &value, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        error = suser(req->td);
        if (error != 0)
                return (error);
        if (value == witness_watch)
                return (0);
        if (value != 0)
                return (EINVAL);
        witness_watch = 0;
        return (0);
}

void
witness_init(struct lock_object *lock)
{
        struct lock_class *class;

        /* Various sanity checks. */
        class = LOCK_CLASS(lock);
        if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
            (class->lc_flags & LC_RECURSABLE) == 0)
                panic("%s: lock (%s) %s cannot be recursable", __func__,
                    class->lc_name, lock->lo_name);
        if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
            (class->lc_flags & LC_SLEEPABLE) == 0)
                panic("%s: lock (%s) %s cannot be sleepable", __func__,
                    class->lc_name, lock->lo_name);
        if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
            (class->lc_flags & LC_UPGRADABLE) == 0)
                panic("%s: lock (%s) %s cannot be upgradable", __func__,
                    class->lc_name, lock->lo_name);

        /*
         * If we shouldn't watch this lock, then just clear lo_witness.
         * Otherwise, if witness_cold is set, then it is too early to
         * enroll this lock, so defer it to witness_initialize() by adding
         * it to the pending_locks list.  If it is not too early, then enroll
         * the lock now.
         */
        if (witness_watch == 0 || panicstr != NULL ||
            (lock->lo_flags & LO_WITNESS) == 0)
                lock->lo_witness = NULL;
        else if (witness_cold) {
                STAILQ_INSERT_TAIL(&pending_locks, lock, lo_list);
                lock->lo_flags |= LO_ENROLLPEND;
        } else
                lock->lo_witness = enroll(lock->lo_type, class);
}
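
/*
 * Editorial note: witness_init() is reached from lock initialization code.
 * A sketch, with hypothetical lock names:
 *
 *      mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *              -> LO_WITNESS set; the lock is enrolled (or deferred)
 *      mtx_init(&bar_mtx, "bar", NULL, MTX_DEF | MTX_NOWITNESS);
 *              -> LO_WITNESS clear; lo_witness is set to NULL above
 */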

void
witness_destroy(struct lock_object *lock)
{
        struct lock_class *class;
        struct witness *w;

        class = LOCK_CLASS(lock);
        if (witness_cold)
                panic("lock (%s) %s destroyed while witness_cold",
                    class->lc_name, lock->lo_name);

        /* XXX: need to verify that no one holds the lock */
        if ((lock->lo_flags & (LO_WITNESS | LO_ENROLLPEND)) == LO_WITNESS &&
            lock->lo_witness != NULL) {
                w = lock->lo_witness;
                mtx_lock_spin(&w_mtx);
                MPASS(w->w_refcount > 0);
                w->w_refcount--;

                /*
                 * Lock is already released if we have an allocation failure
                 * and depart() fails.
                 */
                if (w->w_refcount != 0 || depart(w))
                        mtx_unlock_spin(&w_mtx);
        }

        /*
         * If this lock is destroyed before witness is up and running,
         * remove it from the pending list.
         */
        if (lock->lo_flags & LO_ENROLLPEND) {
                STAILQ_REMOVE(&pending_locks, lock, lock_object, lo_list);
                lock->lo_flags &= ~LO_ENROLLPEND;
        }
}

#ifdef DDB
static void
witness_levelall(void)
{
        struct witness_list *list;
        struct witness *w, *w1;

        /*
         * First clear all levels.
         */
        STAILQ_FOREACH(w, &w_all, w_list) {
                w->w_level = 0;
        }

        /*
         * Look for locks with no parent and level all their descendants.
         */
        STAILQ_FOREACH(w, &w_all, w_list) {
                /*
                 * This is just an optimization; technically we could get
                 * away with just walking the all-witness list each time.
                 */
                if (w->w_class->lc_flags & LC_SLEEPLOCK)
                        list = &w_sleep;
                else
                        list = &w_spin;
                STAILQ_FOREACH(w1, list, w_typelist) {
                        if (isitmychild(w1, w))
                                goto skip;
                }
                witness_leveldescendents(w, 0);
        skip:
                ;       /* silence GCC 3.x */
        }
}

static void
witness_leveldescendents(struct witness *parent, int level)
{
        struct witness_child_list_entry *wcl;
        int i;

        if (parent->w_level < level)
                parent->w_level = level;
        level++;
        for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
                for (i = 0; i < wcl->wcl_count; i++)
                        witness_leveldescendents(wcl->wcl_children[i], level);
}

static void
witness_displaydescendants(void(*prnt)(const char *fmt, ...),
                           struct witness *parent, int indent)
{
        struct witness_child_list_entry *wcl;
        int i, level;

        level = parent->w_level;
        prnt("%-2d", level);
        for (i = 0; i < indent; i++)
                prnt(" ");
        if (parent->w_refcount > 0)
                prnt("%s", parent->w_name);
        else
                prnt("(dead)");
        if (parent->w_displayed) {
                prnt(" -- (already displayed)\n");
                return;
        }
        parent->w_displayed = 1;
        if (parent->w_refcount > 0) {
                if (parent->w_file != NULL)
                        prnt(" -- last acquired @ %s:%d", parent->w_file,
                            parent->w_line);
        }
        prnt("\n");
        for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
                for (i = 0; i < wcl->wcl_count; i++)
                        witness_displaydescendants(prnt,
                            wcl->wcl_children[i], indent + 1);
}

static void
witness_display_list(void(*prnt)(const char *fmt, ...),
                     struct witness_list *list)
{
        struct witness *w;

        STAILQ_FOREACH(w, list, w_typelist) {
                if (w->w_file == NULL || w->w_level > 0)
                        continue;
                /*
                 * This lock has no ancestors; display its descendants.
                 */
                witness_displaydescendants(prnt, w, 0);
        }
}

static void
witness_display(void(*prnt)(const char *fmt, ...))
{
        struct witness *w;

        KASSERT(!witness_cold, ("%s: witness_cold", __func__));
        witness_levelall();

        /* Clear all the displayed flags. */
        STAILQ_FOREACH(w, &w_all, w_list) {
                w->w_displayed = 0;
        }

        /*
         * First, handle sleep locks which have been acquired at least
         * once.
         */
        prnt("Sleep locks:\n");
        witness_display_list(prnt, &w_sleep);

        /*
         * Now do spin locks which have been acquired at least once.
         */
        prnt("\nSpin locks:\n");
        witness_display_list(prnt, &w_spin);

        /*
         * Finally, any locks which have not been acquired yet.
         */
        prnt("\nLocks which were never acquired:\n");
        STAILQ_FOREACH(w, &w_all, w_list) {
                if (w->w_file != NULL || w->w_refcount == 0)
                        continue;
                prnt("%s\n", w->w_name);
        }
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

        if (file == NULL)
                return (NULL);
        while (strncmp(file, "../", 3) == 0)
                file += 3;
        return (file);
}
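
/*
 * For example (editorial note), a path such as "../../dev/foo/foo.c" is
 * reported as "dev/foo/foo.c".
 */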

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

        if (witness_watch == 0 || panicstr != NULL)
                return (0);

        /* Require locks that witness knows about. */
        if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
            lock2->lo_witness == NULL)
                return (EINVAL);

        MPASS(!mtx_owned(&w_mtx));
        mtx_lock_spin(&w_mtx);

        /*
         * If we already have either an explicit or implied lock order that
         * is the other way around, then return an error.
         */
        if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
                mtx_unlock_spin(&w_mtx);
                return (EDOOFUS);
        }

        /* Try to add the new order. */
        CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
            lock2->lo_type, lock1->lo_type);
        if (!itismychild(lock1->lo_witness, lock2->lo_witness))
                return (ENOMEM);
        mtx_unlock_spin(&w_mtx);
        return (0);
}
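
/*
 * Illustrative sketch (editorial addition; the softc and lock names are
 * hypothetical): a subsystem can pin down an explicit order between two of
 * its locks:
 *
 *      error = witness_defineorder(&sc->parent_mtx.mtx_object,
 *          &sc->child_mtx.mtx_object);
 *
 * EDOOFUS means the reverse order is already known to witness.
 */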

void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line)
{
        struct lock_list_entry **lock_list, *lle;
        struct lock_instance *lock1, *lock2;
        struct lock_class *class;
        struct witness *w, *w1;
        struct thread *td;
        int i, j;

        if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
            panicstr != NULL)
                return;

        /*
         * Try locks do not block if they fail to acquire the lock, thus
         * there is no danger of deadlocks or of switching while holding a
         * spin lock if we acquire a lock via a try operation.  This
         * function shouldn't even be called for try locks, so panic if
         * that happens.
         */
        if (flags & LOP_TRYLOCK)
                panic("%s should not be called for try lock operations",
                    __func__);

        w = lock->lo_witness;
        class = LOCK_CLASS(lock);
        td = curthread;
        file = fixup_filename(file);

        if (class->lc_flags & LC_SLEEPLOCK) {
                /*
                 * Since spin locks include a critical section, this check
                 * implicitly enforces a lock order of all sleep locks before
                 * all spin locks.
                 */
                if (td->td_critnest != 0 && !kdb_active)
                        panic("blockable sleep lock (%s) %s @ %s:%d",
                            class->lc_name, lock->lo_name, file, line);

                /*
                 * If this is the first lock acquired then just return as
                 * no order checking is needed.
                 */
                if (td->td_sleeplocks == NULL)
                        return;
                lock_list = &td->td_sleeplocks;
        } else {
                /*
                 * If this is the first lock, just return as no order
                 * checking is needed.  We check this in both if clauses
                 * here as unifying the check would require us to use a
                 * critical section to ensure we don't migrate while doing
                 * the check.  Note that if this is not the first lock, we
                 * are already in a critical section and are safe for the
                 * rest of the check.
                 */
                if (PCPU_GET(spinlocks) == NULL)
                        return;
                lock_list = PCPU_PTR(spinlocks);
        }

        /*
         * Check to see if we are recursing on a lock we already own.  If
         * so, make sure that we don't mismatch exclusive and shared lock
         * acquires.
         */
        lock1 = find_instance(*lock_list, lock);
        if (lock1 != NULL) {
                if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
                    (flags & LOP_EXCLUSIVE) == 0) {
                        printf("shared lock of (%s) %s @ %s:%d\n",
                            class->lc_name, lock->lo_name, file, line);
                        printf("while exclusively locked from %s:%d\n",
                            lock1->li_file, lock1->li_line);
                        panic("share->excl");
                }
                if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
                    (flags & LOP_EXCLUSIVE) != 0) {
                        printf("exclusive lock of (%s) %s @ %s:%d\n",
                            class->lc_name, lock->lo_name, file, line);
                        printf("while share locked from %s:%d\n",
                            lock1->li_file, lock1->li_line);
                        panic("excl->share");
                }
                return;
        }

        /*
         * Try locks do not block if they fail to acquire the lock, thus
         * there is no danger of deadlocks or of switching while holding a
         * spin lock if we acquire a lock via a try operation.
         */
        if (flags & LOP_TRYLOCK)
                return;

        /*
         * Check for duplicate locks of the same type.  Note that we only
         * have to check for this on the last lock we just acquired.  Any
         * other cases will be caught as lock order violations.
         */
        lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
        w1 = lock1->li_lock->lo_witness;
        if (w1 == w) {
                if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK) ||
                    (flags & LOP_DUPOK))
                        return;
                w->w_same_squawked = 1;
                printf("acquiring duplicate lock of same type: \"%s\"\n",
                        lock->lo_type);
                printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
                    lock1->li_file, lock1->li_line);
                printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef KDB
                goto debugger;
#else
                return;
#endif
        }
        MPASS(!mtx_owned(&w_mtx));
        mtx_lock_spin(&w_mtx);
        /*
         * If we know that the lock we are acquiring comes after
         * the lock we most recently acquired in the lock order tree,
         * then there is no need for any further checks.
         */
        if (isitmychild(w1, w)) {
                mtx_unlock_spin(&w_mtx);
                return;
        }
        for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
                for (i = lle->ll_count - 1; i >= 0; i--, j++) {
                        MPASS(j < WITNESS_COUNT);
                        lock1 = &lle->ll_children[i];
                        w1 = lock1->li_lock->lo_witness;

                        /*
                         * If this lock doesn't undergo witness checking,
                         * then skip it.
                         */
                        if (w1 == NULL) {
                                KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
                                    ("lock missing witness structure"));
                                continue;
                        }
                        /*
                         * If we are locking Giant and this is a sleepable
                         * lock, then skip it.
                         */
                        if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
                            lock == &Giant.mtx_object)
                                continue;
                        /*
                         * If we are locking a sleepable lock and this lock
                         * is Giant, then skip it.
                         */
                        if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
                            lock1->li_lock == &Giant.mtx_object)
                                continue;
                        /*
                         * If we are locking a sleepable lock and this lock
                         * isn't sleepable, we want to treat it as a lock
                         * order violation to enforce a general lock order of
                         * sleepable locks before non-sleepable locks.
                         */
                        if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
                            (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
                                goto reversal;
                        /*
                         * If we are locking Giant and this is a non-sleepable
                         * lock, then treat it as a reversal.
                         */
                        if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
                            lock == &Giant.mtx_object)
                                goto reversal;
                        /*
                         * Check the lock order hierarchy for a reversal.
                         */
                        if (!isitmydescendant(w, w1))
                                continue;
                reversal:
                        /*
                         * We have a lock order violation, check to see if it
                         * is allowed or has already been yelled about.
                         */
                        mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
                        /*
                         * If the lock order is blessed, just bail.  We don't
                         * look for other lock order violations though, which
                         * may be a bug.
                         */
                        if (blessed(w, w1))
                                return;
#endif
                        if (lock1->li_lock == &Giant.mtx_object) {
                                if (w1->w_Giant_squawked)
                                        return;
                                else
                                        w1->w_Giant_squawked = 1;
                        } else {
                                if (w1->w_other_squawked)
                                        return;
                                else
                                        w1->w_other_squawked = 1;
                        }
                        /*
                         * Ok, yell about it.
                         */
                        if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
                            (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
                                printf(
                "lock order reversal: (sleepable after non-sleepable)\n");
                        else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
                            && lock == &Giant.mtx_object)
                                printf(
                "lock order reversal: (Giant after non-sleepable)\n");
                        else
                                printf("lock order reversal:\n");
                        /*
                         * Try to locate an earlier lock with
                         * witness w in our list.
                         */
                        do {
                                lock2 = &lle->ll_children[i];
                                MPASS(lock2->li_lock != NULL);
                                if (lock2->li_lock->lo_witness == w)
                                        break;
                                if (i == 0 && lle->ll_next != NULL) {
                                        lle = lle->ll_next;
                                        i = lle->ll_count - 1;
                                        MPASS(i >= 0 && i < LOCK_NCHILDREN);
                                } else
                                        i--;
                        } while (i >= 0);
                        if (i < 0) {
                                printf(" 1st %p %s (%s) @ %s:%d\n",
                                    lock1->li_lock, lock1->li_lock->lo_name,
                                    lock1->li_lock->lo_type, lock1->li_file,
                                    lock1->li_line);
                                printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
                                    lock->lo_name, lock->lo_type, file, line);
                        } else {
                                printf(" 1st %p %s (%s) @ %s:%d\n",
                                    lock2->li_lock, lock2->li_lock->lo_name,
                                    lock2->li_lock->lo_type, lock2->li_file,
                                    lock2->li_line);
                                printf(" 2nd %p %s (%s) @ %s:%d\n",
                                    lock1->li_lock, lock1->li_lock->lo_name,
                                    lock1->li_lock->lo_type, lock1->li_file,
                                    lock1->li_line);
                                printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
                                    lock->lo_name, lock->lo_type, file, line);
                        }
#ifdef KDB
                        goto debugger;
#else
                        return;
#endif
                }
        }
        lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
        /*
         * If requested, build a new lock order.  However, don't build a new
         * relationship between a sleepable lock and Giant if it is in the
         * wrong direction.  The correct lock order is that sleepable locks
         * always come before Giant.
         */
        if (flags & LOP_NEWORDER &&
            !(lock1->li_lock == &Giant.mtx_object &&
            (lock->lo_flags & LO_SLEEPABLE) != 0)) {
                CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
                    lock->lo_type, lock1->li_lock->lo_type);
                if (!itismychild(lock1->li_lock->lo_witness, w))
                        /* Witness is dead. */
                        return;
        }
        mtx_unlock_spin(&w_mtx);
        return;

#ifdef KDB
debugger:
        if (witness_trace)
                kdb_backtrace();
        if (witness_kdb)
                kdb_enter(__func__);
#endif
}
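
/*
 * Editorial note: a reversal caught above is reported roughly as follows
 * (the addresses, lock names, and locations here are hypothetical):
 *
 *      lock order reversal:
 *       1st 0xc1234567 bar (bar) @ dev/foo/foo.c:42
 *       2nd 0xc89abcde foo (foo) @ dev/foo/foo.c:127
 *
 * followed by a stack trace and/or debugger entry if debug.witness.trace
 * and debug.witness.kdb are set.
 */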

void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
        struct lock_list_entry **lock_list, *lle;
        struct lock_instance *instance;
        struct witness *w;
        struct thread *td;

        if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
            panicstr != NULL)
                return;
        w = lock->lo_witness;
        td = curthread;
        file = fixup_filename(file);

        /* Determine lock list for this lock. */
        if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
                lock_list = &td->td_sleeplocks;
        else
                lock_list = PCPU_PTR(spinlocks);

        /* Check to see if we are recursing on a lock we already own. */
        instance = find_instance(*lock_list, lock);
        if (instance != NULL) {
                instance->li_flags++;
                CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
                    td->td_proc->p_pid, lock->lo_name,
                    instance->li_flags & LI_RECURSEMASK);
                instance->li_file = file;
                instance->li_line = line;
                return;
        }

        /* Update per-witness last file and line acquire. */
        w->w_file = file;
        w->w_line = line;

        /* Find the next open lock instance in the list and fill it. */
        lle = *lock_list;
        if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
                lle = witness_lock_list_get();
                if (lle == NULL)
                        return;
                lle->ll_next = *lock_list;
                CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
                    td->td_proc->p_pid, lle);
                *lock_list = lle;
        }
        instance = &lle->ll_children[lle->ll_count++];
        instance->li_lock = lock;
        instance->li_line = line;
        instance->li_file = file;
        if ((flags & LOP_EXCLUSIVE) != 0)
                instance->li_flags = LI_EXCLUSIVE;
        else
                instance->li_flags = 0;
        CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
            td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
        struct lock_instance *instance;
        struct lock_class *class;

        KASSERT(!witness_cold, ("%s: witness_cold", __func__));
        if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
                return;
        class = LOCK_CLASS(lock);
        file = fixup_filename(file);
        if ((lock->lo_flags & LO_UPGRADABLE) == 0)
                panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        if ((flags & LOP_TRYLOCK) == 0)
                panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
                    lock->lo_name, file, line);
        if ((class->lc_flags & LC_SLEEPLOCK) == 0)
                panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        instance = find_instance(curthread->td_sleeplocks, lock);
        if (instance == NULL)
                panic("upgrade of unlocked lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        if ((instance->li_flags & LI_EXCLUSIVE) != 0)
                panic("upgrade of exclusive lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        if ((instance->li_flags & LI_RECURSEMASK) != 0)
                panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
                    class->lc_name, lock->lo_name,
                    instance->li_flags & LI_RECURSEMASK, file, line);
        instance->li_flags |= LI_EXCLUSIVE;
}
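
/*
 * Editorial note: witness_upgrade() is driven from sleepable lock code such
 * as sx_try_upgrade().  Only try-style upgrades are accepted because a
 * blocking upgrade of a shared lock could deadlock against another shared
 * holder attempting the same upgrade.
 */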

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
        struct lock_instance *instance;
        struct lock_class *class;

        KASSERT(!witness_cold, ("%s: witness_cold", __func__));
        if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
                return;
        class = LOCK_CLASS(lock);
        file = fixup_filename(file);
        if ((lock->lo_flags & LO_UPGRADABLE) == 0)
                panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        if ((class->lc_flags & LC_SLEEPLOCK) == 0)
                panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        instance = find_instance(curthread->td_sleeplocks, lock);
        if (instance == NULL)
                panic("downgrade of unlocked lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        if ((instance->li_flags & LI_EXCLUSIVE) == 0)
                panic("downgrade of shared lock (%s) %s @ %s:%d",
                    class->lc_name, lock->lo_name, file, line);
        if ((instance->li_flags & LI_RECURSEMASK) != 0)
                panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
                    class->lc_name, lock->lo_name,
                    instance->li_flags & LI_RECURSEMASK, file, line);
        instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
        struct lock_list_entry **lock_list, *lle;
        struct lock_instance *instance;
        struct lock_class *class;
        struct thread *td;
        register_t s;
        int i, j;

        if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
            panicstr != NULL)
                return;
        td = curthread;
        class = LOCK_CLASS(lock);
        file = fixup_filename(file);

        /* Find lock instance associated with this lock. */
        if (class->lc_flags & LC_SLEEPLOCK)
                lock_list = &td->td_sleeplocks;
        else
                lock_list = PCPU_PTR(spinlocks);
        for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
                for (i = 0; i < (*lock_list)->ll_count; i++) {
                        instance = &(*lock_list)->ll_children[i];
                        if (instance->li_lock == lock)
                                goto found;
                }
        panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
            file, line);
found:

        /* First, check for shared/exclusive mismatches. */
        if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
            (flags & LOP_EXCLUSIVE) == 0) {
                printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
                    lock->lo_name, file, line);
                printf("while exclusively locked from %s:%d\n",
                    instance->li_file, instance->li_line);
                panic("excl->ushare");
        }
        if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
            (flags & LOP_EXCLUSIVE) != 0) {
                printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
                    lock->lo_name, file, line);
                printf("while share locked from %s:%d\n", instance->li_file,
                    instance->li_line);
                panic("share->uexcl");
        }

        /* If we are recursed, unrecurse. */
        if ((instance->li_flags & LI_RECURSEMASK) > 0) {
                CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
                    td->td_proc->p_pid, instance->li_lock->lo_name,
                    instance->li_flags);
                instance->li_flags--;
                return;
        }

        /* Otherwise, remove this item from the list. */
        s = intr_disable();
        CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
            td->td_proc->p_pid, instance->li_lock->lo_name,
            (*lock_list)->ll_count - 1);
        for (j = i; j < (*lock_list)->ll_count - 1; j++)
                (*lock_list)->ll_children[j] =
                    (*lock_list)->ll_children[j + 1];
        (*lock_list)->ll_count--;
        intr_restore(s);

        /* If this lock list entry is now empty, free it. */
        if ((*lock_list)->ll_count == 0) {
                lle = *lock_list;
                *lock_list = lle->ll_next;
                CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
                    td->td_proc->p_pid, lle);
                witness_lock_list_free(lle);
        }
}
1297
1298 /*
1299  * Warn if any locks other than 'lock' are held.  Flags can be passed in to
1300  * exempt Giant and sleepable locks from the checks as well.  If any
1301  * non-exempt locks are held, then a supplied message is printed to the
1302  * console along with a list of the offending locks.  If indicated in the
1303  * flags then a failure results in a panic as well.
1304  */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
        struct lock_list_entry *lle;
        struct lock_instance *lock1;
        struct thread *td;
        va_list ap;
        int i, n;

        if (witness_cold || witness_watch == 0 || panicstr != NULL)
                return (0);
        n = 0;
        td = curthread;
        for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
                for (i = lle->ll_count - 1; i >= 0; i--) {
                        lock1 = &lle->ll_children[i];
                        if (lock1->li_lock == lock)
                                continue;
                        if (flags & WARN_GIANTOK &&
                            lock1->li_lock == &Giant.mtx_object)
                                continue;
                        if (flags & WARN_SLEEPOK &&
                            (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
                                continue;
                        if (n == 0) {
                                va_start(ap, fmt);
                                vprintf(fmt, ap);
                                va_end(ap);
                                printf(" with the following");
                                if (flags & WARN_SLEEPOK)
                                        printf(" non-sleepable");
                                printf(" locks held:\n");
                        }
                        n++;
                        witness_list_lock(lock1);
                }
        if (PCPU_GET(spinlocks) != NULL) {
                /*
                 * Since we already hold a spin lock, preemption is
                 * already blocked.
                 */
                if (n == 0) {
                        va_start(ap, fmt);
                        vprintf(fmt, ap);
                        va_end(ap);
                        printf(" with the following");
                        if (flags & WARN_SLEEPOK)
                                printf(" non-sleepable");
                        printf(" locks held:\n");
                }
                n += witness_list_locks(PCPU_PTR(spinlocks));
        }
        if (flags & WARN_PANIC && n)
                panic("witness_warn");
#ifdef KDB
        else if (witness_kdb && n)
                kdb_enter(__func__);
        else if (witness_trace && n)
                kdb_backtrace();
#endif
        return (n);
}
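
/*
 * Illustrative use only (not taken from this file): a hypothetical
 * caller that is about to sleep could verify that nothing unexpected
 * is held, with Giant and sleepable locks exempted and a panic on any
 * violation:
 *
 *	witness_warn(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
 *	    "sleeping in %s", __func__);
 *
 * Passing a non-NULL 'lock' additionally exempts that one lock from
 * the check.
 */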

const char *
witness_file(struct lock_object *lock)
{
        struct witness *w;

        if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
                return ("?");
        w = lock->lo_witness;
        return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
        struct witness *w;

        if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
                return (0);
        w = lock->lo_witness;
        return (w->w_line);
}

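/*
 * Find the existing witness matching 'description', bumping its
 * reference count, or enroll a brand new witness for this name and
 * lock class.  Returns NULL when witness is disabled or exhausted.
 */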
static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
        struct witness *w;

        if (witness_watch == 0 || panicstr != NULL)
                return (NULL);
        if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
                return (NULL);
        mtx_lock_spin(&w_mtx);
        STAILQ_FOREACH(w, &w_all, w_list) {
                if (w->w_name == description || (w->w_refcount > 0 &&
                    strcmp(description, w->w_name) == 0)) {
                        w->w_refcount++;
                        mtx_unlock_spin(&w_mtx);
                        if (lock_class != w->w_class)
                                panic(
                                "lock (%s) %s does not match earlier (%s) lock",
                                    description, lock_class->lc_name,
                                    w->w_class->lc_name);
                        return (w);
                }
        }
        if ((w = witness_get()) == NULL)
                goto out;
        w->w_name = description;
        w->w_class = lock_class;
        w->w_refcount = 1;
        STAILQ_INSERT_HEAD(&w_all, w, w_list);
        if (lock_class->lc_flags & LC_SPINLOCK) {
                STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
                w_spin_cnt++;
        } else if (lock_class->lc_flags & LC_SLEEPLOCK) {
                STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
                w_sleep_cnt++;
        } else {
                mtx_unlock_spin(&w_mtx);
                panic("lock class %s is not sleep or spin",
                    lock_class->lc_name);
        }
        mtx_unlock_spin(&w_mtx);
out:
        /*
         * We issue a warning for any spin locks not defined in the static
         * order list as a way to discourage their use (folks should really
         * be using non-spin mutexes most of the time).  However, several
         * 3rd-party device drivers use spin locks because that is all they
         * have available on Windows and Linux and they think that normal
         * mutexes are insufficient.
         */
        if ((lock_class->lc_flags & LC_SPINLOCK) && witness_spin_warn)
                printf("WITNESS: spin lock %s not in order list\n",
                    description);
        return (w);
}

/* Don't let the door bang you on the way out... */
static int
depart(struct witness *w)
{
        struct witness_child_list_entry *wcl, *nwcl;
        struct witness_list *list;
        struct witness *parent;

        MPASS(w->w_refcount == 0);
        if (w->w_class->lc_flags & LC_SLEEPLOCK) {
                list = &w_sleep;
                w_sleep_cnt--;
        } else {
                list = &w_spin;
                w_spin_cnt--;
        }
        /*
         * First, we run through the entire tree looking for any
         * witnesses that the outgoing witness is a child of.  For
         * each such parent, we remove the outgoing witness from the
         * parent's child list.
         */
        STAILQ_FOREACH(parent, list, w_typelist) {
                if (!isitmychild(parent, w))
                        continue;
                removechild(parent, w);
        }

        /*
         * Now we go through and free up the child list of the
         * outgoing witness.
         */
        for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
                nwcl = wcl->wcl_next;
                w_child_cnt--;
                witness_child_free(wcl);
        }

        /*
         * Detach from various lists and free.
         */
        STAILQ_REMOVE(list, w, witness, w_typelist);
        STAILQ_REMOVE(&w_all, w, witness, w_list);
        witness_free(w);

        return (1);
}

/*
 * Add "child" as a direct child of "parent".  Returns false if
 * we run out of child list entries.
 */
static int
insertchild(struct witness *parent, struct witness *child)
{
        struct witness_child_list_entry **wcl;

        MPASS(child != NULL && parent != NULL);

        /*
         * Append "child" to the first child list entry of "parent" with
         * free space, allocating a new entry at the tail if all the
         * existing entries are full.
         */
        wcl = &parent->w_children;
        while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
                wcl = &(*wcl)->wcl_next;
        if (*wcl == NULL) {
                *wcl = witness_child_get();
                if (*wcl == NULL)
                        return (0);
                w_child_cnt++;
        }
        (*wcl)->wcl_children[(*wcl)->wcl_count++] = child;

        return (1);
}

static int
itismychild(struct witness *parent, struct witness *child)
{

        MPASS(child != NULL && parent != NULL);
        if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
            (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
                panic(
                "%s: parent (%s) and child (%s) are not the same lock type",
                    __func__, parent->w_class->lc_name,
                    child->w_class->lc_name);

        return (insertchild(parent, child));
}

static void
removechild(struct witness *parent, struct witness *child)
{
        struct witness_child_list_entry **wcl, *wcl1;
        int i;

        for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
                for (i = 0; i < (*wcl)->wcl_count; i++)
                        if ((*wcl)->wcl_children[i] == child)
                                goto found;
        return;
found:
        (*wcl)->wcl_count--;
        if ((*wcl)->wcl_count > i)
                (*wcl)->wcl_children[i] =
                    (*wcl)->wcl_children[(*wcl)->wcl_count];
        MPASS((*wcl)->wcl_children[i] != NULL);
        if ((*wcl)->wcl_count != 0)
                return;
        wcl1 = *wcl;
        *wcl = wcl1->wcl_next;
        w_child_cnt--;
        witness_child_free(wcl1);
}

static int
isitmychild(struct witness *parent, struct witness *child)
{
        struct witness_child_list_entry *wcl;
        int i;

        for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
                for (i = 0; i < wcl->wcl_count; i++) {
                        if (wcl->wcl_children[i] == child)
                                return (1);
                }
        }
        return (0);
}

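/*
 * Depth-first search of the lock-order graph: returns true if "child"
 * is a direct or transitive child of "parent".
 */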
static int
isitmydescendant(struct witness *parent, struct witness *child)
{
        struct witness_child_list_entry *wcl;
        int i, j;

        if (isitmychild(parent, child))
                return (1);
        j = 0;
        for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
                MPASS(j < 1000);
                for (i = 0; i < wcl->wcl_count; i++) {
                        if (isitmydescendant(wcl->wcl_children[i], child))
                                return (1);
                }
                j++;
        }
        return (0);
}

#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
        int i;
        struct witness_blessed *b;

        for (i = 0; i < blessed_count; i++) {
                b = &blessed_list[i];
                if (strcmp(w1->w_name, b->b_lock1) == 0) {
                        if (strcmp(w2->w_name, b->b_lock2) == 0)
                                return (1);
                        continue;
                }
                if (strcmp(w1->w_name, b->b_lock2) == 0)
                        if (strcmp(w2->w_name, b->b_lock1) == 0)
                                return (1);
        }
        return (0);
}
#endif

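/*
 * A sketch of what a BLESSING pair might look like; the lock names
 * here are hypothetical, not entries from this tree.  blessed() treats
 * each pair as unordered, so it is checked in both directions:
 *
 *	static struct witness_blessed blessed_list[] = {
 *		{ "hypothetical lock A", "hypothetical lock B" },
 *	};
 *	static int blessed_count =
 *	    sizeof(blessed_list) / sizeof(blessed_list[0]);
 */

/*
 * The allocators below hand out entries from static pools rather than
 * calling malloc(9), since WITNESS runs before the allocators are up
 * and while spin locks are held.  When a pool runs dry, witness_watch
 * is cleared so the checker turns itself off instead of failing hard.
 */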
static struct witness *
witness_get(void)
{
        struct witness *w;

        if (witness_watch == 0) {
                mtx_unlock_spin(&w_mtx);
                return (NULL);
        }
        if (STAILQ_EMPTY(&w_free)) {
                witness_watch = 0;
                mtx_unlock_spin(&w_mtx);
                printf("%s: witness exhausted\n", __func__);
                return (NULL);
        }
        w = STAILQ_FIRST(&w_free);
        STAILQ_REMOVE_HEAD(&w_free, w_list);
        w_free_cnt--;
        bzero(w, sizeof(*w));
        return (w);
}

static void
witness_free(struct witness *w)
{

        STAILQ_INSERT_HEAD(&w_free, w, w_list);
        w_free_cnt++;
}

static struct witness_child_list_entry *
witness_child_get(void)
{
        struct witness_child_list_entry *wcl;

        if (witness_watch == 0) {
                mtx_unlock_spin(&w_mtx);
                return (NULL);
        }
        wcl = w_child_free;
        if (wcl == NULL) {
                witness_watch = 0;
                mtx_unlock_spin(&w_mtx);
                printf("%s: witness exhausted\n", __func__);
                return (NULL);
        }
        w_child_free = wcl->wcl_next;
        w_child_free_cnt--;
        bzero(wcl, sizeof(*wcl));
        return (wcl);
}

static void
witness_child_free(struct witness_child_list_entry *wcl)
{

        wcl->wcl_next = w_child_free;
        w_child_free = wcl;
        w_child_free_cnt++;
}

static struct lock_list_entry *
witness_lock_list_get(void)
{
        struct lock_list_entry *lle;

        if (witness_watch == 0)
                return (NULL);
        mtx_lock_spin(&w_mtx);
        lle = w_lock_list_free;
        if (lle == NULL) {
                witness_watch = 0;
                mtx_unlock_spin(&w_mtx);
                printf("%s: witness exhausted\n", __func__);
                return (NULL);
        }
        w_lock_list_free = lle->ll_next;
        mtx_unlock_spin(&w_mtx);
        bzero(lle, sizeof(*lle));
        return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

        mtx_lock_spin(&w_mtx);
        lle->ll_next = w_lock_list_free;
        w_lock_list_free = lle;
        mtx_unlock_spin(&w_mtx);
}

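/*
 * Search a thread's or CPU's held-lock list for the instance that
 * records the given lock object; returns NULL if it is not held.
 */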
static struct lock_instance *
find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
{
        struct lock_list_entry *lle;
        struct lock_instance *instance;
        int i;

        for (lle = lock_list; lle != NULL; lle = lle->ll_next)
                for (i = lle->ll_count - 1; i >= 0; i--) {
                        instance = &lle->ll_children[i];
                        if (instance->li_lock == lock)
                                return (instance);
                }
        return (NULL);
}

static void
witness_list_lock(struct lock_instance *instance)
{
        struct lock_object *lock;

        lock = instance->li_lock;
        printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
            "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
        if (lock->lo_type != lock->lo_name)
                printf(" (%s)", lock->lo_type);
        printf(" r = %d (%p) locked @ %s:%d\n",
            instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
            instance->li_line);
}
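
/*
 * The resulting console output is a single line roughly like the
 * following (wrapped here; the lock name, pointer, and location are
 * illustrative):
 *
 *	exclusive sleep mutex example lock (example type) r = 0
 *	    (0xc1234567) locked @ kern/some_file.c:123
 */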

#ifdef DDB
static int
witness_thread_has_locks(struct thread *td)
{

        return (td->td_sleeplocks != NULL);
}

static int
witness_proc_has_locks(struct proc *p)
{
        struct thread *td;

        FOREACH_THREAD_IN_PROC(p, td) {
                if (witness_thread_has_locks(td))
                        return (1);
        }
        return (0);
}
#endif

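/*
 * Print, via witness_list_lock(), every lock instance on the given
 * list and return the number of locks printed.
 */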
int
witness_list_locks(struct lock_list_entry **lock_list)
{
        struct lock_list_entry *lle;
        int i, nheld;

        nheld = 0;
        for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
                for (i = lle->ll_count - 1; i >= 0; i--) {
                        witness_list_lock(&lle->ll_children[i]);
                        nheld++;
                }
        return (nheld);
}

/*
 * This is a bit risky at best.  We call this function when we have timed
 * out acquiring a spin lock, and we assume that the other CPU is stuck
 * with this lock held.  So, we go groveling around in the other CPU's
 * per-cpu data to try to find the lock instance for this spin lock to
 * see when it was last acquired.
 */
void
witness_display_spinlock(struct lock_object *lock, struct thread *owner)
{
        struct lock_instance *instance;
        struct pcpu *pc;

        if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
                return;
        pc = pcpu_find(owner->td_oncpu);
        instance = find_instance(pc->pc_spinlocks, lock);
        if (instance != NULL)
                witness_list_lock(instance);
}

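/*
 * Record where the given lock (which must be held) was acquired, so a
 * caller that transiently drops it can put the file and line back with
 * witness_restore() afterwards.
 */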
void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
        struct lock_list_entry *lock_list;
        struct lock_instance *instance;
        struct lock_class *class;

        KASSERT(!witness_cold, ("%s: witness_cold", __func__));
        if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
                return;
        class = LOCK_CLASS(lock);
        if (class->lc_flags & LC_SLEEPLOCK)
                lock_list = curthread->td_sleeplocks;
        else {
                if (witness_skipspin)
                        return;
                lock_list = PCPU_GET(spinlocks);
        }
        instance = find_instance(lock_list, lock);
        if (instance == NULL)
                panic("%s: lock (%s) %s not locked", __func__,
                    class->lc_name, lock->lo_name);
        *filep = instance->li_file;
        *linep = instance->li_line;
}

void
witness_restore(struct lock_object *lock, const char *file, int line)
{
        struct lock_list_entry *lock_list;
        struct lock_instance *instance;
        struct lock_class *class;

        KASSERT(!witness_cold, ("%s: witness_cold", __func__));
        if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
                return;
        class = LOCK_CLASS(lock);
        if (class->lc_flags & LC_SLEEPLOCK)
                lock_list = curthread->td_sleeplocks;
        else {
                if (witness_skipspin)
                        return;
                lock_list = PCPU_GET(spinlocks);
        }
        instance = find_instance(lock_list, lock);
        if (instance == NULL)
                panic("%s: lock (%s) %s not locked", __func__,
                    class->lc_name, lock->lo_name);
        lock->lo_witness->w_file = file;
        lock->lo_witness->w_line = line;
        instance->li_file = file;
        instance->li_line = line;
}
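
/*
 * A sketch of the intended pairing (hypothetical caller): a primitive
 * that drops and re-acquires a mutex 'm' around a sleep might preserve
 * the original acquisition point across the gap:
 *
 *	const char *file;
 *	int line;
 *
 *	witness_save(&m->mtx_object, &file, &line);
 *	... release 'm', sleep, re-acquire 'm' ...
 *	witness_restore(&m->mtx_object, file, line);
 */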

void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
        struct lock_instance *instance;
        struct lock_class *class;

        if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
                return;
        class = LOCK_CLASS(lock);
        if ((class->lc_flags & LC_SLEEPLOCK) != 0)
                instance = find_instance(curthread->td_sleeplocks, lock);
        else if ((class->lc_flags & LC_SPINLOCK) != 0)
                instance = find_instance(PCPU_GET(spinlocks), lock);
        else {
                panic("Lock (%s) %s is not sleep or spin!",
                    class->lc_name, lock->lo_name);
        }
        file = fixup_filename(file);
        switch (flags) {
        case LA_UNLOCKED:
                if (instance != NULL)
                        panic("Lock (%s) %s locked @ %s:%d.",
                            class->lc_name, lock->lo_name, file, line);
                break;
        case LA_LOCKED:
        case LA_LOCKED | LA_RECURSED:
        case LA_LOCKED | LA_NOTRECURSED:
        case LA_SLOCKED:
        case LA_SLOCKED | LA_RECURSED:
        case LA_SLOCKED | LA_NOTRECURSED:
        case LA_XLOCKED:
        case LA_XLOCKED | LA_RECURSED:
        case LA_XLOCKED | LA_NOTRECURSED:
                if (instance == NULL) {
                        panic("Lock (%s) %s not locked @ %s:%d.",
                            class->lc_name, lock->lo_name, file, line);
                        break;
                }
                if ((flags & LA_XLOCKED) != 0 &&
                    (instance->li_flags & LI_EXCLUSIVE) == 0)
                        panic("Lock (%s) %s not exclusively locked @ %s:%d.",
                            class->lc_name, lock->lo_name, file, line);
                if ((flags & LA_SLOCKED) != 0 &&
                    (instance->li_flags & LI_EXCLUSIVE) != 0)
                        panic("Lock (%s) %s exclusively locked @ %s:%d.",
                            class->lc_name, lock->lo_name, file, line);
                if ((flags & LA_RECURSED) != 0 &&
                    (instance->li_flags & LI_RECURSEMASK) == 0)
                        panic("Lock (%s) %s not recursed @ %s:%d.",
                            class->lc_name, lock->lo_name, file, line);
                if ((flags & LA_NOTRECURSED) != 0 &&
                    (instance->li_flags & LI_RECURSEMASK) != 0)
                        panic("Lock (%s) %s recursed @ %s:%d.",
                            class->lc_name, lock->lo_name, file, line);
                break;
        default:
                panic("Invalid lock assertion at %s:%d.", file, line);
        }
#endif  /* INVARIANT_SUPPORT */
}
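
/*
 * For illustration only (hypothetical caller): a routine that requires
 * a mutex 'm' to be held exclusively, and not recursed, at entry could
 * assert:
 *
 *	witness_assert(&m->mtx_object, LA_XLOCKED | LA_NOTRECURSED,
 *	    __FILE__, __LINE__);
 */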

#ifdef DDB
static void
witness_list(struct thread *td)
{

        KASSERT(!witness_cold, ("%s: witness_cold", __func__));
        KASSERT(kdb_active, ("%s: not in the debugger", __func__));

        if (witness_watch == 0)
                return;

        witness_list_locks(&td->td_sleeplocks);

        /*
         * We only handle spin locks if td == curthread.  This is somewhat
         * broken if td is currently executing on some other CPU and holds
         * spin locks, as we won't display those locks.  If we had a MI way
         * of getting the per-cpu data for a given cpu, then we could use
         * td->td_oncpu to get the list of spin locks for this thread and
         * "fix" this.
         *
         * That still wouldn't really fix this unless we locked sched_lock
         * or stopped the other CPU to make sure it wasn't changing the
         * list out from under us.  It is probably best to just not try to
         * handle threads on other CPUs for now.
         */
        if (td == curthread && PCPU_GET(spinlocks) != NULL)
                witness_list_locks(PCPU_PTR(spinlocks));
}

DB_SHOW_COMMAND(locks, db_witness_list)
{
        struct thread *td;
        pid_t pid;
        struct proc *p;

        if (have_addr) {
                /*
                 * ddb parses the argument as hex, but the user typed a
                 * decimal pid, so reinterpret each hex digit of 'addr'
                 * as a decimal digit.
                 */
                pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
                    ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
                    ((addr >> 16) % 16) * 10000;
                /* sx_slock(&allproc_lock); */
                FOREACH_PROC_IN_SYSTEM(p) {
                        if (p->p_pid == pid)
                                break;
                }
                /* sx_sunlock(&allproc_lock); */
                if (p == NULL) {
                        db_printf("pid %d not found\n", pid);
                        return;
                }
                FOREACH_THREAD_IN_PROC(p, td) {
                        witness_list(td);
                }
        } else {
                td = curthread;
                witness_list(td);
        }
}
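
/*
 * Illustrative ddb session (the pid and the output line are made up):
 *
 *	db> show locks 42
 *	exclusive sleep mutex process lock (process lock) r = 0
 *	    (0xc1234567) locked @ kern/some_file.c:123
 */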

DB_SHOW_COMMAND(alllocks, db_witness_list_all)
{
        struct thread *td;
        struct proc *p;

        /*
         * Ideally we would only walk the threads that actually hold
         * sleep locks, but WITNESS does not export such a list, so we
         * visit every process and thread in the system and filter.
         */
        FOREACH_PROC_IN_SYSTEM(p) {
                if (!witness_proc_has_locks(p))
                        continue;
                FOREACH_THREAD_IN_PROC(p, td) {
                        if (!witness_thread_has_locks(td))
                                continue;
                        db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
                            p->p_comm, td, td->td_tid);
                        witness_list(td);
                }
        }
}

DB_SHOW_COMMAND(witness, db_witness_display)
{

        witness_display(db_printf);
}
#endif