/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and functions used to maintain
 * lock_object structures.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_mprof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/cpufunc.h>

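/*
 * Each lock_object carries its class as a small index into lock_classes[]:
 * lock_init() stores it in lo_flags via LO_CLASSSHIFT and LO_CLASSINDEX()
 * recovers it, so the assertion below pins the table size to what those
 * flag bits can encode.
 */
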
CTASSERT(LOCK_CLASS_MAX == 15);

struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
        &lock_class_mtx_spin,
        &lock_class_mtx_sleep,
};

void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
    const char *type, int flags)
{
        int i;

        /* Check for double-init and zero object. */
        KASSERT(!lock_initialized(lock), ("lock \"%s\" %p already initialized",
            name, lock));

        /* Look up lock class to find its index. */
        for (i = 0; i < LOCK_CLASS_MAX; i++)
                if (lock_classes[i] == class) {
                        lock->lo_flags = i << LO_CLASSSHIFT;
                        break;
                }
        KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));

        /* Initialize the lock object. */
        lock->lo_name = name;
        lock->lo_flags |= flags | LO_INITIALIZED;
        LOCK_LOG_INIT(lock, 0);
        WITNESS_INIT(lock, (type != NULL) ? type : name);
}

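/*
 * Usage sketch (illustrative, not taken from this file): a lock
 * implementation embeds a lock_object in its own structure and sets it
 * up and tears it down through these routines, roughly:
 *
 *	lock_init(&m->lock_object, &lock_class_mtx_sleep, name, type, 0);
 *	...
 *	lock_destroy(&m->lock_object);
 */
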
void
lock_destroy(struct lock_object *lock)
{

        KASSERT(lock_initialized(lock), ("lock %p is not initialized", lock));
        WITNESS_DESTROY(lock);
        LOCK_LOG_DESTROY(lock, 0);
        lock->lo_flags &= ~LO_INITIALIZED;
}

#ifdef DDB
DB_SHOW_COMMAND(lock, db_show_lock)
{
        struct lock_object *lock;
        struct lock_class *class;

        if (!have_addr)
                return;
        lock = (struct lock_object *)addr;
        if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
                db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
                return;
        }
        class = LOCK_CLASS(lock);
        db_printf(" class: %s\n", class->lc_name);
        db_printf(" name: %s\n", lock->lo_name);
        class->lc_ddb_show(lock);
}
#endif

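/*
 * From the ddb prompt the command above is invoked as, e.g.:
 *
 *	db> show lock 0xffffff80313e9a30
 *
 * (address illustrative); it prints the class and name, then defers to
 * the owning class's lc_ddb_show hook for class-specific state.
 */
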
#ifdef LOCK_PROFILING

/*
 * One object per-thread for each lock the thread owns.  Tracks individual
 * lock instances.
 */
struct lock_profile_object {
        LIST_ENTRY(lock_profile_object) lpo_link;
        struct lock_object *lpo_obj;
        const char      *lpo_file;
        int             lpo_line;
        uint16_t        lpo_ref;
        uint16_t        lpo_cnt;
        u_int64_t       lpo_acqtime;
        u_int64_t       lpo_waittime;
        u_int           lpo_contest_locking;
};

/*
 * One lock_prof for each (file, line, lock object) triple.
 */
struct lock_prof {
        SLIST_ENTRY(lock_prof) link;
        struct lock_class *class;
        const char      *file;
        const char      *name;
        int             line;
        int             ticks;
        uintmax_t       cnt_wait_max;
        uintmax_t       cnt_max;
        uintmax_t       cnt_tot;
        uintmax_t       cnt_wait;
        uintmax_t       cnt_cur;
        uintmax_t       cnt_contest_locking;
};

SLIST_HEAD(lphead, lock_prof);

#define LPROF_HASH_SIZE         4096
#define LPROF_HASH_MASK         (LPROF_HASH_SIZE - 1)
#define LPROF_CACHE_SIZE        4096

/*
 * Array of objects and profs for each type of object for each cpu.  Spinlocks
 * are handled separately because a thread may be preempted and acquire a
 * spinlock while in the lock profiling code of a non-spinlock.  In this way
 * we only need a critical section to protect the per-cpu lists.
 */
struct lock_prof_type {
        struct lphead           lpt_lpalloc;
        struct lpohead          lpt_lpoalloc;
        struct lphead           lpt_hash[LPROF_HASH_SIZE];
        struct lock_prof        lpt_prof[LPROF_CACHE_SIZE];
        struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
};

struct lock_prof_cpu {
        struct lock_prof_type   lpc_types[2]; /* One for spin, one for other. */
};

struct lock_prof_cpu *lp_cpu[MAXCPU];

volatile int lock_prof_enable = 0;
static volatile int lock_prof_resetting;

/* SWAG: sbuf size = avg stat. line size * number of locks */
#define LPROF_SBUF_SIZE         (256 * 400)

static int lock_prof_rejected;
static int lock_prof_skipspin;
static int lock_prof_skipcount;

#ifndef USE_CPU_NANOSECONDS
static u_int64_t
nanoseconds(void)
{
        struct bintime bt;
        u_int64_t ns;

        binuptime(&bt);
        /* From bintime2timespec */
        ns = bt.sec * (u_int64_t)1000000000;
        ns += ((u_int64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
        return (ns);
}
#endif

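/*
 * bt.frac is a 64-bit binary fraction of a second, so the conversion
 * above multiplies its upper 32 bits by 10^9 and shifts the product back
 * down by 32: this yields the fractional part in nanoseconds while
 * keeping the intermediate product within 64 bits.
 */
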
static void
lock_prof_init_type(struct lock_prof_type *type)
{
        int i;

        SLIST_INIT(&type->lpt_lpalloc);
        LIST_INIT(&type->lpt_lpoalloc);
        for (i = 0; i < LPROF_CACHE_SIZE; i++) {
                SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
                    link);
                LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
                    lpo_link);
        }
}

static void
lock_prof_init(void *arg)
{
        int cpu;

        for (cpu = 0; cpu <= mp_maxid; cpu++) {
                lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF,
                    M_WAITOK | M_ZERO);
                lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]);
                lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]);
        }
}
SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);

/*
 * To be certain that lock profiling has idled on all cpus before we
 * reset, we schedule the resetting thread on all active cpus.  Since
 * all operations happen within critical sections we can be sure that
 * it is safe to zero the profiling structures.
 */

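/*
 * A sketch of the idling routine the comment above describes (the
 * original body is not part of this excerpt; this assumes the standard
 * sched_bind()/sched_unbind() KPI): migrating the resetting thread to
 * each active cpu in turn guarantees that any profiling critical
 * section running there has completed.
 */
static void
lock_prof_idle(void)
{
        struct thread *td;
        int cpu;

        td = curthread;
        thread_lock(td);
        for (cpu = 0; cpu <= mp_maxid; cpu++) {
                if (CPU_ABSENT(cpu))
                        continue;
                /* sched_bind() returns once we are running on 'cpu'. */
                sched_bind(td, cpu);
        }
        sched_unbind(td);
        thread_unlock(td);
}
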
static void
lock_prof_reset_wait(void)
{

        /*
         * Spin relinquishing our cpu so that lock_prof_idle may
         * run on it.
         */
        while (lock_prof_resetting)
                sched_relinquish(curthread);
}

static void
lock_prof_reset(void)
{
        struct lock_prof_cpu *lpc;
        int enabled, i, cpu;

        /*
         * We not only race with acquiring and releasing locks but also
         * thread exit.  To be certain that threads exit without valid head
         * pointers they must see resetting set before enabled is cleared.
         * Otherwise an exiting thread that saw profiling disabled could
         * skip removing a lock from its per-thread list without waiting
         * for reset() to remove it below.
         */
        atomic_store_rel_int(&lock_prof_resetting, 1);
        enabled = lock_prof_enable;
        lock_prof_enable = 0;
        lock_prof_idle();
        /*
         * Some objects may have migrated between CPUs.  Clear all links
         * before we zero the structures.  Some items may still be linked
         * into per-thread lists as well.
         */
        for (cpu = 0; cpu <= mp_maxid; cpu++) {
                lpc = lp_cpu[cpu];
                for (i = 0; i < LPROF_CACHE_SIZE; i++) {
                        LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
                        LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
                }
        }
        for (cpu = 0; cpu <= mp_maxid; cpu++) {
                lpc = lp_cpu[cpu];
                bzero(lpc, sizeof(*lpc));
                lock_prof_init_type(&lpc->lpc_types[0]);
                lock_prof_init_type(&lpc->lpc_types[1]);
        }
        atomic_store_rel_int(&lock_prof_resetting, 0);
        lock_prof_enable = enabled;
}

static void
lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
{
        const char *p;

        /* Trim leading "../" components from the file name. */
        for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
        sbuf_printf(sb,
            "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
            lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
            lp->cnt_wait / 1000, lp->cnt_cur,
            lp->cnt_cur == 0 ? (uintmax_t)0 :
            lp->cnt_tot / (lp->cnt_cur * 1000),
            lp->cnt_cur == 0 ? (uintmax_t)0 :
            lp->cnt_wait / (lp->cnt_cur * 1000),
            (uintmax_t)0, lp->cnt_contest_locking,
            p, lp->line, lp->class->lc_name, lp->name);
}

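/*
 * All times above are reported in microseconds (the nanosecond counters
 * are divided by 1000).  The two "avg" columns are the hold and wait
 * totals divided by the acquisition count, and the cnt_hold column is
 * always printed as 0 here.
 */
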
static void
lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
    int spin, int t)
{
        struct lock_prof_type *type;
        struct lock_prof *l;
        int cpu;

        dst->file = match->file;
        dst->line = match->line;
        dst->class = match->class;
        dst->name = match->name;

        for (cpu = 0; cpu <= mp_maxid; cpu++) {
                if (lp_cpu[cpu] == NULL)
                        continue;
                type = &lp_cpu[cpu]->lpc_types[spin];
                SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
                        /* Stamp with t so each record is merged only once. */
                        if (l->ticks == t)
                                continue;
                        if (l->file != match->file || l->line != match->line ||
                            l->name != match->name)
                                continue;
                        l->ticks = t;
                        if (l->cnt_max > dst->cnt_max)
                                dst->cnt_max = l->cnt_max;
                        if (l->cnt_wait_max > dst->cnt_wait_max)
                                dst->cnt_wait_max = l->cnt_wait_max;
                        dst->cnt_tot += l->cnt_tot;
                        dst->cnt_wait += l->cnt_wait;
                        dst->cnt_cur += l->cnt_cur;
                        dst->cnt_contest_locking += l->cnt_contest_locking;
                }
        }
}

static void
lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
    int t)
{
        struct lock_prof *l;
        int i;

        for (i = 0; i < LPROF_HASH_SIZE; ++i) {
                SLIST_FOREACH(l, &type->lpt_hash[i], link) {
                        struct lock_prof lp = {};

                        if (l->ticks == t)
                                continue;
                        lock_prof_sum(l, &lp, i, spin, t);
                        lock_prof_output(&lp, sb);
                        if (sbuf_overflowed(sb))
                                return;
                }
        }
}

static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
        static int multiplier = 1;
        struct sbuf *sb;
        int error, cpu, t;
        int enabled;

retry_sbufops:
        sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
        sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
            "max", "wait_max", "total", "wait_total", "count", "avg",
            "wait_avg", "cnt_hold", "cnt_lock", "name");
        enabled = lock_prof_enable;
        lock_prof_enable = 0;
        lock_prof_idle();
        t = ticks;
        for (cpu = 0; cpu <= mp_maxid; cpu++) {
                if (lp_cpu[cpu] == NULL)
                        continue;
                lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t);
                lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t);
                if (sbuf_overflowed(sb)) {
                        /* Grow the buffer and retry from the top. */
                        sbuf_delete(sb);
                        multiplier++;
                        goto retry_sbufops;
                }
        }
        lock_prof_enable = enabled;

        sbuf_finish(sb);
        error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
        return (error);
}

static int
enable_lock_prof(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        v = lock_prof_enable;
        error = sysctl_handle_int(oidp, &v, v, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == lock_prof_enable)
                return (0);
        lock_prof_reset();
        lock_prof_enable = !!v;

        return (0);
}

static int
reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        v = 0;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == 0)
                return (0);
        lock_prof_reset();

        return (0);
}

static struct lock_prof *
lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
        const char *unknown = "(unknown)";
        struct lock_prof_type *type;
        struct lock_prof *lp;
        struct lphead *head;
        const char *p;
        u_int hash;

        p = file;
        if (p == NULL || *p == '\0')
                p = unknown;
        hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
        hash &= LPROF_HASH_MASK;
        type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
        head = &type->lpt_hash[hash];
        SLIST_FOREACH(lp, head, link) {
                if (lp->line == line && lp->file == p &&
                    lp->name == lo->lo_name)
                        return (lp);
        }
        lp = SLIST_FIRST(&type->lpt_lpalloc);
        if (lp == NULL) {
                lock_prof_rejected++;
                return (lp);
        }
        SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
        lp->file = p;
        lp->line = line;
        lp->class = LOCK_CLASS(lo);
        lp->name = lo->lo_name;
        SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
        return (lp);
}

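/*
 * Note that the lookup above compares file and name by pointer rather
 * than with strcmp(): callers pass __FILE__ literals and lo_name
 * pointers, both of which have stable addresses, so pointer identity is
 * sufficient and cheap.
 */
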
static struct lock_profile_object *
lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
        struct lock_profile_object *l;
        struct lock_prof_type *type;
        struct lpohead *head;

        head = &curthread->td_lprof[spin];
        LIST_FOREACH(l, head, lpo_link)
                if (l->lpo_obj == lo && l->lpo_file == file &&
                    l->lpo_line == line)
                        return (l);
        type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
        l = LIST_FIRST(&type->lpt_lpoalloc);
        if (l == NULL) {
                lock_prof_rejected++;
                return (NULL);
        }
        LIST_REMOVE(l, lpo_link);
        l->lpo_obj = lo;
        l->lpo_file = file;
        l->lpo_line = line;
        l->lpo_cnt = 0;
        LIST_INSERT_HEAD(head, l, lpo_link);

        return (l);
}

void
lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
    uint64_t waittime, const char *file, int line)
{
        static int lock_prof_count;
        struct lock_profile_object *l;
        int spin;

        if (SCHEDULER_STOPPED())
                return;

        /* don't reset the timer when/if recursing */
        if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
                return;
        if (lock_prof_skipcount &&
            (++lock_prof_count % lock_prof_skipcount) != 0)
                return;
        spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
        if (spin && lock_prof_skipspin == 1)
                return;
        critical_enter();
        /* Recheck enabled now that we're in a critical section. */
        if (lock_prof_enable == 0)
                goto out;
        l = lock_profile_object_lookup(lo, spin, file, line);
        if (l == NULL)
                goto out;
        l->lpo_cnt++;
        if (++l->lpo_ref > 1)
                goto out;
        l->lpo_contest_locking = contested;
        l->lpo_acqtime = nanoseconds();
        if (waittime && (l->lpo_acqtime > waittime))
                l->lpo_waittime = l->lpo_acqtime - waittime;
        else
                l->lpo_waittime = 0;
out:
        critical_exit();
}

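/*
 * Illustrative call sequence (a sketch, simplified from how the lock
 * implementations drive these hooks; the names used here are
 * assumptions, not copied from this file).  A contended acquire first
 * notes when it began waiting, then reports success once it owns the
 * lock:
 *
 *	int contested = 0;
 *	uint64_t waittime = 0;
 *
 *	(fast path failed, so record the contention...)
 *	lock_profile_obtain_lock_failed(&m->lock_object, &contested,
 *	    &waittime);
 *	(...block or spin until the lock is acquired, then:)
 *	lock_profile_obtain_lock_success(&m->lock_object, contested,
 *	    waittime, file, line);
 */
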
void
lock_profile_thread_exit(struct thread *td)
{
#ifdef INVARIANTS
        struct lock_profile_object *l;

        MPASS(curthread->td_critnest == 0);
#endif
        /*
         * If lock profiling was disabled we have to wait for reset to
         * clear our pointers before we can exit safely.
         */
        lock_prof_reset_wait();
#ifdef INVARIANTS
        LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
                printf("thread still holds lock acquired at %s:%d\n",
                    l->lpo_file, l->lpo_line);
        LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
                printf("thread still holds lock acquired at %s:%d\n",
                    l->lpo_file, l->lpo_line);
#endif
        MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
        MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
}

void
lock_profile_release_lock(struct lock_object *lo)
{
        struct lock_profile_object *l;
        struct lock_prof_type *type;
        struct lock_prof *lp;
        u_int64_t curtime, holdtime;
        struct lpohead *head;
        int spin;

        if (SCHEDULER_STOPPED())
                return;
        if (lo->lo_flags & LO_NOPROFILE)
                return;
        spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
        head = &curthread->td_lprof[spin];
        if (LIST_FIRST(head) == NULL)
                return;
        critical_enter();
        /* Recheck enabled now that we're in a critical section. */
        if (lock_prof_enable == 0 && lock_prof_resetting == 1)
                goto out;
        /*
         * If lock profiling is not enabled we still want to remove the
         * lpo from our queue.
         */
        LIST_FOREACH(l, head, lpo_link)
                if (l->lpo_obj == lo)
                        break;
        if (l == NULL)
                goto out;
        if (--l->lpo_ref > 0)
                goto out;
        lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
        if (lp == NULL)
                goto release;
        curtime = nanoseconds();
        if (curtime < l->lpo_acqtime)
                goto release;
        holdtime = curtime - l->lpo_acqtime;

        /*
         * Record if the lock has been held longer now than ever
         * before.
         */
        if (holdtime > lp->cnt_max)
                lp->cnt_max = holdtime;
        if (l->lpo_waittime > lp->cnt_wait_max)
                lp->cnt_wait_max = l->lpo_waittime;
        lp->cnt_tot += holdtime;
        lp->cnt_wait += l->lpo_waittime;
        lp->cnt_contest_locking += l->lpo_contest_locking;
        lp->cnt_cur += l->lpo_cnt;
release:
        LIST_REMOVE(l, lpo_link);
        type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
        LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
        critical_exit();
}

SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
    &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
    &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &lock_prof_rejected, 0, "Number of rejected profiling records");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_lock_prof, "I", "Enable lock profiling");

#endif  /* LOCK_PROFILING */

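/*
 * Typical usage from userland (illustrative):
 *
 *	sysctl debug.lock.prof.enable=1		(start profiling)
 *	... run the workload of interest ...
 *	sysctl debug.lock.prof.enable=0		(stop profiling)
 *	sysctl debug.lock.prof.stats		(dump per-call-site statistics)
 *	sysctl debug.lock.prof.reset=1		(zero the counters)
 */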