/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and functions used to maintain
 * lock_object structures.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_mprof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/cpufunc.h>
/*
 * Uncomment to validate that spin argument to acquire/release routines matches
 * the flag in the lock.
 */
//#define LOCK_PROFILING_DEBUG_SPIN

CTASSERT(LOCK_CLASS_MAX == 15);

struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
	&lock_class_mtx_spin,
	&lock_class_mtx_sleep,
	&lock_class_sx,
	&lock_class_rm,
	&lock_class_rm_sleepable,
	&lock_class_rw,
	&lock_class_lockmgr,
};
void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
    const char *type, int flags)
{
	int i;

	/* Check for double-init and zero object. */
	KASSERT(flags & LO_NEW || !lock_initialized(lock),
	    ("lock \"%s\" %p already initialized", name, lock));

	/* Look up lock class to find its index. */
	for (i = 0; i < LOCK_CLASS_MAX; i++)
		if (lock_classes[i] == class) {
			lock->lo_flags = i << LO_CLASSSHIFT;
			break;
		}
	KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));

	/* Initialize the lock object. */
	lock->lo_name = name;
	lock->lo_flags |= flags | LO_INITIALIZED;
	LOCK_LOG_INIT(lock, 0);
	WITNESS_INIT(lock, (type != NULL) ? type : name);
}
void
lock_destroy(struct lock_object *lock)
{

	KASSERT(lock_initialized(lock), ("lock %p is not initialized", lock));
	WITNESS_DESTROY(lock);
	LOCK_LOG_DESTROY(lock, 0);
	lock->lo_flags &= ~LO_INITIALIZED;
}
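
/*
 * Illustrative sketch (not part of the build): lock implementations are
 * expected to call lock_init()/lock_destroy() from their own init/destroy
 * routines, the way mtx_init() and mtx_destroy() do.  The object, name and
 * flags below are hypothetical:
 *
 *	struct lock_object lo;
 *
 *	bzero(&lo, sizeof(lo));
 *	lock_init(&lo, &lock_class_mtx_sleep, "example", NULL, LO_NEW);
 *	...
 *	lock_destroy(&lo);
 */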
static SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "lock debugging");
static SYSCTL_NODE(_debug_lock, OID_AUTO, delay,
    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "lock delay");
void
lock_delay(struct lock_delay_arg *la)
{
	struct lock_delay_config *lc = la->config;
	u_short i;

	for (i = la->delay; i > 0; i--)
		cpu_spinwait();
	la->spin_cnt += la->delay;

	la->delay <<= 1;
	if (__predict_false(la->delay > (u_int)lc->max))
		la->delay = lc->max;
}
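
/*
 * Illustrative sketch (not part of the build): lock primitives use
 * lock_delay() as exponential backoff while spinning on a contended lock,
 * roughly as below; try_acquire() is a hypothetical stand-in for the
 * primitive's real atomic acquisition attempt:
 *
 *	struct lock_delay_arg lda;
 *
 *	lock_delay_arg_init(&lda, &locks_delay);
 *	while (!try_acquire(lock))
 *		lock_delay(&lda);
 */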
static u_int
lock_roundup_2(u_int val)
{
	u_int res;

	for (res = 1; res <= val; res <<= 1)
		continue;

	return (res);
}
void
lock_delay_default_init(struct lock_delay_config *lc)
{

	lc->base = 1;
	lc->max = min(lock_roundup_2(mp_ncpus) * 256, SHRT_MAX);
}
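
/*
 * Worked example of the default sizing: with mp_ncpus == 8,
 * lock_roundup_2(8) returns 16 (the first power of two strictly greater
 * than its argument), so lc->max = min(16 * 256, SHRT_MAX) = 4096
 * cpu_spinwait() iterations at the longest backoff step.
 */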
struct lock_delay_config __read_frequently locks_delay;
u_short __read_frequently locks_delay_retries;
u_short __read_frequently locks_delay_loops;

SYSCTL_U16(_debug_lock, OID_AUTO, delay_base, CTLFLAG_RW, &locks_delay.base,
    0, "");
SYSCTL_U16(_debug_lock, OID_AUTO, delay_max, CTLFLAG_RW, &locks_delay.max,
    0, "");
SYSCTL_U16(_debug_lock, OID_AUTO, delay_retries, CTLFLAG_RW, &locks_delay_retries,
    0, "");
SYSCTL_U16(_debug_lock, OID_AUTO, delay_loops, CTLFLAG_RW, &locks_delay_loops,
    0, "");
static void
locks_delay_init(void *arg __unused)
{

	lock_delay_default_init(&locks_delay);
	locks_delay_retries = 10;
	locks_delay_loops = max(10000, locks_delay.max);
}
LOCK_DELAY_SYSINIT(locks_delay_init);
#ifdef DDB
DB_SHOW_COMMAND(lock, db_show_lock)
{
	struct lock_object *lock;
	struct lock_class *class;

	if (!have_addr)
		return;
	lock = (struct lock_object *)addr;
	if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
		db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
		return;
	}
	class = LOCK_CLASS(lock);
	db_printf(" class: %s\n", class->lc_name);
	db_printf(" name: %s\n", lock->lo_name);
	class->lc_ddb_show(lock);
}
#endif
#ifdef LOCK_PROFILING

/*
 * One object per-thread for each lock the thread owns.  Tracks individual
 * lock instances.
 */
struct lock_profile_object {
	LIST_ENTRY(lock_profile_object) lpo_link;
	struct lock_object *lpo_obj;
	const char	*lpo_file;
	int		lpo_line;
	uint16_t	lpo_ref;
	uint16_t	lpo_cnt;
	uint64_t	lpo_acqtime;
	uint64_t	lpo_waittime;
	u_int		lpo_contest_locking;
};
/*
 * One lock_prof for each (file, line, lock object) triple.
 */
struct lock_prof {
	SLIST_ENTRY(lock_prof) link;
	struct lock_class *class;
	const char	*file;
	const char	*name;
	int		line;
	int		ticks;
	uintmax_t	cnt_wait_max;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_wait;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_locking;
};

SLIST_HEAD(lphead, lock_prof);

#define	LPROF_HASH_SIZE		4096
#define	LPROF_HASH_MASK		(LPROF_HASH_SIZE - 1)
#define	LPROF_CACHE_SIZE	4096
/*
 * Array of objects and profs for each type of object for each cpu.  Spinlocks
 * are handled separately because a thread may be preempted and acquire a
 * spinlock while in the lock profiling code of a non-spinlock.  In this way
 * we only need a critical section to protect the per-cpu lists.
 */
struct lock_prof_type {
	struct lphead		lpt_lpalloc;
	struct lpohead		lpt_lpoalloc;
	struct lphead		lpt_hash[LPROF_HASH_SIZE];
	struct lock_prof	lpt_prof[LPROF_CACHE_SIZE];
	struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
};

struct lock_prof_cpu {
	struct lock_prof_type	lpc_types[2]; /* One for spin one for other. */
};

DPCPU_DEFINE_STATIC(struct lock_prof_cpu, lp);
#define	LP_CPU_SELF	(DPCPU_PTR(lp))
#define	LP_CPU(cpu)	(DPCPU_ID_PTR((cpu), lp))
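
/*
 * Illustrative sketch (not part of the build): the profiling hooks below
 * access this per-CPU state inside critical_enter()/critical_exit(), which
 * is what makes the unlocked per-cpu free lists safe against preemption:
 *
 *	critical_enter();
 *	type = &LP_CPU_SELF->lpc_types[spin];
 *	... consume or return lpt_lpalloc/lpt_lpoalloc entries ...
 *	critical_exit();
 */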
volatile int __read_mostly lock_prof_enable;
int __read_mostly lock_contested_only;
static volatile int lock_prof_resetting;

#define	LPROF_SBUF_SIZE		256

static int lock_prof_rejected;
static int lock_prof_skipspin;
#ifndef USE_CPU_NANOSECONDS
uint64_t
nanoseconds(void)
{
	struct bintime bt;
	uint64_t ns;

	binuptime(&bt);
	/* From bintime2timespec */
	ns = bt.sec * (uint64_t)1000000000;
	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
	return (ns);
}
#endif
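
/*
 * The conversion above is fixed-point arithmetic: bt.frac is a 64-bit
 * binary fraction of a second, so its top 32 bits represent frac_hi / 2^32
 * seconds and (10^9 * frac_hi) >> 32 yields nanoseconds.  For example,
 * bt.frac = 1 << 63 (half a second) gives frac_hi = 1 << 31, and
 * (10^9 << 31) >> 32 = 500000000 ns, as expected.
 */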
static void
lock_prof_init_type(struct lock_prof_type *type)
{
	int i;

	SLIST_INIT(&type->lpt_lpalloc);
	LIST_INIT(&type->lpt_lpoalloc);
	for (i = 0; i < LPROF_CACHE_SIZE; i++) {
		SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
		    link);
		LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
		    lpo_link);
	}
}
static void
lock_prof_init(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		lock_prof_init_type(&LP_CPU(cpu)->lpc_types[0]);
		lock_prof_init_type(&LP_CPU(cpu)->lpc_types[1]);
	}
}
SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);
static void
lock_prof_reset_wait(void)
{

	/*
	 * Spin relinquishing our cpu so that quiesce_all_cpus may
	 * complete.
	 */
	while (lock_prof_resetting)
		sched_relinquish(curthread);
}
static void
lock_prof_reset(void)
{
	struct lock_prof_cpu *lpc;
	int enabled, i, cpu;

	/*
	 * We not only race with acquiring and releasing locks but also
	 * thread exit.  To be certain that threads exit without valid head
	 * pointers they must see resetting set before enabled is cleared.
	 * Otherwise a lock may not be removed from a per-thread list due
	 * to disabled being set but not wait for reset() to remove it below.
	 */
	atomic_store_rel_int(&lock_prof_resetting, 1);
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	/*
	 * This both publishes lock_prof_enable as disabled and makes sure
	 * everyone else reads it if they are not far enough. We wait for the
	 * rest down below.
	 */
	cpus_fence_seq_cst();
	quiesce_all_critical();
	/*
	 * Some objects may have migrated between CPUs.  Clear all links
	 * before we zero the structures.  Some items may still be linked
	 * into per-thread lists as well.
	 */
	CPU_FOREACH(cpu) {
		lpc = LP_CPU(cpu);
		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
		}
	}
	CPU_FOREACH(cpu) {
		lpc = LP_CPU(cpu);
		bzero(lpc, sizeof(*lpc));
		lock_prof_init_type(&lpc->lpc_types[0]);
		lock_prof_init_type(&lpc->lpc_types[1]);
	}
	/*
	 * Paired with the fence from cpus_fence_seq_cst().
	 */
	atomic_store_rel_int(&lock_prof_resetting, 0);
	lock_prof_enable = enabled;
}
static void
lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
{
	const char *p;

	for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
	sbuf_printf(sb,
	    "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
	    lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
	    lp->cnt_wait / 1000, lp->cnt_cur,
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_tot / (lp->cnt_cur * 1000),
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_wait / (lp->cnt_cur * 1000),
	    (uintmax_t)0, lp->cnt_contest_locking,
	    p, lp->line, lp->class->lc_name, lp->name);
}
static void
lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
    int spin, int t)
{
	struct lock_prof_type *type;
	struct lock_prof *l;
	int cpu;

	dst->file = match->file;
	dst->line = match->line;
	dst->class = match->class;
	dst->name = match->name;

	CPU_FOREACH(cpu) {
		type = &LP_CPU(cpu)->lpc_types[spin];
		SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
			if (l->ticks == t)
				continue;
			if (l->file != match->file || l->line != match->line ||
			    l->name != match->name)
				continue;
			l->ticks = t;
			if (l->cnt_max > dst->cnt_max)
				dst->cnt_max = l->cnt_max;
			if (l->cnt_wait_max > dst->cnt_wait_max)
				dst->cnt_wait_max = l->cnt_wait_max;
			dst->cnt_tot += l->cnt_tot;
			dst->cnt_wait += l->cnt_wait;
			dst->cnt_cur += l->cnt_cur;
			dst->cnt_contest_locking += l->cnt_contest_locking;
		}
	}
}
static void
lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
    int t)
{
	struct lock_prof *l;
	int i;

	for (i = 0; i < LPROF_HASH_SIZE; ++i) {
		SLIST_FOREACH(l, &type->lpt_hash[i], link) {
			struct lock_prof lp = {};

			if (l->ticks == t)
				continue;
			lock_prof_sum(l, &lp, i, spin, t);
			lock_prof_output(&lp, sb);
		}
	}
}
static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, cpu, t;
	int enabled;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req);
	sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
	    "max", "wait_max", "total", "wait_total", "count", "avg",
	    "wait_avg", "cnt_hold", "cnt_lock", "name");
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	/*
	 * See the comment in lock_prof_reset().
	 */
	cpus_fence_seq_cst();
	quiesce_all_critical();
	t = ticks;
	CPU_FOREACH(cpu) {
		lock_prof_type_stats(&LP_CPU(cpu)->lpc_types[0], sb, 0, t);
		lock_prof_type_stats(&LP_CPU(cpu)->lpc_types[1], sb, 1, t);
	}
	atomic_thread_fence_rel();
	lock_prof_enable = enabled;

	error = sbuf_finish(sb);
	/* Output a trailing NUL. */
	if (error == 0)
		error = SYSCTL_OUT(req, "", 1);
	sbuf_delete(sb);
	return (error);
}
static int
enable_lock_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = lock_prof_enable;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == lock_prof_enable)
		return (0);
	if (v == 1)
		lock_prof_reset();
	lock_prof_enable = !!v;

	return (0);
}
static int
reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	lock_prof_reset();

	return (0);
}
static struct lock_prof *
lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	const char *unknown = "(unknown)";
	struct lock_prof_type *type;
	struct lock_prof *lp;
	struct lphead *head;
	const char *p;
	u_int hash;

	p = file;
	if (p == NULL || *p == '\0')
		p = unknown;
	hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
	hash &= LPROF_HASH_MASK;
	type = &LP_CPU_SELF->lpc_types[spin];
	head = &type->lpt_hash[hash];
	SLIST_FOREACH(lp, head, link) {
		if (lp->line == line && lp->file == p &&
		    lp->name == lo->lo_name)
			return (lp);
	}
	lp = SLIST_FIRST(&type->lpt_lpalloc);
	if (lp == NULL) {
		lock_prof_rejected++;
		return (lp);
	}
	SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
	lp->file = p;
	lp->line = line;
	lp->class = LOCK_CLASS(lo);
	lp->name = lo->lo_name;
	SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
	return (lp);
}
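
/*
 * Note that the hash and the comparisons above use pointer identity, not
 * string contents: lo_name and the __FILE__ strings passed in are literals
 * with stable addresses, so two acquisition sites match only when they
 * share the same name pointer, file pointer and line number.
 */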
static struct lock_profile_object *
lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lpohead *head;

	head = &curthread->td_lprof[spin];
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo && l->lpo_file == file &&
		    l->lpo_line == line)
			return (l);
	type = &LP_CPU_SELF->lpc_types[spin];
	l = LIST_FIRST(&type->lpt_lpoalloc);
	if (l == NULL) {
		lock_prof_rejected++;
		return (NULL);
	}
	LIST_REMOVE(l, lpo_link);
	l->lpo_obj = lo;
	l->lpo_file = file;
	l->lpo_line = line;
	l->lpo_cnt = 0;
	LIST_INSERT_HEAD(head, l, lpo_link);

	return (l);
}
void
lock_profile_obtain_lock_success(struct lock_object *lo, bool spin,
    int contested, uint64_t waittime, const char *file, int line)
{
	struct lock_profile_object *l;

#ifdef LOCK_PROFILING_DEBUG_SPIN
	bool is_spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK);
	if ((spin && !is_spin) || (!spin && is_spin))
		printf("%s: lock %s spin mismatch (arg %d, flag %d)\n", __func__,
		    lo->lo_name, spin, is_spin);
#endif

	/* don't reset the timer when/if recursing */
	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
		return;
	if (lock_contested_only && !contested)
		return;
	if (spin && lock_prof_skipspin == 1)
		return;

	if (SCHEDULER_STOPPED())
		return;

	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0)
		goto out;
	l = lock_profile_object_lookup(lo, spin, file, line);
	if (l == NULL)
		goto out;
	l->lpo_cnt++;
	if (++l->lpo_ref > 1)
		goto out;
	l->lpo_contest_locking = contested;
	l->lpo_acqtime = nanoseconds();
	if (waittime && (l->lpo_acqtime > waittime))
		l->lpo_waittime = l->lpo_acqtime - waittime;
	else
		l->lpo_waittime = 0;
out:
	/*
	 * Paired with cpus_fence_seq_cst().
	 */
	atomic_thread_fence_rel();
	critical_exit();
}
void
lock_profile_thread_exit(struct thread *td)
{
#ifdef INVARIANTS
	struct lock_profile_object *l;

	MPASS(curthread->td_critnest == 0);
#endif
	/*
	 * If lock profiling was disabled we have to wait for reset to
	 * clear our pointers before we can exit safely.
	 */
	lock_prof_reset_wait();
#ifdef INVARIANTS
	LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
	LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
#endif
	MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
	MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
}
void
lock_profile_release_lock(struct lock_object *lo, bool spin)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lock_prof *lp;
	uint64_t curtime, holdtime;
	struct lpohead *head;

#ifdef LOCK_PROFILING_DEBUG_SPIN
	bool is_spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK);
	if ((spin && !is_spin) || (!spin && is_spin))
		printf("%s: lock %s spin mismatch (arg %d, flag %d)\n", __func__,
		    lo->lo_name, spin, is_spin);
#endif

	if (lo->lo_flags & LO_NOPROFILE)
		return;
	head = &curthread->td_lprof[spin];
	if (LIST_FIRST(head) == NULL)
		return;
	if (SCHEDULER_STOPPED())
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0 && lock_prof_resetting == 1)
		goto out;
	/*
	 * If lock profiling is not enabled we still want to remove the
	 * lpo from our queue.
	 */
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo)
			break;
	if (l == NULL)
		goto out;
	if (--l->lpo_ref > 0)
		goto out;
	lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
	if (lp == NULL)
		goto release;
	curtime = nanoseconds();
	if (curtime < l->lpo_acqtime)
		goto release;
	holdtime = curtime - l->lpo_acqtime;

	/*
	 * Record if the lock has been held longer now than ever
	 * before.
	 */
	if (holdtime > lp->cnt_max)
		lp->cnt_max = holdtime;
	if (l->lpo_waittime > lp->cnt_wait_max)
		lp->cnt_wait_max = l->lpo_waittime;
	lp->cnt_tot += holdtime;
	lp->cnt_wait += l->lpo_waittime;
	lp->cnt_contest_locking += l->lpo_contest_locking;
	lp->cnt_cur += l->lpo_cnt;
release:
	LIST_REMOVE(l, lpo_link);
	type = &LP_CPU_SELF->lpc_types[spin];
	LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
	/*
	 * Paired with cpus_fence_seq_cst().
	 */
	atomic_thread_fence_rel();
	critical_exit();
}
static SYSCTL_NODE(_debug_lock, OID_AUTO, prof,
    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "lock profiling");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
    &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &lock_prof_rejected, 0, "Number of rejected profiling records");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, contested_only, CTLFLAG_RW,
    &lock_contested_only, 0, "Only profile contested acquires");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    dump_lock_prof_stats, "A",
    "Lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
    reset_lock_prof_stats, "I",
    "Reset lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    enable_lock_prof, "I",
    "Enable lock profiling");

#endif