/*
 * FreeBSD: sys/kern/subr_lock.c
 * (change: track lock class name in a way that doesn't break WITNESS)
 */
1 /*-
2  * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29
30 /*
31  * This module holds the global variables and functions used to maintain
32  * lock_object structures.
33  */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38 #include "opt_ddb.h"
39 #include "opt_mprof.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/ktr.h>
44 #include <sys/linker_set.h>
45 #include <sys/lock.h>
46 #include <sys/sbuf.h>
47 #include <sys/sysctl.h>
48 #include <sys/lock_profile.h>
49
50 #ifdef DDB
51 #include <ddb/ddb.h>
52 #endif
53
/* The class index stored in lo_flags must fit; see LO_CLASSSHIFT users below. */
CTASSERT(LOCK_CLASS_MAX == 15);

/*
 * Table of statically-known lock classes.  lock_init() records a lock's
 * class as an index into this table, so the order here is ABI-like:
 * do not reorder existing entries.
 */
struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
        &lock_class_mtx_spin,
        &lock_class_mtx_sleep,
        &lock_class_sx,
        &lock_class_rw,
        &lock_class_lockmgr,
};
63
64 #ifdef LOCK_PROFILING
65 #include <machine/cpufunc.h>
66
67 SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
68 SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling");
69 int lock_prof_enable = 0;
70 SYSCTL_INT(_debug_lock_prof, OID_AUTO, enable, CTLFLAG_RW,
71     &lock_prof_enable, 0, "Enable lock profiling");
72
73 /*
74  * lprof_buf is a static pool of profiling records to avoid possible
75  * reentrance of the memory allocation functions.
76  *
77  * Note: NUM_LPROF_BUFFERS must be smaller than LPROF_HASH_SIZE.
78  */
79 struct lock_prof lprof_buf[LPROF_HASH_SIZE];
80 static int allocated_lprof_buf;
81 struct mtx lprof_locks[LPROF_LOCK_SIZE];
82
83
84 /* SWAG: sbuf size = avg stat. line size * number of locks */
85 #define LPROF_SBUF_SIZE         256 * 400
86
87 static int lock_prof_acquisitions;
88 SYSCTL_INT(_debug_lock_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
89     &lock_prof_acquisitions, 0, "Number of lock acquistions recorded");
90 static int lock_prof_records;
91 SYSCTL_INT(_debug_lock_prof, OID_AUTO, records, CTLFLAG_RD,
92     &lock_prof_records, 0, "Number of profiling records");
93 static int lock_prof_maxrecords = LPROF_HASH_SIZE;
94 SYSCTL_INT(_debug_lock_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
95     &lock_prof_maxrecords, 0, "Maximum number of profiling records");
96 static int lock_prof_rejected;
97 SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
98     &lock_prof_rejected, 0, "Number of rejected profiling records");
99 static int lock_prof_hashsize = LPROF_HASH_SIZE;
100 SYSCTL_INT(_debug_lock_prof, OID_AUTO, hashsize, CTLFLAG_RD,
101     &lock_prof_hashsize, 0, "Hash size");
102 static int lock_prof_collisions = 0;
103 SYSCTL_INT(_debug_lock_prof, OID_AUTO, collisions, CTLFLAG_RD,
104     &lock_prof_collisions, 0, "Number of hash collisions");
105
106 #ifndef USE_CPU_NANOSECONDS
107 u_int64_t
108 nanoseconds(void)
109 {
110         struct timespec tv;
111
112         nanotime(&tv);
113         return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
114 }
115 #endif
116
/*
 * Sysctl handler for debug.lock.prof.stats: format every populated
 * profiling record into a human-readable table and return it as a string.
 */
static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sbuf *sb;
        int error, i;
        static int multiplier = 1;      /* sticky growth factor for the sbuf */
        const char *p;

        if (allocated_lprof_buf == 0)
                return (SYSCTL_OUT(req, "No locking recorded",
                    sizeof("No locking recorded")));

retry_sbufops:
        sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
        sbuf_printf(sb, "\n%6s %12s %12s %11s %5s %5s %12s %12s %s\n",
            "max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
        for (i = 0; i < LPROF_HASH_SIZE; ++i) {
                /* A NULL name marks an unused record slot. */
                if (lprof_buf[i].name == NULL)
                        continue;
                /* Strip leading "../" components from the file name. */
                for (p = lprof_buf[i].file;
                        p != NULL && strncmp(p, "../", 3) == 0; p += 3)
                                /* nothing */ ;
                /* Counters accumulate nanoseconds; print microseconds. */
                sbuf_printf(sb, "%6ju %12ju %12ju %11ju %5ju %5ju %12ju %12ju %s:%d (%s:%s)\n",
                    lprof_buf[i].cnt_max / 1000,
                    lprof_buf[i].cnt_tot / 1000,
                    lprof_buf[i].cnt_wait / 1000,
                    lprof_buf[i].cnt_cur,
                    lprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
                        lprof_buf[i].cnt_tot / (lprof_buf[i].cnt_cur * 1000),
                    lprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
                        lprof_buf[i].cnt_wait / (lprof_buf[i].cnt_cur * 1000),
                    lprof_buf[i].cnt_contest_holding,
                    lprof_buf[i].cnt_contest_locking,
                    p, lprof_buf[i].line, 
                            lprof_buf[i].type,
                            lprof_buf[i].name);
                /* Fixed-length sbuf overflowed: discard and retry larger. */
                if (sbuf_overflowed(sb)) {
                        sbuf_delete(sb);
                        multiplier++;
                        goto retry_sbufops;
                }
        }

        sbuf_finish(sb);
        error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
        return (error);
}
165 static int
166 reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
167 {
168         int error, v;
169
170         if (allocated_lprof_buf == 0)
171                 return (0);
172
173         v = 0;
174         error = sysctl_handle_int(oidp, &v, 0, req);
175         if (error)
176                 return (error);
177         if (req->newptr == NULL)
178                 return (error);
179         if (v == 0)
180                 return (0);
181
182         bzero(lprof_buf, LPROF_HASH_SIZE*sizeof(*lprof_buf));
183         allocated_lprof_buf = 0;
184         return (0);
185 }
186
/* debug.lock.prof.stats: read-only formatted statistics string. */
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");

/* debug.lock.prof.reset: write non-zero to clear all records. */
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
#endif  /* LOCK_PROFILING */
193
/*
 * Initialize a lock_object: record the class's table index in lo_flags,
 * set the name/type pointers and mark the object LO_INITIALIZED.
 * Must be paired with lock_destroy().
 */
void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
    const char *type, int flags)
{
        int i;

        /* Check for double-init and zero object. */
        KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized",
            name, lock));

        /* Look up lock class to find its index. */
        for (i = 0; i < LOCK_CLASS_MAX; i++)
                if (lock_classes[i] == class) {
                        lock->lo_flags = i << LO_CLASSSHIFT;
                        break;
                }
        /*
         * NOTE(review): lock_classes[] has LOCK_CLASS_MAX + 1 slots, but the
         * loop above (and this assertion) never accept the last slot --
         * confirm that is intentional before registering a class there.
         */
        KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));

        /* Initialize the lock object. */
        lock->lo_name = name;
        lock->lo_type = type != NULL ? type : name;     /* fall back to name */
        lock->lo_flags |= flags | LO_INITIALIZED;
        LOCK_LOG_INIT(lock, 0);
        WITNESS_INIT(lock);
}
219
/*
 * Tear down a lock_object set up by lock_init(): notify WITNESS, then
 * clear LO_INITIALIZED so a later double-destroy trips the KASSERT.
 */
void
lock_destroy(struct lock_object *lock)
{

        KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock));
        WITNESS_DESTROY(lock);
        LOCK_LOG_DESTROY(lock, 0);
        lock->lo_flags &= ~LO_INITIALIZED;
}
229
#ifdef DDB
/*
 * DDB "show lock <addr>" command: print a lock_object's class, name and
 * type, then defer to the class-specific lc_ddb_show() hook for details.
 */
DB_SHOW_COMMAND(lock, db_show_lock)
{
        struct lock_object *lock;
        struct lock_class *class;

        if (!have_addr)
                return;
        lock = (struct lock_object *)addr;
        /* Validate the class index before indexing lock_classes via LOCK_CLASS(). */
        if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
                db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
                return;
        }
        class = LOCK_CLASS(lock);
        db_printf(" class: %s\n", class->lc_name);
        db_printf(" name: %s\n", lock->lo_name);
        /* Only print the type when it differs from the name. */
        if (lock->lo_type && lock->lo_type != lock->lo_name)
                db_printf(" type: %s\n", lock->lo_type);
        class->lc_ddb_show(lock);
}
#endif
251
252 #ifdef LOCK_PROFILING
253 void _lock_profile_obtain_lock_success(struct lock_object *lo, uint64_t waittime, con\
254 st char *file, int line)
255 {
256         struct lock_profile_object *l = &lo->lo_profile_obj;
257
258         /* don't reset the timer when/if recursing */
259         if (l->lpo_acqtime == 0) {
260                 l->lpo_filename = file;
261                 l->lpo_lineno = line;
262                 l->lpo_acqtime = nanoseconds(); 
263                 if (waittime) {
264                         if (l->lpo_acqtime > waittime)
265                                 l->lpo_waittime = l->lpo_acqtime - waittime;
266                 }
267         }
268 }
269
/*
 * Charge the time elapsed since `waitstart' to the profiling record for
 * this lock's acquisition site, allocating a record from lprof_buf on
 * first use of the site.
 */
void _lock_profile_update_wait(struct lock_object *lo, uint64_t waitstart)
{
        struct lock_profile_object *l = &lo->lo_profile_obj;

        if (lock_prof_enable && waitstart) {
                uint64_t now, waittime;
                struct lock_prof *mpp;
                u_int hash;
                const char *p = l->lpo_filename;
                int collision = 0;
                now = nanoseconds();
                /* Ignore non-monotonic timestamps. */
                if (now < waitstart)
                        return;
                waittime = now - waitstart;
                /*
                 * Hash the site (name hash, file pointer, line number).
                 * NOTE(review): unlike _lock_profile_release_lock(), a NULL
                 * lpo_filename is not mapped to "(unknown)" here -- confirm
                 * both paths are meant to hash the same site identically.
                 */
                hash = (l->lpo_namehash * 31 * 31 + (uintptr_t)p * 31 + l->lpo_lineno) & LPROF_HASH_MASK;

                mpp = &lprof_buf[hash];
                /* Open addressing: linear-probe until a match or free slot. */
                while (mpp->name != NULL) {
                        if (mpp->line == l->lpo_lineno &&
                          mpp->file == p &&
                          mpp->namehash == l->lpo_namehash)
                                break;
                        /* If the lprof_hash entry is allocated to someone else, try the next one */
                        collision = 1;
                        CTR4(KTR_SPARE1, "Hash collision, %s:%d %s(%x)", mpp->file, mpp->line, mpp->name, mpp->namehash);
                        hash = (hash + 1) & LPROF_HASH_MASK;
                        mpp = &lprof_buf[hash];
                }
                if (mpp->name == NULL) {
                        int buf;

                        /* Claim a slot from the fixed pool. */
                        buf = atomic_fetchadd_int(&allocated_lprof_buf, 1);
                        /* Just exit if we cannot get a trace buffer */
                        if (buf >= LPROF_HASH_SIZE) {
                                ++lock_prof_rejected;
                                return;
                        }
                        mpp->file = p;
                        mpp->line = l->lpo_lineno;
                        mpp->namehash = l->lpo_namehash;
                        mpp->type = l->lpo_type;
                        mpp->name = lo->lo_name;
                        if (collision)
                                ++lock_prof_collisions;
                        /* We might have raced someone else but who cares, they'll try again next time */
                        ++lock_prof_records;
                }
                /* Per-bucket lock serializes counter updates. */
                LPROF_LOCK(hash);
                mpp->cnt_wait += waittime;
                LPROF_UNLOCK(hash);
        }
}
322
/*
 * Called on lock release: fold the hold time (and any wait time recorded
 * at acquisition) into the profiling record for the acquisition site,
 * then reset the per-lock accumulation state.
 */
void _lock_profile_release_lock(struct lock_object *lo)
{
        struct lock_profile_object *l = &lo->lo_profile_obj;

        /* Only record timed acquisitions on locks not opted out. */
        if (l->lpo_acqtime && !(lo->lo_flags & LO_NOPROFILE)) {
                const char *unknown = "(unknown)";
                u_int64_t acqtime, now, waittime;
                struct lock_prof *mpp;
                u_int hash;
                const char *p = l->lpo_filename;
                int collision = 0;

                now = nanoseconds();
                acqtime = l->lpo_acqtime;
                waittime = l->lpo_waittime;
                /*
                 * NOTE(review): this early return on a non-monotonic
                 * timestamp also skips the state reset below -- confirm
                 * that leaving lpo_acqtime set is intended.
                 */
                if (now <= acqtime)
                        return;
                if (p == NULL || *p == '\0')
                        p = unknown;
                /* Hash the site (name hash, file pointer, line number). */
                hash = (l->lpo_namehash * 31 * 31 + (uintptr_t)p * 31 + l->lpo_lineno) & LPROF_HASH_MASK;
                CTR5(KTR_SPARE1, "Hashing %s(%x) %s:%d to %d", l->lpo_filename, 
                     l->lpo_namehash, p, l->lpo_lineno, hash);
                mpp = &lprof_buf[hash];
                /* Open addressing: linear-probe until a match or free slot. */
                while (mpp->name != NULL) {
                        if (mpp->line == l->lpo_lineno &&
                          mpp->file == p &&
                          mpp->namehash == l->lpo_namehash)
                                break;
                        /* If the lprof_hash entry is allocated to someone 
                         * else, try the next one 
                         */
                        collision = 1;
                        CTR4(KTR_SPARE1, "Hash collision, %s:%d %s(%x)", mpp->file, 
                             mpp->line, mpp->name, mpp->namehash);
                        hash = (hash + 1) & LPROF_HASH_MASK;
                        mpp = &lprof_buf[hash];
                }
                if (mpp->name == NULL) {
                        int buf;

                        /* Claim a slot from the fixed pool. */
                        buf = atomic_fetchadd_int(&allocated_lprof_buf, 1);
                        /* Just exit if we cannot get a trace buffer */
                        if (buf >= LPROF_HASH_SIZE) {
                                ++lock_prof_rejected;
                                return;
                        }
                        mpp->file = p;
                        mpp->line = l->lpo_lineno;
                        mpp->namehash = l->lpo_namehash;
                        mpp->type = l->lpo_type;
                        mpp->name = lo->lo_name;

                        if (collision)
                                ++lock_prof_collisions;
                        
                        /* 
                         * We might have raced someone else but who cares, 
                         * they'll try again next time 
                         */
                        ++lock_prof_records;
                }
                /* Per-bucket lock serializes counter updates. */
                LPROF_LOCK(hash);
                /*
                 * Record if the lock has been held longer now than ever
                 * before.
                 */
                if (now - acqtime > mpp->cnt_max)
                        mpp->cnt_max = now - acqtime;
                mpp->cnt_tot += now - acqtime;
                mpp->cnt_wait += waittime;
                mpp->cnt_cur++;
                /*
                 * There's a small race, really we should cmpxchg
                 * 0 with the current value, but that would bill
                 * the contention to the wrong lock instance if
                 * it followed this also.
                 */
                mpp->cnt_contest_holding += l->lpo_contest_holding;
                mpp->cnt_contest_locking += l->lpo_contest_locking;
                LPROF_UNLOCK(hash);

        }
        /* Reset per-lock state for the next acquisition. */
        l->lpo_acqtime = 0;
        l->lpo_waittime = 0;
        l->lpo_contest_locking = 0;
        l->lpo_contest_holding = 0;
}
410 #endif