/*
 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifndef EVENT__DISABLE_THREAD_SUPPORT

#include "event2/thread.h"

#include <stdlib.h>
#include <string.h>

#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"

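/* When EVTHREAD_EXPOSE_STRUCTS is defined, the globals below are made
 * visible so that the macros in evthread-internal.h can reach them
 * directly; otherwise they stay static and the rest of the library goes
 * through the evthreadimpl_*_ wrappers at the bottom of this file. */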
#ifdef EVTHREAD_EXPOSE_STRUCTS
#define GLOBAL
#else
#define GLOBAL static
#endif

/* globals */
GLOBAL int evthread_lock_debugging_enabled_ = 0;
GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

/* Used for debugging: when lock debugging is enabled, the application's
 * real callbacks are saved here and debug wrappers are installed in
 * their place. */
static struct evthread_lock_callbacks original_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks original_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

void
evthread_set_id_callback(unsigned long (*id_fn)(void))
{
	evthread_id_fn_ = id_fn;
}

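/* With lock debugging enabled, evthread_lock_fns_ holds the debug
 * wrappers, so user-supplied callbacks must be stored in (and read from)
 * original_lock_fns_ instead. These helpers pick the struct that the
 * setters below should actually modify. */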
struct evthread_lock_callbacks *evthread_get_lock_callbacks(void)
{
	return evthread_lock_debugging_enabled_
	    ? &original_lock_fns_ : &evthread_lock_fns_;
}
struct evthread_condition_callbacks *evthread_get_condition_callbacks(void)
{
	return evthread_lock_debugging_enabled_
	    ? &original_cond_fns_ : &evthread_cond_fns_;
}
void evthreadimpl_disable_lock_debugging_(void)
{
	evthread_lock_debugging_enabled_ = 0;
}

int
evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
{
	struct evthread_lock_callbacks *target = evthread_get_lock_callbacks();

	if (!cbs) {
		if (target->alloc)
			event_warnx("Trying to disable lock functions after "
			    "they have been set up will probably not work.");
		memset(target, 0, sizeof(evthread_lock_fns_));
		return 0;
	}
	if (target->alloc) {
		/* Uh oh; we already had locking callbacks set up. */
		if (target->lock_api_version == cbs->lock_api_version &&
			target->supported_locktypes == cbs->supported_locktypes &&
			target->alloc == cbs->alloc &&
			target->free == cbs->free &&
			target->lock == cbs->lock &&
			target->unlock == cbs->unlock) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change lock callbacks once they have been "
		    "initialized.");
		return -1;
	}
	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
		memcpy(target, cbs, sizeof(evthread_lock_fns_));
		return event_global_setup_locks_(1);
	} else {
		return -1;
	}
}
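
/*
 * Illustrative sketch (not part of this file): applications normally
 * just call evthread_use_pthreads() or evthread_use_windows_threads(),
 * but custom callbacks can be installed directly. The my_lock_* names
 * below are hypothetical pthread-backed implementations supplied by the
 * caller; a real alloc must create a recursive mutex whenever locktype
 * includes EVTHREAD_LOCKTYPE_RECURSIVE:
 *
 *	static void *my_lock_alloc(unsigned locktype)
 *	{
 *		pthread_mutex_t *m = malloc(sizeof(*m));
 *		if (m && pthread_mutex_init(m, NULL) == 0)
 *			return m;
 *		free(m);
 *		return NULL;
 *	}
 *	// ... my_lock_free, my_lock_lock, my_lock_unlock likewise ...
 *
 *	const struct evthread_lock_callbacks cbs = {
 *		EVTHREAD_LOCK_API_VERSION,
 *		EVTHREAD_LOCKTYPE_RECURSIVE,
 *		my_lock_alloc, my_lock_free, my_lock_lock, my_lock_unlock
 *	};
 *	evthread_set_lock_callbacks(&cbs);
 */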

int
evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
{
	struct evthread_condition_callbacks *target = evthread_get_condition_callbacks();

	if (!cbs) {
		if (target->alloc_condition)
			event_warnx("Trying to disable condition functions "
			    "after they have been set up will probably not "
			    "work.");
		memset(target, 0, sizeof(evthread_cond_fns_));
		return 0;
	}
	if (target->alloc_condition) {
		/* Uh oh; we already had condition callbacks set up. */
		if (target->condition_api_version == cbs->condition_api_version &&
			target->alloc_condition == cbs->alloc_condition &&
			target->free_condition == cbs->free_condition &&
			target->signal_condition == cbs->signal_condition &&
			target->wait_condition == cbs->wait_condition) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change condition callbacks once they "
		    "have been initialized.");
		return -1;
	}
	if (cbs->alloc_condition && cbs->free_condition &&
	    cbs->signal_condition && cbs->wait_condition) {
		memcpy(target, cbs, sizeof(evthread_cond_fns_));
	}
	if (evthread_lock_debugging_enabled_) {
		/* Keep wait_condition pointing at debug_cond_wait, so that
		 * waits still unwrap the debug lock. */
		evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
		evthread_cond_fns_.free_condition = cbs->free_condition;
		evthread_cond_fns_.signal_condition = cbs->signal_condition;
	}
	return 0;
}

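/* Lock-debugging machinery: each lock handed out while debugging is
 * enabled is a struct debug_lock that wraps the real lock (if any) and
 * tracks its holder and recursion count, so the EVUTIL_ASSERT checks
 * below can catch unbalanced or mismatched use. */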
#define DEBUG_LOCK_SIG	0xdeb0b10c

struct debug_lock {
	unsigned signature;
	unsigned locktype;
	unsigned long held_by;
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;
	void *lock;
};

static void *
debug_lock_alloc(unsigned locktype)
{
	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
	if (!result)
		return NULL;
	if (original_lock_fns_.alloc) {
		if (!(result->lock = original_lock_fns_.alloc(
				locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
			mm_free(result);
			return NULL;
		}
	} else {
		result->lock = NULL;
	}
	result->signature = DEBUG_LOCK_SIG;
	result->locktype = locktype;
	result->count = 0;
	result->held_by = 0;
	return result;
}

static void
debug_lock_free(void *lock_, unsigned locktype)
{
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock->count == 0);
	EVUTIL_ASSERT(locktype == lock->locktype);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	if (original_lock_fns_.free) {
		original_lock_fns_.free(lock->lock,
		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
	}
	/* Poison the structure so that use-after-free trips the asserts. */
	lock->lock = NULL;
	lock->count = -100;
	lock->signature = 0x12300fda;
	mm_free(lock);
}

static void
evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
{
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	++lock->count;
	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
		EVUTIL_ASSERT(lock->count == 1);
	if (evthread_id_fn_) {
		unsigned long me;
		me = evthread_id_fn_();
		if (lock->count > 1)
			EVUTIL_ASSERT(lock->held_by == me);
		lock->held_by = me;
	}
}

static int
debug_lock_lock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (original_lock_fns_.lock)
		res = original_lock_fns_.lock(mode, lock->lock);
	if (!res) {
		evthread_debug_lock_mark_locked(mode, lock);
	}
	return res;
}

static void
evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
{
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (evthread_id_fn_) {
		unsigned long me;
		me = evthread_id_fn_();
		EVUTIL_ASSERT(lock->held_by == me);
		if (lock->count == 1)
			lock->held_by = 0;
	}
	--lock->count;
	EVUTIL_ASSERT(lock->count >= 0);
}

static int
debug_lock_unlock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	evthread_debug_lock_mark_unlocked(mode, lock);
	if (original_lock_fns_.unlock)
		res = original_lock_fns_.unlock(mode, lock->lock);
	return res;
}

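/* A condition wait releases and reacquires the underlying lock inside
 * the real wait_condition callback, so the debug bookkeeping must be
 * cleared before the wait and restored afterward. Note that the real
 * callback is handed the wrapped lock (lock->lock), not the debug_lock
 * itself. */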
static int
debug_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
{
	int r;
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	EVLOCK_ASSERT_LOCKED(lock_);
	evthread_debug_lock_mark_unlocked(0, lock);
	r = original_cond_fns_.wait_condition(cond_, lock->lock, tv);
	evthread_debug_lock_mark_locked(0, lock);
	return r;
}

/* misspelled version for backward compatibility */
void
evthread_enable_lock_debuging(void)
{
	evthread_enable_lock_debugging();
}

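/* Note: debugging is generally enabled early, before event_bases or
 * locks exist, since only locks allocated after this point get debug
 * wrappers; the library's preexisting global locks are the exception,
 * retrofitted through event_global_setup_locks_() below. */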
void
evthread_enable_lock_debugging(void)
{
	struct evthread_lock_callbacks cbs = {
		EVTHREAD_LOCK_API_VERSION,
		EVTHREAD_LOCKTYPE_RECURSIVE,
		debug_lock_alloc,
		debug_lock_free,
		debug_lock_lock,
		debug_lock_unlock
	};
	if (evthread_lock_debugging_enabled_)
		return;
	memcpy(&original_lock_fns_, &evthread_lock_fns_,
	    sizeof(struct evthread_lock_callbacks));
	memcpy(&evthread_lock_fns_, &cbs,
	    sizeof(struct evthread_lock_callbacks));

	memcpy(&original_cond_fns_, &evthread_cond_fns_,
	    sizeof(struct evthread_condition_callbacks));
	evthread_cond_fns_.wait_condition = debug_cond_wait;
	evthread_lock_debugging_enabled_ = 1;

	/* XXX return value should get checked. */
	event_global_setup_locks_(0);
}

int
evthread_is_debug_lock_held_(void *lock_)
{
	struct debug_lock *lock = lock_;
	if (! lock->count)
		return 0;
	if (evthread_id_fn_) {
		unsigned long me = evthread_id_fn_();
		if (lock->held_by != me)
			return 0;
	}
	return 1;
}

void *
evthread_debug_get_real_lock_(void *lock_)
{
	struct debug_lock *lock = lock_;
	return lock->lock;
}

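/* Called through event_global_setup_locks_() to bring the library's
 * preallocated global locks up to date when locking or debugging is
 * turned on. lock_ is the previous lock (or NULL), and enable_locks
 * says whether real locking, as opposed to debugging, is being
 * enabled. */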
void *
evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
{
	/* there are four cases here:
	   1) we're turning on debugging; locking is not on.
	   2) we're turning on debugging; locking is on.
	   3) we're turning on locking; debugging is not on.
	   4) we're turning on locking; debugging is on. */

	if (!enable_locks && original_lock_fns_.alloc == NULL) {
		/* Case 1: allocate a debug lock. */
		EVUTIL_ASSERT(lock_ == NULL);
		return debug_lock_alloc(locktype);
	} else if (!enable_locks && original_lock_fns_.alloc != NULL) {
		/* Case 2: wrap the lock in a debug lock. */
		struct debug_lock *lock;
		EVUTIL_ASSERT(lock_ != NULL);

		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
			/* We can't wrap it: We need a recursive lock */
			original_lock_fns_.free(lock_, locktype);
			return debug_lock_alloc(locktype);
		}
		lock = mm_malloc(sizeof(struct debug_lock));
		if (!lock) {
			original_lock_fns_.free(lock_, locktype);
			return NULL;
		}
		/* The signature must be filled in here too, or the
		 * EVUTIL_ASSERT checks in the debug callbacks would fire
		 * the first time this lock is used. */
		lock->signature = DEBUG_LOCK_SIG;
		lock->lock = lock_;
		lock->locktype = locktype;
		lock->count = 0;
		lock->held_by = 0;
		return lock;
	} else if (enable_locks && ! evthread_lock_debugging_enabled_) {
		/* Case 3: allocate a regular lock */
		EVUTIL_ASSERT(lock_ == NULL);
		return evthread_lock_fns_.alloc(locktype);
	} else {
		/* Case 4: Fill in a debug lock with a real lock */
		struct debug_lock *lock = lock_;
		EVUTIL_ASSERT(enable_locks &&
		              evthread_lock_debugging_enabled_);
		EVUTIL_ASSERT(lock->locktype == locktype);
		EVUTIL_ASSERT(lock->lock == NULL);
		lock->lock = original_lock_fns_.alloc(
			locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock->lock) {
			lock->count = -200;
			mm_free(lock);
			return NULL;
		}
		return lock;
	}
}

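/* Out-of-line accessors: when EVTHREAD_EXPOSE_STRUCTS is not defined,
 * the globals above are static, and the rest of the library reaches
 * them through these evthreadimpl_*_ wrappers instead. */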
#ifndef EVTHREAD_EXPOSE_STRUCTS
unsigned long
evthreadimpl_get_id_(void)
{
	return evthread_id_fn_ ? evthread_id_fn_() : 1;
}
void *
evthreadimpl_lock_alloc_(unsigned locktype)
{
	return evthread_lock_fns_.alloc ?
	    evthread_lock_fns_.alloc(locktype) : NULL;
}
void
evthreadimpl_lock_free_(void *lock, unsigned locktype)
{
	if (evthread_lock_fns_.free)
		evthread_lock_fns_.free(lock, locktype);
}
int
evthreadimpl_lock_lock_(unsigned mode, void *lock)
{
	if (evthread_lock_fns_.lock)
		return evthread_lock_fns_.lock(mode, lock);
	else
		return 0;
}
int
evthreadimpl_lock_unlock_(unsigned mode, void *lock)
{
	if (evthread_lock_fns_.unlock)
		return evthread_lock_fns_.unlock(mode, lock);
	else
		return 0;
}
void *
evthreadimpl_cond_alloc_(unsigned condtype)
{
	return evthread_cond_fns_.alloc_condition ?
	    evthread_cond_fns_.alloc_condition(condtype) : NULL;
}
void
evthreadimpl_cond_free_(void *cond)
{
	if (evthread_cond_fns_.free_condition)
		evthread_cond_fns_.free_condition(cond);
}
int
evthreadimpl_cond_signal_(void *cond, int broadcast)
{
	if (evthread_cond_fns_.signal_condition)
		return evthread_cond_fns_.signal_condition(cond, broadcast);
	else
		return 0;
}
int
evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
{
	if (evthread_cond_fns_.wait_condition)
		return evthread_cond_fns_.wait_condition(cond, lock, tv);
	else
		return 0;
}
int
evthreadimpl_is_lock_debugging_enabled_(void)
{
	return evthread_lock_debugging_enabled_;
}

int
evthreadimpl_locking_enabled_(void)
{
	return evthread_lock_fns_.lock != NULL;
}
#endif /* !EVTHREAD_EXPOSE_STRUCTS */

#endif /* !EVENT__DISABLE_THREAD_SUPPORT */