void (*fn)(int));
int _evsig_restore_handler(struct event_base *base, int evsignal);
+
void event_active_nolock(struct event *ev, int res, short count);
/* True iff event_debug_mode is on. */
int _event_debug_mode_on = 0;
/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
static void *_event_debug_map_lock = NULL;
+#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
HT_INITIALIZER();
_event_debug_mode_on = 1;
HT_INIT(event_debug_map, &global_debug_map);
-
- EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
#endif
}
#ifndef _EVENT_DISABLE_DEBUG_MODE
event_debug_mode_too_late = 1;
- if (_event_debug_mode_on && !_event_debug_map_lock) {
- EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
- }
#endif
if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
evthread_notify_base(base);
EVBASE_RELEASE_LOCK(base, th_base_lock);
}
+
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+int
+event_global_setup_locks_(const int enable_locks)
+{
+#ifndef _EVENT_DISABLE_DEBUG_MODE
+ EVTHREAD_SETUP_GLOBAL_LOCK(_event_debug_map_lock, 0);
+#endif
+ if (evsig_global_setup_locks_(enable_locks) < 0)
+ return -1;
+ if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
+ return -1;
+ return 0;
+}
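+
+/* Note (from this patch's callers in evthread.c): evthread_set_lock_callbacks()
+ * calls this with enable_locks=1; evthread_enable_lock_debuging() calls it
+ * with enable_locks=0. */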
+#endif
int _evthread_is_debug_lock_held(void *lock);
void *_evthread_debug_get_real_lock(void *lock);
+
+void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
+ int enable_locks);
+
+#define EVTHREAD_SETUP_GLOBAL_LOCK(lockvar, locktype) \
+ do { \
+ lockvar = evthread_setup_global_lock_(lockvar, \
+ (locktype), enable_locks); \
+ if (!lockvar) { \
+ event_warn("Couldn't allocate %s", #lockvar); \
+ return -1; \
+ } \
+	} while (0)
+
+int event_global_setup_locks_(const int enable_locks);
+int evsig_global_setup_locks_(const int enable_locks);
+int evutil_secure_rng_global_setup_locks_(const int enable_locks);
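+
+/* Usage sketch (illustrative; evfoo_lock and evfoo_global_setup_locks_ are
+ * hypothetical names, not part of this patch). The macro reads enable_locks
+ * and can `return -1`, so it must be expanded inside a module's
+ * `int *_global_setup_locks_(const int enable_locks)` function:
+ *
+ *	static void *evfoo_lock = NULL;
+ *
+ *	int
+ *	evfoo_global_setup_locks_(const int enable_locks)
+ *	{
+ *		EVTHREAD_SETUP_GLOBAL_LOCK(evfoo_lock, 0);
+ *		return 0;
+ *	}
+ */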
+
#endif
#ifdef __cplusplus
}
if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
memcpy(target, cbs, sizeof(_evthread_lock_fns));
- return 0;
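+		/* Real lock callbacks are now installed; upgrade the global locks. */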
+ return event_global_setup_locks_(1);
} else {
return -1;
}
sizeof(struct evthread_condition_callbacks));
_evthread_cond_fns.wait_condition = debug_cond_wait;
_evthread_lock_debugging_enabled = 1;
+
+ /* XXX return value should get checked. */
+ event_global_setup_locks_(0);
}
int
return lock->lock;
}
+void *
+evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
+{
+ /* there are four cases here:
+ 1) we're turning on debugging; locking is not on.
+ 2) we're turning on debugging; locking is on.
+ 3) we're turning on locking; debugging is not on.
+ 4) we're turning on locking; debugging is on. */
+
+ if (!enable_locks && _original_lock_fns.alloc == NULL) {
+ /* Case 1: allocate a debug lock. */
+ EVUTIL_ASSERT(lock_ == NULL);
+ return debug_lock_alloc(locktype);
+ } else if (!enable_locks && _original_lock_fns.alloc != NULL) {
+ /* Case 2: wrap the lock in a debug lock. */
+ struct debug_lock *lock;
+ EVUTIL_ASSERT(lock_ != NULL);
+
+ if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
+ /* We can't wrap it: We need a recursive lock */
+ _original_lock_fns.free(lock_, locktype);
+ return debug_lock_alloc(locktype);
+ }
+ lock = mm_malloc(sizeof(struct debug_lock));
+ if (!lock) {
+ _original_lock_fns.free(lock_, locktype);
+ return NULL;
+ }
+ lock->lock = lock_;
+ lock->locktype = locktype;
+ lock->count = 0;
+ lock->held_by = 0;
+ return lock;
+ } else if (enable_locks && ! _evthread_lock_debugging_enabled) {
+ /* Case 3: allocate a regular lock */
+ EVUTIL_ASSERT(lock_ == NULL);
+ return _evthread_lock_fns.alloc(locktype);
+ } else {
+ /* Case 4: Fill in a debug lock with a real lock */
+ struct debug_lock *lock = lock_;
+ EVUTIL_ASSERT(enable_locks &&
+ _evthread_lock_debugging_enabled);
+ EVUTIL_ASSERT(lock->locktype == locktype);
+ EVUTIL_ASSERT(lock->lock == NULL);
+ lock->lock = _original_lock_fns.alloc(
+ locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
+ if (!lock->lock) {
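+			/* Poison the count before freeing so stray use of this lock is easy to spot. */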
+ lock->count = -200;
+ mm_free(lock);
+ return NULL;
+ }
+ return lock;
+ }
+}
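+
+/* Illustrative sequence (not part of this patch; both calls are existing
+ * public libevent API). Enabling debugging before real locks walks a global
+ * lock through case 1 and then case 4:
+ *
+ *	evthread_enable_lock_debuging(); // case 1: debug shell, ->lock == NULL
+ *	evthread_use_pthreads();         // case 4: shell filled with a real
+ *	                                 //         recursive lock
+ *
+ * The opposite order hits case 3 (plain lock), then case 2 (wrap it). */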
+
#ifndef EVTHREAD_EXPOSE_STRUCTS
unsigned long
_evthreadimpl_get_id(void)
(void) arc4random();
return 0;
}
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+int
+evutil_secure_rng_global_setup_locks_(const int enable_locks)
+{
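+	/* Assumption: this branch uses the system arc4random(), which does its
+	 * own locking, so there is no global lock to initialize. */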
+ return 0;
+}
+#endif
#ifndef _EVENT_HAVE_ARC4RANDOM_BUF
static void
#include "./arc4random.c"
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+int
+evutil_secure_rng_global_setup_locks_(const int enable_locks)
+{
+ EVTHREAD_SETUP_GLOBAL_LOCK(arc4rand_lock, 0);
+ return 0;
+}
+#endif
+
int
evutil_secure_rng_init(void)
{
int val;
- if (!arc4rand_lock) {
- EVTHREAD_ALLOC_LOCK(arc4rand_lock, 0);
- }
_ARC4_LOCK();
if (!arc4_seeded_ok)
int
evsig_init(struct event_base *base)
{
-#ifndef _EVENT_DISABLE_THREAD_SUPPORT
- if (! evsig_base_lock)
- EVTHREAD_ALLOC_LOCK(evsig_base_lock, 0);
-#endif
-
/*
* Our signal handler is going to write to one end of the socket
* pair to wake up our event loop. The event loop then scans for
base->sig.sh_old = NULL;
}
}
+
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+int
+evsig_global_setup_locks_(const int enable_locks)
+{
+ EVTHREAD_SETUP_GLOBAL_LOCK(evsig_base_lock, 0);
+ return 0;
+}
+#endif