granicus.if.org Git - libevent/commitdiff
Avoid race-condition when initializing global locks
authorNick Mathewson <nickm@torproject.org>
Fri, 22 Apr 2011 16:01:25 +0000 (12:01 -0400)
committerNick Mathewson <nickm@torproject.org>
Fri, 22 Apr 2011 18:06:33 +0000 (14:06 -0400)
Previously, we did stuff like
   if (!lock)
      EVTHREAD_ALLOC_LOCK(lock,0);
for the evsig base global lock, the arc4random lock, and the debug_map
lock.  But that's potentially racy!  Instead, we move the
responsibility for global lock initialization to the functions where
we set up the lock callbacks.

(Rationale: We already require that you set up the locking callbacks
before you create any event_base, and that you do so exactly once.)

event-internal.h
event.c
evthread-internal.h
evthread.c
evutil_rand.c
signal.c

index 12ca8bc194d09a9498cdb7c4c85c98620d7fdc9f..01b9c9ef4ed19a5bb4bd26f951c6c4d7a49499b2 100644 (file)
@@ -335,6 +335,7 @@ int _evsig_set_handler(struct event_base *base, int evsignal,
                          void (*fn)(int));
 int _evsig_restore_handler(struct event_base *base, int evsignal);
 
+
 void event_active_nolock(struct event *ev, int res, short count);
 
 /* FIXME document. */
diff --git a/event.c b/event.c
index 41c7d8f4fd86023948944fc12c53c96e69b9161f..b5b6ee9b4202441ff183a6161ee76689e970f8f1 100644 (file)
--- a/event.c
+++ b/event.c
@@ -182,7 +182,9 @@ eq_debug_entry(const struct event_debug_entry *a,
 int _event_debug_mode_on = 0;
 /* Set if it's too late to enable event_debug_mode. */
 static int event_debug_mode_too_late = 0;
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
 static void *_event_debug_map_lock = NULL;
+#endif
 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
        HT_INITIALIZER();
 
@@ -514,8 +516,6 @@ event_enable_debug_mode(void)
        _event_debug_mode_on = 1;
 
        HT_INIT(event_debug_map, &global_debug_map);
-
-       EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
 #endif
 }
 
@@ -545,9 +545,6 @@ event_base_new_with_config(const struct event_config *cfg)
 
 #ifndef _EVENT_DISABLE_DEBUG_MODE
        event_debug_mode_too_late = 1;
-       if (_event_debug_mode_on && !_event_debug_map_lock) {
-               EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
-       }
 #endif
 
        if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
@@ -2813,3 +2810,18 @@ event_base_del_virtual(struct event_base *base)
                evthread_notify_base(base);
        EVBASE_RELEASE_LOCK(base, th_base_lock);
 }
+
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+int
+event_global_setup_locks_(const int enable_locks)
+{
+#ifndef _EVENT_DISABLE_DEBUG_MODE
+       EVTHREAD_SETUP_GLOBAL_LOCK(_event_debug_map_lock, 0);
+#endif
+       if (evsig_global_setup_locks_(enable_locks) < 0)
+               return -1;
+       if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
+               return -1;
+       return 0;
+}
+#endif
index 9317fdf1baeb75a86a488f4e6a69d26305fa18a8..fbb1c0d47c05f4361c1c24062c7eac25ed1d594f 100644 (file)
@@ -345,6 +345,24 @@ EVLOCK_TRY_LOCK(void *lock)
 
 int _evthread_is_debug_lock_held(void *lock);
 void *_evthread_debug_get_real_lock(void *lock);
+
+void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
+    int enable_locks);
+
+#define EVTHREAD_SETUP_GLOBAL_LOCK(lockvar, locktype)                  \
+       do {                                                            \
+               lockvar = evthread_setup_global_lock_(lockvar,          \
+                   (locktype), enable_locks);                          \
+               if (!lockvar) {                                         \
+                       event_warn("Couldn't allocate %s", #lockvar);   \
+                       return -1;                                      \
+               }                                                       \
+       } while (0);
+
+int event_global_setup_locks_(const int enable_locks);
+int evsig_global_setup_locks_(const int enable_locks);
+int evutil_secure_rng_global_setup_locks_(const int enable_locks);
+
 #endif
 
 #ifdef __cplusplus
index 35f14b2b2e8dffa7e1f0725c013d5e228fb0dc9c..8fd627434385a70286015af71df5f6ef1a29a2f6 100644 (file)
@@ -81,7 +81,7 @@ evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
        }
        if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
                memcpy(target, cbs, sizeof(_evthread_lock_fns));
-               return 0;
+               return event_global_setup_locks_(1);
        } else {
                return -1;
        }
@@ -246,6 +246,9 @@ evthread_enable_lock_debuging(void)
            sizeof(struct evthread_condition_callbacks));
        _evthread_cond_fns.wait_condition = debug_cond_wait;
        _evthread_lock_debugging_enabled = 1;
+
+       /* XXX return value should get checked. */
+       event_global_setup_locks_(0);
 }
 
 int
@@ -269,6 +272,62 @@ _evthread_debug_get_real_lock(void *lock_)
        return lock->lock;
 }
 
+void *
+evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
+{
+       /* there are four cases here:
+          1) we're turning on debugging; locking is not on.
+          2) we're turning on debugging; locking is on.
+          3) we're turning on locking; debugging is not on.
+          4) we're turning on locking; debugging is on. */
+
+       if (!enable_locks && _original_lock_fns.alloc == NULL) {
+               /* Case 1: allocate a debug lock. */
+               EVUTIL_ASSERT(lock_ == NULL);
+               return debug_lock_alloc(locktype);
+       } else if (!enable_locks && _original_lock_fns.alloc != NULL) {
+               /* Case 2: wrap the lock in a debug lock. */
+               struct debug_lock *lock;
+               EVUTIL_ASSERT(lock_ != NULL);
+
+               if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
+                       /* We can't wrap it: We need a recursive lock */
+                       _original_lock_fns.free(lock_, locktype);
+                       return debug_lock_alloc(locktype);
+               }
+               lock = mm_malloc(sizeof(struct debug_lock));
+               if (!lock) {
+                       _original_lock_fns.free(lock_, locktype);
+                       return NULL;
+               }
+               lock->lock = lock_;
+               lock->locktype = locktype;
+               lock->count = 0;
+               lock->held_by = 0;
+               return lock;
+       } else if (enable_locks && ! _evthread_lock_debugging_enabled) {
+               /* Case 3: allocate a regular lock */
+               EVUTIL_ASSERT(lock_ == NULL);
+               return _evthread_lock_fns.alloc(locktype);
+       } else {
+               /* Case 4: Fill in a debug lock with a real lock */
+               struct debug_lock *lock = lock_;
+               EVUTIL_ASSERT(enable_locks &&
+                             _evthread_lock_debugging_enabled);
+               EVUTIL_ASSERT(lock->locktype == locktype);
+               EVUTIL_ASSERT(lock->lock == NULL);
+               lock->lock = _original_lock_fns.alloc(
+                       locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
+               if (!lock->lock) {
+                       lock->count = -200;
+                       mm_free(lock);
+                       return NULL;
+               }
+               return lock;
+       }
+}
+
+
 #ifndef EVTHREAD_EXPOSE_STRUCTS
 unsigned long
 _evthreadimpl_get_id()
index 07ad2ea67ddf27d03706cd3fc2ca04e9affb88c6..82ea115eac93ce896bff35e22a1820534a64ff12 100644 (file)
@@ -49,6 +49,11 @@ evutil_secure_rng_init(void)
        (void) arc4random();
        return 0;
 }
+int
+evutil_secure_rng_global_setup_locks_(const int enable_locks)
+{
+       return 0;
+}
 
 #ifndef _EVENT_HAVE_ARC4RANDOM_BUF
 static void
@@ -93,13 +98,19 @@ static void *arc4rand_lock;
 
 #include "./arc4random.c"
 
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+int
+evutil_secure_rng_global_setup_locks_(const int enable_locks)
+{
+       EVTHREAD_SETUP_GLOBAL_LOCK(arc4rand_lock, 0);
+       return 0;
+}
+#endif
+
 int
 evutil_secure_rng_init(void)
 {
        int val;
-       if (!arc4rand_lock) {
-               EVTHREAD_ALLOC_LOCK(arc4rand_lock, 0);
-       }
 
        _ARC4_LOCK();
        if (!arc4_seeded_ok)
index ec27ea88428f966b2754a78c08fe24f5f43c857b..4e7fecff39f779bb14e7020602f1faffc2a0109c 100644 (file)
--- a/signal.c
+++ b/signal.c
@@ -168,11 +168,6 @@ evsig_cb(evutil_socket_t fd, short what, void *arg)
 int
 evsig_init(struct event_base *base)
 {
-#ifndef _EVENT_DISABLE_THREAD_SUPPORT
-       if (! evsig_base_lock)
-               EVTHREAD_ALLOC_LOCK(evsig_base_lock, 0);
-#endif
-
        /*
         * Our signal handler is going to write to one end of the socket
         * pair to wake up our event loop.  The event loop then scans for
@@ -436,3 +431,12 @@ evsig_dealloc(struct event_base *base)
                base->sig.sh_old = NULL;
        }
 }
+
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+int
+evsig_global_setup_locks_(const int enable_locks)
+{
+       EVTHREAD_SETUP_GLOBAL_LOCK(evsig_base_lock, 0);
+       return 0;
+}
+#endif