author     Nick Mathewson <nickm@torproject.org>    2011-04-22 12:01:25 -0400
committer  Nick Mathewson <nickm@torproject.org>    2011-04-22 14:06:33 -0400
commit     b683cae3cb98b48c4dc5caa0b1768f7bb13b92f1 (patch)
tree       89c2c36e4e461f7e81a89cb01bf4952ebb8c9bdd  /evthread.c
parent     b4f89f00c607689d867a83d9d52535f371c5d750 (diff)
download   libevent-b683cae3cb98b48c4dc5caa0b1768f7bb13b92f1.tar.gz
Avoid race-condition when initializing global locks
Previously, we did stuff like if (!lock) EVTHREAD_ALLOC_LOCK(lock,0); for the evsig base global lock, the arc4random lock, and the debug_map lock. But that's potentially racy!

Instead, we move the responsibility for global lock initialization to the functions where we set up the lock callbacks. (Rationale: We already require that you set up the locking callbacks before you create any event_base, and that you do so exactly once.)
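In miniature, the change is from lazy check-then-allocate initialization to eager initialization on the single-threaded, run-once callback-setup path. The sketch below is illustrative only; global_lock, my_lock_alloc(), and set_lock_callbacks_and_init_globals() are hypothetical stand-ins, not libevent code:

#include <stdlib.h>

static void *global_lock = NULL;

/* Hypothetical allocator standing in for the installed alloc callback. */
static void *
my_lock_alloc(unsigned locktype)
{
	(void)locktype;
	return malloc(1);	/* placeholder for a real mutex */
}

/* The racy pattern being removed: two threads can both observe
 * global_lock == NULL, both allocate, and end up using different locks. */
static void
use_global_lock_lazily(void)
{
	if (!global_lock)			/* check ...                */
		global_lock = my_lock_alloc(0);	/* ... then act: not atomic */
	/* acquire/release global_lock here */
}

/* The pattern being adopted: allocate the global locks from the setup
 * path that installs the lock callbacks.  That path must already run
 * exactly once, before any event_base exists, so nothing races with it. */
static int
set_lock_callbacks_and_init_globals(void)
{
	/* ... install the lock callbacks here ... */
	global_lock = my_lock_alloc(0);
	return global_lock ? 0 : -1;
}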
Diffstat (limited to 'evthread.c')
-rw-r--r--  evthread.c  61
1 file changed, 60 insertions(+), 1 deletion(-)
diff --git a/evthread.c b/evthread.c
index 35f14b2b..8fd62743 100644
--- a/evthread.c
+++ b/evthread.c
@@ -81,7 +81,7 @@ evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
 	}
 	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
 		memcpy(target, cbs, sizeof(_evthread_lock_fns));
-		return 0;
+		return event_global_setup_locks_(1);
 	} else {
 		return -1;
 	}
@@ -246,6 +246,9 @@ evthread_enable_lock_debuging(void)
 	    sizeof(struct evthread_condition_callbacks));
 	_evthread_cond_fns.wait_condition = debug_cond_wait;
 	_evthread_lock_debugging_enabled = 1;
+
+	/* XXX return value should get checked. */
+	event_global_setup_locks_(0);
 }
 
 int
@@ -269,6 +272,62 @@ _evthread_debug_get_real_lock(void *lock_)
 	return lock->lock;
 }
 
+void *
+evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
+{
+	/* there are four cases here:
+	   1) we're turning on debugging; locking is not on.
+	   2) we're turning on debugging; locking is on.
+	   3) we're turning on locking; debugging is not on.
+	   4) we're turning on locking; debugging is on. */
+
+	if (!enable_locks && _original_lock_fns.alloc == NULL) {
+		/* Case 1: allocate a debug lock. */
+		EVUTIL_ASSERT(lock_ == NULL);
+		return debug_lock_alloc(locktype);
+	} else if (!enable_locks && _original_lock_fns.alloc != NULL) {
+		/* Case 2: wrap the lock in a debug lock. */
+		struct debug_lock *lock;
+		EVUTIL_ASSERT(lock_ != NULL);
+
+		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
+			/* We can't wrap it: We need a recursive lock */
+			_original_lock_fns.free(lock_, locktype);
+			return debug_lock_alloc(locktype);
+		}
+		lock = mm_malloc(sizeof(struct debug_lock));
+		if (!lock) {
+			_original_lock_fns.free(lock_, locktype);
+			return NULL;
+		}
+		lock->lock = lock_;
+		lock->locktype = locktype;
+		lock->count = 0;
+		lock->held_by = 0;
+		return lock;
+	} else if (enable_locks && ! _evthread_lock_debugging_enabled) {
+		/* Case 3: allocate a regular lock */
+		EVUTIL_ASSERT(lock_ == NULL);
+		return _evthread_lock_fns.alloc(locktype);
+	} else {
+		/* Case 4: Fill in a debug lock with a real lock */
+		struct debug_lock *lock = lock_;
+		EVUTIL_ASSERT(enable_locks &&
+		    _evthread_lock_debugging_enabled);
+		EVUTIL_ASSERT(lock->locktype == locktype);
+		EVUTIL_ASSERT(lock->lock == NULL);
+		lock->lock = _original_lock_fns.alloc(
+		    locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
+		if (!lock->lock) {
+			lock->count = -200;
+			mm_free(lock);
+			return NULL;
+		}
+		return lock;
+	}
+}
+
+
 #ifndef EVTHREAD_EXPOSE_STRUCTS
 unsigned long
 _evthreadimpl_get_id()
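The function that drives the new helper, event_global_setup_locks_(), lives outside evthread.c and is not part of the hunks shown above. As a rough illustration only (the lock variable name and locktype below are hypothetical, not libevent's actual globals), a caller might look like:

void *some_global_lock_ = NULL;	/* hypothetical global lock variable */

int
event_global_setup_locks_(const int enable_locks)
{
	/* Allocate the lock now, or wrap/fill an existing debug lock,
	 * per the four cases handled by evthread_setup_global_lock_(). */
	some_global_lock_ = evthread_setup_global_lock_(some_global_lock_,
	    0, enable_locks);
	if (!some_global_lock_)
		return -1;
	return 0;
}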