#include "evmap-internal.h"
#include "iocp-internal.h"
#include "changelist-internal.h"
+#include "ht-internal.h"
#ifdef _EVENT_HAVE_EVENT_PORTS
extern const struct eventop evportops;
static int evthread_notify_base(struct event_base *base);
+#ifndef _EVENT_DISABLE_DEBUG_MODE
+/* These functions implement a hashtable of which 'struct event *' structures
+ * have been setup or added. We don't want to trust the content of the struct
+ * event itself, since we're trying to work through cases where an event gets
+ * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
+ */
+
+struct event_debug_entry {
+ HT_ENTRY(event_debug_entry) node;
+ const struct event *ptr;
+ unsigned added : 1;
+};
+
+static inline unsigned
+hash_debug_entry(const struct event_debug_entry *e)
+{
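+ /* Cast through ev_uintptr_t (declared in event2/util.h) so the
+ * pointer-to-integer truncation is explicit on 64-bit builds, then
+ * shift out the low bits, which carry no entropy for aligned
+ * allocations. */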
+ return ((unsigned)(ev_uintptr_t)e->ptr) >> 3;
+}
+
+static inline int
+eq_debug_entry(const struct event_debug_entry *a,
+ const struct event_debug_entry *b)
+{
+ return a->ptr == b->ptr;
+}
+
+int _event_debug_mode_on = 0;
+static void *_event_debug_map_lock = NULL;
+static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
+ HT_INITIALIZER();
+
+HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
+ eq_debug_entry);
+HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
+ eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free);
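+
+/* HT_PROTOTYPE and HT_GENERATE (from ht-internal.h) expand into the lookup,
+ * insert, and remove helpers behind the HT_FIND/HT_INSERT/HT_REMOVE calls
+ * below; 0.5 is the load factor at which the table is resized. */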
+
+#define _event_debug_note_setup(ev) do { \
+ if (_event_debug_mode_on) { \
+ struct event_debug_entry *dent, find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(_event_debug_map_lock, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (dent) { \
+ dent->added = 0; \
+ } else { \
+ dent = mm_malloc(sizeof(*dent)); \
+ if (!dent) \
+ event_err(1, \
+ "Out of memory in debugging code"); \
+ dent->ptr = (ev); \
+ dent->added = 0; \
+ HT_INSERT(event_debug_map, &global_debug_map, dent); \
+ } \
+ EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
+ } \
+ } while (0)
+#define _event_debug_note_teardown(ev) do { \
+ if (_event_debug_mode_on) { \
+ struct event_debug_entry *dent, find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(_event_debug_map_lock, 0); \
+ dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
+ if (dent) \
+ mm_free(dent); \
+ EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
+ } \
+ } while (0)
+#define _event_debug_note_add(ev) do { \
+ if (_event_debug_mode_on) { \
+ struct event_debug_entry *dent, find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(_event_debug_map_lock, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (dent) { \
+ dent->added = 1; \
+ } else { \
+ event_errx(_EVENT_ERR_ABORT, \
+ "%s: noting an add on a non-setup event %p", \
+ __func__, (ev)); \
+ } \
+ EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
+ } \
+ } while (0)
+#define _event_debug_note_del(ev) do { \
+ if (_event_debug_mode_on) { \
+ struct event_debug_entry *dent, find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(_event_debug_map_lock, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (dent) { \
+ dent->added = 0; \
+ } else { \
+ event_errx(_EVENT_ERR_ABORT, \
+ "%s: noting a del on a non-setup event %p", \
+ __func__, (ev)); \
+ } \
+ EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
+ } \
+ } while (0)
+#define _event_debug_assert_is_setup(ev) do { \
+ if (_event_debug_mode_on) { \
+ struct event_debug_entry *dent, find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(_event_debug_map_lock, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (!dent) { \
+ event_errx(_EVENT_ERR_ABORT, \
+ "%s called on a non-initialized event %p", \
+ __func__, (ev)); \
+ } \
+ EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
+ } \
+ } while (0)
+
+#define _event_debug_assert_not_added(ev) do { \
+ if (_event_debug_mode_on) { \
+ struct event_debug_entry *dent, find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(_event_debug_map_lock, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (dent && dent->added) { \
+ event_errx(_EVENT_ERR_ABORT, \
+ "%s called on an already added event %p", \
+ __func__, (ev)); \
+ } \
+ EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
+ } \
+ } while (0)
+
+#else
+#define _event_debug_note_setup(ev) \
+ ((void)0)
+#define _event_debug_note_teardown(ev) \
+ ((void)0)
+#define _event_debug_note_add(ev) \
+ ((void)0)
+#define _event_debug_note_del(ev) \
+ ((void)0)
+#define _event_debug_assert_is_setup(ev) \
+ ((void)0)
+#define _event_debug_assert_not_added(ev) \
+ ((void)0)
+#endif
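+
+/* How the hooks above map onto the public API (see the call sites below):
+ * event_assign() runs _event_debug_assert_not_added and then
+ * _event_debug_note_setup; event_add() and event_del() run
+ * _event_debug_note_add and _event_debug_note_del; event_free() and
+ * event_debug_unassign() run _event_debug_note_teardown; and the accessors
+ * (event_get_fd() and friends) run _event_debug_assert_is_setup. */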
+
static void
detect_monotonic(void)
{
return base ? &base->defer_queue : NULL;
}
+void
+event_enable_debug_mode(void)
+{
+#ifndef _EVENT_DISABLE_DEBUG_MODE
+ if (_event_debug_mode_on)
+ event_errx(1, "%s was called twice!", __func__);
+
+ _event_debug_mode_on = 1;
+
+ HT_INIT(event_debug_map, &global_debug_map);
+
+ EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
+#endif
+}
+
+#if 0
+void
+event_disable_debug_mode(void)
+{
+ struct event_debug_entry **ent, *victim;
+
+ EVLOCK_LOCK(_event_debug_map_lock, 0);
+ for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
+ victim = *ent;
+ ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
+ mm_free(victim);
+ }
+ HT_CLEAR(event_debug_map, &global_debug_map);
+ EVLOCK_UNLOCK(_event_debug_map_lock, 0);
+}
+#endif
+
struct event_base *
event_base_new_with_config(struct event_config *cfg)
{
struct event_base *base;
int should_check_environment;
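+
+ /* Debug mode may have been turned on before the threading callbacks
+ * were installed, making the EVTHREAD_ALLOC_LOCK in
+ * event_enable_debug_mode() a no-op; if so, allocate the debug-map
+ * lock lazily here. */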
+#ifndef _EVENT_DISABLE_DEBUG_MODE
+ if (_event_debug_mode_on && !_event_debug_map_lock) {
+ EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
+ }
+#endif
+
if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
event_warn("%s: calloc", __func__);
return NULL;
mm_free(cfg);
}
-
int
event_config_set_flag(struct event_config *cfg, int flag)
{
{
if (!base)
base = current_base;
+
+ _event_debug_assert_not_added(ev);
+
ev->ev_base = base;
ev->ev_callback = callback;
/* by default, we put new events into the middle priority */
ev->ev_pri = base->nactivequeues / 2;
}
+
+ _event_debug_note_setup(ev);
+
return 0;
}
if (ev->ev_flags != EVLIST_INIT)
return (-1);
+ _event_debug_assert_is_setup(ev);
+
ev->ev_base = base;
ev->ev_pri = base->nactivequeues/2;
void
event_free(struct event *ev)
{
+ _event_debug_assert_is_setup(ev);
+
/* make sure that this event won't be coming back to haunt us. */
event_del(ev);
+ _event_debug_note_teardown(ev);
mm_free(ev);
+}
+
+void
+event_debug_unassign(struct event *ev)
+{
+ _event_debug_assert_not_added(ev);
+ _event_debug_note_teardown(ev);
+
+ ev->ev_flags &= ~EVLIST_INIT;
}
/*
int
event_priority_set(struct event *ev, int pri)
{
+ _event_debug_assert_is_setup(ev);
+
if (ev->ev_flags & EVLIST_ACTIVE)
return (-1);
if (pri < 0 || pri >= ev->ev_base->nactivequeues)
struct timeval now, res;
int flags = 0;
+ _event_debug_assert_is_setup(ev);
+
if (ev->ev_flags & EVLIST_INSERTED)
flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
if (ev->ev_flags & EVLIST_ACTIVE)
void
event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
{
+ _event_debug_assert_is_setup(event);
+
if (base_out)
*base_out = event->ev_base;
if (fd_out)
evutil_socket_t
event_get_fd(const struct event *ev)
{
+ _event_debug_assert_is_setup(ev);
return ev->ev_fd;
}
struct event_base *
event_get_base(const struct event *ev)
{
+ _event_debug_assert_is_setup(ev);
return ev->ev_base;
}
short
event_get_events(const struct event *ev)
{
+ _event_debug_assert_is_setup(ev);
return ev->ev_events;
}
event_callback_fn
event_get_callback(const struct event *ev)
{
+ _event_debug_assert_is_setup(ev);
return ev->ev_callback;
}
void *
event_get_callback_arg(const struct event *ev)
{
+ _event_debug_assert_is_setup(ev);
return ev->ev_arg;
}
int res = 0;
int notify = 0;
+ _event_debug_assert_is_setup(ev);
+
event_debug((
"event_add: event: %p, %s%s%scall %p",
ev,
gettime(base, &now);
-
common_timeout = is_common_timeout(tv, base);
if (tv_is_absolute) {
ev->ev_timeout = *tv;
if (res != -1 && notify && !EVBASE_IN_THREAD(base))
evthread_notify_base(base);
+ _event_debug_note_add(ev);
+
return (res);
}
if (need_cur_lock)
EVBASE_RELEASE_LOCK(base, current_event_lock);
+ _event_debug_note_del(ev);
+
return (res);
}
{
EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+ _event_debug_assert_is_setup(ev);
+
event_active_nolock(ev, res, ncalls);
EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
struct event;
struct event_config;
+/** Enable some relatively expensive debugging checks in Libevent that would
+ * normally be turned off. Generally, these cause code that would otherwise
+ * crash mysteriously to fail earlier with an assertion failure. Note that
+ * this function MUST be called before any events or event_bases have been
+ * created.
+ *
+ * Debug mode can currently catch the following errors:
+ *    An event is re-assigned while it is added
+ *    Any function is called on a non-assigned event
+ *
+ * Note that debugging mode uses memory to track every event that has been
+ * initialized (via event_assign, event_set, or event_new) but not yet
+ * released (via event_free or event_debug_unassign). If you want to use
+ * debug mode, and you find yourself running out of memory, you will need
+ * to use event_debug_unassign to explicitly stop tracking events that
+ * are no longer considered set-up.
+ */
+void event_enable_debug_mode(void);
+
+/**
+ * When debugging mode is enabled, informs Libevent that an event should no
+ * longer be considered assigned. When debugging mode is not enabled, does
+ * nothing.
+ *
+ * This function must only be called on a non-added event.
+ */
+void event_debug_unassign(struct event *);
+
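+/* A minimal usage sketch (illustrative only; "read_cb" and "fd" here are
+ * placeholders, not part of this API):
+ *
+ *     event_enable_debug_mode();   // must precede event_base_new()
+ *     struct event_base *base = event_base_new();
+ *     struct event ev;
+ *     event_assign(&ev, base, fd, EV_READ, read_cb, NULL);
+ *     event_add(&ev, NULL);
+ *     ...
+ *     event_del(&ev);
+ *     event_debug_unassign(&ev);   // stop tracking this stack-allocated event
+ */
+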
/**
Initialize the event API.
/** Instead of checking the current time every time the event loop is
ready to run timeout callbacks, check after each timeout callback.
*/
- EVENT_BASE_FLAG_NO_CACHE_TIME = 0x08
+ EVENT_BASE_FLAG_NO_CACHE_TIME = 0x08,
};
/**
*/
int event_config_require_features(struct event_config *cfg, int feature);
-/** Sets a flag to configure what parts of the eventual event_base will
- * be initialized, and how they'll work. */
+/** Sets one or more flags to configure what parts of the eventual event_base
+ * will be initialized, and how they'll work. */
int event_config_set_flag(struct event_config *cfg, int flag);
/**