/* mutually exclusive */
#define ev_signal_next _ev.ev_signal.ev_signal_next
-
#define ev_io_next _ev.ev_io.ev_io_next
#define ev_io_timeout _ev.ev_io.ev_timeout
#define ev_ncalls _ev.ev_signal.ev_ncalls
#define ev_pncalls _ev.ev_signal.ev_pncalls
-/* Possible event closures. */
+/* Possible values for ev_closure in struct event. */
#define EV_CLOSURE_NONE 0
#define EV_CLOSURE_SIGNAL 1
#define EV_CLOSURE_PERSIST 2
struct eventop {
/** The name of this backend. */
const char *name;
- /** Set up an event_base to use this backend.*/
+ /** Function to set up an event_base to use this backend. It should
+ * create a new structure holding whatever information is needed to
+ * run the backend, and return it. The returned pointer will get
+ * stored by event_init into the event_base.evbase field. On failure,
+ * this function should return NULL. */
void *(*init)(struct event_base *);
- /** Enable reading/writing on a given fd. */
+ /** Enable reading/writing on a given fd or signal. 'events' will be
+ * the events that we're trying to enable: one or more of EV_READ,
+ * EV_WRITE, EV_SIGNAL, and EV_ET. 'old' will be those events that
+ * were enabled on this fd previously. 'fdinfo' will be a structure
+ * associated with the fd by the evmap; its size is defined by the
+ * fdinfo field below. It will be set to 0 the first time the fd is
+ * added. The function should return 0 on success and -1 on error.
+ */
int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
- /** Disable reading/writing on a given fd. */
+ /** As "add", except 'events' contains the events we mean to disable. */
int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
/** Function to implement the core of an event loop. It must see which
added events are ready, and cause event_active to be called for each
- active event (usually via event_io_active or such).
+ active event (usually via event_io_active or such). It should
+ return 0 on success and -1 on error.
*/
int (*dispatch)(struct event_base *, struct timeval *);
/** Function to clean up and free our data from the event_base. */
void (*dealloc)(struct event_base *);
- /** Set if we need to reinitialize the event base after we fork. */
+ /** Flag: set if we need to reinitialize the event base after we fork.
+ */
int need_reinit;
- /** Bit-array of supported event_method_features */
+ /** Bit-array of supported event_method_features that this backend can
+ * provide. */
enum event_method_feature features;
- /** Length of extra information we should record for each fd that
- has one or more active events.
+ /** Length of the extra information we should record for each fd that
+ has one or more active events. This information is recorded
+ as part of the evmap entry for each fd, and passed as an argument
+ to the add and del functions above.
*/
size_t fdinfo_len;
};
#endif
/* Used to map signal numbers to a list of events. If EVMAP_USE_HT is not
- defined, this is also used as event_io_map, to map fds to a list of events.
+ defined, this structure is also used as event_io_map, which maps fds to a
+ list of events.
*/
struct event_signal_map {
+ /* An array of evmap_io * or of evmap_signal *; empty entries are
+ * set to NULL. */
void **entries;
+ /* The number of entries available in entries */
int nentries;
};
+/* A list of events waiting on a given 'common' timeout value. Ordinarily,
+ * events waiting for a timeout wait on a minheap. Sometimes, however, a
+ * queue can be faster.
+ */
struct common_timeout_list {
+ /* List of events currently waiting in the queue. */
struct event_list events;
+ /* 'magic' timeval used to indicate the duration of events in this
+ * queue. */
struct timeval duration;
+ /* Event that triggers whenever one of the events in the queue is
+ * ready to activate */
struct event timeout_event;
+ /* The event_base that this timeout list is part of */
struct event_base *base;
};
struct event_change;
+/* List of 'changes' since the last call to eventop.dispatch. Only maintained
+ * if the backend is using changesets. */
struct event_changelist {
struct event_change *changes;
int n_changes;
};
#ifndef _EVENT_DISABLE_DEBUG_MODE
+/* Global internal flag: set to one if debug mode is on. */
extern int _event_debug_mode_on;
#endif
* by the O(1) backends. */
struct event_changelist changelist;
- /* signal handling info */
+ /** Function pointers used to describe the backend that this event_base
+ * uses for signals */
const struct eventop *evsigsel;
+ /** Pointer to signal backend-specific data */
void *evsigbase;
+ /** Data to implement the common signal handler code. */
struct evsig_info sig;
- int event_count; /**< counts number of total events */
- int event_count_active; /**< counts number of active events */
+ /** Number of total events added to this event_base */
+ int event_count;
+ /** Number of total events active in this event_base */
+ int event_count_active;
- int event_gotterm; /**< Set to terminate loop once done
- * processing events. */
- int event_break; /**< Set to exit loop immediately */
+ /** Set if we should terminate the loop once we're done processing
+ * events. */
+ int event_gotterm;
+ /** Set if we should terminate the loop immediately */
+ int event_break;
/* Active event management. */
/** An array of nactivequeues queues for active events (ones that
* priority numbers are more important, and stall higher ones.
*/
struct event_list *activequeues;
+ /** The length of the activequeues array */
int nactivequeues;
+ /* common timeout logic */
+
+ /** An array of common_timeout_list* for all of the common timeout
+ * values we know. */
struct common_timeout_list **common_timeout_queues;
+ /** The number of entries used in common_timeout_queues */
int n_common_timeouts;
+ /** The total size of common_timeout_queues. */
int n_common_timeouts_allocated;
/** The event whose callback is executing right now */
struct event *current_event;
+ /** List of deferred_cb that are active. We run these after the active
+ * events. */
struct deferred_cb_queue defer_queue;
- /** Mapping from file descriptors to enabled events */
- struct event_io_map io;
+ /** Mapping from file descriptors to enabled (added) events */
+ struct event_io_map io;
- /** Mapping from signal numbers to enabled events. */
+ /** Mapping from signal numbers to enabled (added) events. */
struct event_signal_map sigmap;
/** All events that have been enabled (added) in this event_base */
struct event_list eventqueue;
+ /** Stored timeval; used to detect when time is running backwards. */
struct timeval event_tv;
/** Priority queue of events with timeouts. */
struct min_heap timeheap;
+ /** Stored timeval: used to avoid calling gettimeofday too often. */
struct timeval tv_cache;
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
#endif
#ifdef WIN32
+ /** IOCP support structure, if IOCP is enabled. */
struct event_iocp_port *iocp;
#endif
+ /** Flags that this base was configured with */
enum event_base_config_flag flags;
/* Notify main thread to wake up break, etc. */
+ /** A socketpair used by some th_notify functions to wake up the main
+ * thread. */
int th_notify_fd[2];
+ /** An event used by some th_notify functions to wake up the main
+ * thread. */
struct event th_notify;
+ /** A function used to wake up the main thread from another thread. */
int (*th_notify_fn)(struct event_base *base);
};
extern const struct eventop win32ops;
#endif
-/* In order of preference */
+/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef _EVENT_HAVE_EVENT_PORTS
&evportops,
NULL
};
-/* Global state */
+/* Global state; deprecated */
struct event_base *current_base = NULL;
+
+/* Global state */
extern struct event_base *evsig_base;
static int use_monotonic;
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free);
+/* Macro: record that ev is now setup (that is, ready for an add) */
#define _event_debug_note_setup(ev) do { \
if (_event_debug_mode_on) { \
struct event_debug_entry *dent,find; \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
} \
} while (0)
+/* Macro: record that ev is no longer setup */
#define _event_debug_note_teardown(ev) do { \
if (_event_debug_mode_on) { \
struct event_debug_entry *dent,find; \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
} \
} while (0)
+/* Macro: record that ev is now added */
#define _event_debug_note_add(ev) do { \
if (_event_debug_mode_on) { \
struct event_debug_entry *dent,find; \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
} \
} while (0)
+/* Macro: record that ev is no longer added */
#define _event_debug_note_del(ev) do { \
if (_event_debug_mode_on) { \
struct event_debug_entry *dent,find; \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
} \
} while (0)
+/* Macro: assert that ev is setup (i.e., okay to add or inspect) */
#define _event_debug_assert_is_setup(ev) do { \
if (_event_debug_mode_on) { \
struct event_debug_entry *dent,find; \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
} \
} while (0)
-
+/* Macro: assert that ev is not added (i.e., okay to tear down or set
+ * up again) */
#define _event_debug_assert_not_added(ev) do { \
if (_event_debug_mode_on) { \
struct event_debug_entry *dent,find; \
#define EVENT_BASE_ASSERT_LOCKED(base) \
EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
+/* The first time this function is called, it sets use_monotonic to 1
+ * if we have a clock function that supports monotonic time */
static void
detect_monotonic(void)
{
#endif
}
+/** Set 'tp' to the current time according to 'base'. We must hold the lock
+ * on 'base'. If there is a cached time, return it. Otherwise, use
+ * clock_gettime or gettimeofday as appropriate to find out the right time.
+ * Return 0 on success, -1 on failure.
+ */
static int
gettime(struct event_base *base, struct timeval *tp)
{
return r;
}
+/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
base->tv_cache.tv_sec = 0;
}
+/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
return base;
}
+/** Return true iff 'method' is the name of a method that 'cfg' tells us to
+ * avoid. */
static int
event_config_is_avoided_method(const struct event_config *cfg,
const char *method)
return (0);
}
+/** Return true iff 'method' is disabled according to the environment. */
static int
event_is_method_disabled(const char *name)
{
evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
for (i = 8; environment[i] != '\0'; ++i)
environment[i] = toupper(environment[i]);
+ /* Note that evutil_getenv() ignores the environment entirely if
+ * we're setuid */
return (evutil_getenv(environment) != NULL);
}
TAILQ_INIT(&cb->deferred_cb_list);
}
+/** Helper for the deferred_cb queue: wake up the event base. */
static void
notify_base_cbq_callback(struct deferred_cb_queue *cb, void *baseptr)
{
struct event_base *base = baseptr;
if (!EVBASE_IN_THREAD(base))
- evthread_notify_base(base);
+ evthread_notify_base(base);
}
struct deferred_cb_queue *
return (0);
}
+/* Returns true iff we're currently watching any events. */
static int
event_haveevents(struct event_base *base)
{
return (base->event_count > 0);
}
+/* "closure" function called when processing active signal events */
static inline void
event_signal_closure(struct event_base *base, struct event *ev)
{
}
}
+/* Common timeouts are special timeouts that are handled as queues rather than
+ * in the minheap. This is more efficient than the minheap if we happen to
+ * know that we're going to get several thousands of timeout events all with
+ * the same timeout value.
+ *
+ * Since all our timeout handling code assumes timevals can be copied,
+ * assigned, etc, we can't use "magic pointer" to encode these common
+ * timeouts. Searching through a list to see if every timeout is common could
+ * also get inefficient. Instead, we take advantage of the fact that tv_usec
+ * is 32 bits long, but only uses 20 of those bits (since it can never be over
+ * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits
+ * of index into the event_base's array of common timeouts.
+ */
+
#define MICROSECONDS_MASK 0x000fffff
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
#define COMMON_TIMEOUT_IDX_SHIFT 20
#define COMMON_TIMEOUT_IDX(tv) \
(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
+/** Return true iff 'tv' is a common timeout in 'base' */
static inline int
is_common_timeout(const struct timeval *tv,
const struct event_base *base)
(tv2->tv_usec & ~MICROSECONDS_MASK);
}
+/** Requires that 'tv' is a common timeout. Return the corresponding
+ * common_timeout_list. */
static inline struct common_timeout_list *
get_common_timeout_list(struct event_base *base, const struct timeval *tv)
{
return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
}
+#if 0
static inline int
common_timeout_ok(const struct timeval *tv,
struct event_base *base)
return tv->tv_sec == expect->tv_sec &&
tv->tv_usec == expect->tv_usec;
}
+#endif
+/* Add the timeout for the first event in given common timeout list to the
+ * event_base's minheap. */
static void
common_timeout_schedule(struct common_timeout_list *ctl,
const struct timeval *now, struct event *head)
event_add_internal(&ctl->timeout_event, &timeout, 1);
}
+/* Callback: invoked when the timeout for a common timeout queue triggers.
+ * This means that (at least) the first event in that queue should be run,
+ * and the timeout should be rescheduled if there are more events. */
static void
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
{
return result;
}
+/* Closure function invoked when we're activating a persistent event. */
static inline void
event_persist_closure(struct event_base *base, struct event *ev)
{
return count;
}
+/*
+ Process all the deferred_cb entries in 'queue'. If *breakptr becomes set to
+ 1, stop. Requires that we start out holding the lock on 'queue'; releases
+ the lock around 'queue' for each deferred_cb we process.
+ */
static int
event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
{
return (base->evsel->name);
}
+/** Callback: used to implement event_base_loopexit by telling the event_base
+ * that it's time to exit its loop. */
static void
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
{
base->event_gotterm = 1;
}
-/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
}
/* Sets up an event for processing once */
-
struct event_once {
struct event ev;
void *arg;
};
-/* One-time callback, it deletes itself */
-
+/* One-time callback to implement event_base_once: invokes the user callback,
+ * then deletes the allocated storage */
static void
event_once_cb(evutil_socket_t fd, short events, void *arg)
{
return (res);
}
+/* Helper callback: wake an event_base from another thread. This version
+ * works by writing a byte to one end of a socketpair, so that the event_base
+ * listening on the other end will wake up as the corresponding event
+ * triggers */
static int
evthread_notify_base_default(struct event_base *base)
{
}
#if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
+/* Helper callback: wake an event_base from another thread. This version
+ * assumes that you have a working eventfd() implementation. */
static int
evthread_notify_base_eventfd(struct event_base *base)
{
}
#endif
+/** Tell the thread currently running the event_loop for base (if any) that it
+ * needs to stop waiting in its dispatch function (if it is) and process all
+ * active events and deferred callbacks (if there are any). */
static int
evthread_notify_base(struct event_base *base)
{
return base->th_notify_fn(base);
}
+/* Implementation function to add an event. Works just like event_add,
+ * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
+ * we treat tv as an absolute time, not as an interval to add to the current
+ * time */
static inline int
event_add_internal(struct event *ev, const struct timeval *tv,
int tv_is_absolute)
cb->queued = 1;
TAILQ_INSERT_TAIL(&queue->deferred_cb_list, cb, cb_next);
++queue->active_count;
+ /* XXXX Can we get away with doing this only when adding
+ * the first active deferred_cb to the queue? */
if (queue->notify_fn)
queue->notify_fn(queue, queue->notify_arg);
}
}
/*
- * Determines if the time is running backwards by comparing the current
- * time against the last time we checked. Not needed when using clock
- * monotonic.
+ * Determines if the time is running backwards by comparing the current time
+ * against the last time we checked. Not needed when using clock monotonic.
+ * If time is running backwards, we adjust the firing time of every event by
+ * the amount that time seems to have jumped.
*/
-
static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
/*
* We can modify the key element of the node without destroying
- * the key, because we apply it to all in the right order.
+ * the minheap property, because we change every element.
*/
pev = base->timeheap.p;
size = base->timeheap.n;
base->event_tv = *tv;
}
+/* Activate every event whose timeout has elapsed. */
static void
timeout_process(struct event_base *base)
{
}
}
+/* Remove 'ev' from 'queue' (EVLIST_...) in base. */
static void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
}
}
+/* Add 'ev' to the common timeout list 'ctl'. */
static void
insert_common_timeout_inorder(struct common_timeout_list *ctl,
struct event *ev)
{
struct event *e;
+ /* By all logic, we should just be able to append 'ev' to the end of
+ * ctl->events, since the timeout on each 'ev' is set to {the common
+ * timeout} + {the time when we add the event}, and so the events
+ * should arrive in order of their timeouts. But just in case
+ * there's some wacky threading issue going on, we do a search from
+ * the end of the queue to find the right insertion point.
+ */
TAILQ_FOREACH_REVERSE(e, &ctl->events,
ev_timeout_pos.ev_next_with_common_timeout, event_list) {
/* This timercmp is a little sneaky, since both ev and e have