o Fix the implementation of getaddrinfo on platforms that lack it; most notably, this makes http.c work better on Windows. Original patch by Lubomir Marinov.
o Fix the evport implementation: port_disassociate was being called on unassociated events, resulting in bogus errors; memory management is now more efficient; from Trond Norbye and Prakash Sangappa
o Support for hooks on RPC input and output; these can be used to implement RPC-independent processing such as compression or authentication.
+ o Use a min-heap instead of a red-black tree for timeouts; as a result, finding the minimum is now an O(1) operation; from Maxim Yegorushkin (a usage sketch follows below)
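
To make that entry concrete, here is a minimal, illustrative sketch of the heap API that the new min_heap.h (included in full below) exposes. It assumes the patched tree, since struct event only gains its min_heap_idx member in this change, and it elides all event setup other than the timeout field:

    #include <assert.h>
    #include <sys/time.h>

    #include "event.h"
    #include "min_heap.h"

    int
    main(void)
    {
        min_heap_t heap;
        struct event a, b;

        min_heap_ctor(&heap);
        min_heap_elem_init(&a);        /* mark as "not on any heap" */
        min_heap_elem_init(&b);

        a.ev_timeout.tv_sec = 5; a.ev_timeout.tv_usec = 0;
        b.ev_timeout.tv_sec = 1; b.ev_timeout.tv_usec = 0;

        min_heap_push(&heap, &a);      /* O(log n) insert */
        min_heap_push(&heap, &b);

        /* O(1): the earliest timeout always sits at the root. */
        assert(min_heap_top(&heap) == &b);

        min_heap_erase(&heap, &b);     /* O(log n) erase via the stored index */
        assert(min_heap_top(&heap) == &a);

        min_heap_dtor(&heap);
        return (0);
    }
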
bin_SCRIPTS = event_rpcgen.py
EXTRA_DIST = acconfig.h event.h event-internal.h log.h evsignal.h evdns.3 \
- evrpc.h evrpc-internal.h \
+ evrpc.h evrpc-internal.h min_heap.h \
event.3 \
kqueue.c epoll_sub.c epoll.c select.c rtsig.c poll.c signal.c \
evport.c devpoll.c event_rpcgen.py \
extern "C" {
#endif
+#include "min_heap.h"
#include "evsignal.h"
struct event_base {
struct event_list eventqueue;
struct timeval event_tv;
- RB_HEAD(event_tree, event) timetree;
+ struct min_heap timeheap;
};
#ifdef __cplusplus
static void timeout_process(struct event_base *);
static void timeout_correct(struct event_base *, struct timeval *);
-static int
-compare(struct event *a, struct event *b)
-{
- if (timercmp(&a->ev_timeout, &b->ev_timeout, <))
- return (-1);
- else if (timercmp(&a->ev_timeout, &b->ev_timeout, >))
- return (1);
- if (a < b)
- return (-1);
- else if (a > b)
- return (1);
- return (0);
-}
-
static void
detect_monotonic(void)
{
return (gettimeofday(tp, NULL));
}
-RB_PROTOTYPE(event_tree, event, ev_timeout_node, compare);
-
-RB_GENERATE(event_tree, event, ev_timeout_node, compare);
-
-
void *
event_init(void)
{
detect_monotonic();
gettime(&base->event_tv);
- RB_INIT(&base->timetree);
+ min_heap_ctor(&base->timeheap);
TAILQ_INIT(&base->eventqueue);
TAILQ_INIT(&base->sig.signalqueue);
base->sig.ev_signal_pair[0] = -1;
if (base == current_base)
current_base = NULL;
+ /* XXX(niels) - check for internal events first */
assert(base);
if (base->evsel->dealloc != NULL)
base->evsel->dealloc(base, base->evbase);
for (i=0; i < base->nactivequeues; ++i)
assert(TAILQ_EMPTY(base->activequeues[i]));
-
- assert(RB_EMPTY(&base->timetree));
+ assert(min_heap_empty(&base->timeheap));
for (i = 0; i < base->nactivequeues; ++i)
free(base->activequeues[i]);
ev->ev_ncalls = 0;
ev->ev_pncalls = NULL;
+ min_heap_elem_init(ev);
+
/* by default, we put new events into the middle priority */
if(current_base)
ev->ev_pri = current_base->nactivequeues/2;
if (ev->ev_flags & EVLIST_TIMEOUT)
event_queue_remove(base, ev, EVLIST_TIMEOUT);
+ else if (min_heap_reserve(&base->timeheap,
+ 1 + min_heap_size(&base->timeheap)) == -1)
+ return (-1); /* ENOMEM == errno; reserving capacity here means the
+ * later min_heap_push in event_queue_insert cannot fail */
/* Check if it is active due to a timeout. Rescheduling
* this timeout before the callback can be executed
struct event *ev;
struct timeval *tv = *tv_p;
- if ((ev = RB_MIN(event_tree, &base->timetree)) == NULL) {
+ if ((ev = min_heap_top(&base->timeheap)) == NULL) {
/* if no time-based events are active wait for I/O */
*tv_p = NULL;
return (0);
static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
- struct event *ev;
+ struct event **pev;
+ unsigned int size;
struct timeval off;
if (use_monotonic)
* We can modify the key element of each node without destroying
* the heap property, because we subtract the same offset from
* every element.
*/
- RB_FOREACH(ev, event_tree, &base->timetree)
- timersub(&ev->ev_timeout, &off, &ev->ev_timeout);
+ pev = base->timeheap.p;
+ size = base->timeheap.n;
+ for (; size-- > 0; ++pev) {
+ struct timeval *ev_tv = &(**pev).ev_timeout; /* don't shadow the tv parameter */
+ timersub(ev_tv, &off, ev_tv);
+ }
}
void
timeout_process(struct event_base *base)
{
struct timeval now;
- struct event *ev, *next;
+ struct event *ev;
- if (RB_EMPTY(&base->timetree))
+ if (min_heap_empty(&base->timeheap))
return;
gettime(&now);
- for (ev = RB_MIN(event_tree, &base->timetree); ev; ev = next) {
+ while ((ev = min_heap_top(&base->timeheap))) {
if (timercmp(&ev->ev_timeout, &now, >))
break;
- next = RB_NEXT(event_tree, &base->timetree, ev);
-
event_queue_remove(base, ev, EVLIST_TIMEOUT);
/* delete this event from the I/O queues */
TAILQ_REMOVE(&base->sig.signalqueue, ev, ev_signal_next);
break;
case EVLIST_TIMEOUT:
- RB_REMOVE(event_tree, &base->timetree, ev);
+ min_heap_erase(&base->timeheap, ev);
break;
case EVLIST_INSERTED:
TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
TAILQ_INSERT_TAIL(&base->sig.signalqueue, ev, ev_signal_next);
break;
case EVLIST_TIMEOUT: {
- struct event *tmp = RB_INSERT(event_tree, &base->timetree, ev);
- assert(tmp == NULL);
+ min_heap_push(&base->timeheap, ev);
break;
}
case EVLIST_INSERTED:
struct type **tqe_prev; /* address of previous next element */ \
}
#endif /* !TAILQ_ENTRY */
-#ifndef RB_ENTRY
-#define _EVENT_DEFINED_RBENTRY
-#define RB_ENTRY(type) \
-struct { \
- struct type *rbe_left; /* left element */ \
- struct type *rbe_right; /* right element */ \
- struct type *rbe_parent; /* parent element */ \
- int rbe_color; /* node color */ \
-}
-#endif /* !RB_ENTRY */
struct event_base;
struct event {
TAILQ_ENTRY (event) ev_next;
TAILQ_ENTRY (event) ev_active_next;
TAILQ_ENTRY (event) ev_signal_next;
- RB_ENTRY (event) ev_timeout_node;
+ unsigned int min_heap_idx; /* for managing timeouts */
struct event_base *ev_base;
+
int ev_fd;
short ev_events;
short ev_ncalls;
--- /dev/null
+/*
+ * Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MIN_HEAP_H_
+#define _MIN_HEAP_H_
+
+#include "event.h"
+
+typedef struct min_heap
+{
+ struct event** p;
+ unsigned n, a;
+} min_heap_t;
+
+static inline void min_heap_ctor(min_heap_t* s);
+static inline void min_heap_dtor(min_heap_t* s);
+static inline void min_heap_elem_init(struct event* e);
+static inline int min_heap_elem_greater(struct event *a, struct event *b);
+static inline int min_heap_empty(min_heap_t* s);
+static inline unsigned min_heap_size(min_heap_t* s);
+static inline struct event* min_heap_top(min_heap_t* s);
+static inline int min_heap_reserve(min_heap_t* s, unsigned n);
+static inline int min_heap_push(min_heap_t* s, struct event* e);
+static inline struct event* min_heap_pop(min_heap_t* s);
+static inline int min_heap_erase(min_heap_t* s, struct event* e);
+static inline void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
+static inline void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);
+
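+/* Ordering predicate: nonzero iff a's timeout fires strictly later than b's. */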
+int min_heap_elem_greater(struct event *a, struct event *b)
+{
+ return timercmp(&a->ev_timeout, &b->ev_timeout, >);
+}
+
+void min_heap_ctor(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
+void min_heap_dtor(min_heap_t* s) { free(s->p); }
+void min_heap_elem_init(struct event* e) { e->min_heap_idx = -1; }
+int min_heap_empty(min_heap_t* s) { return 0u == s->n; }
+unsigned min_heap_size(min_heap_t* s) { return s->n; }
+struct event* min_heap_top(min_heap_t* s) { return s->n ? *s->p : 0; }
+
+int min_heap_push(min_heap_t* s, struct event* e)
+{
+ if(min_heap_reserve(s, s->n + 1))
+ return -1;
+ min_heap_shift_up_(s, s->n++, e);
+ return 0;
+}
+
+struct event* min_heap_pop(min_heap_t* s)
+{
+ if(s->n)
+ {
+ struct event* e = *s->p;
+ e->min_heap_idx = -1;
+ min_heap_shift_down_(s, 0u, s->p[--s->n]);
+ return e;
+ }
+ return 0;
+}
+
+int min_heap_erase(min_heap_t* s, struct event* e)
+{
+ if(-1u != e->min_heap_idx)
+ {
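+ /* min_heap_idx == -1u means "not on any heap".  Fill the hole left by e
+ * with the last element; min_heap_shift_down_ ends with a shift up, so
+ * the replacement settles correctly whether it belongs below or above. */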
+ min_heap_shift_down_(s, e->min_heap_idx, s->p[--s->n]);
+ e->min_heap_idx = -1;
+ return 0;
+ }
+ return -1;
+}
+
+int min_heap_reserve(min_heap_t* s, unsigned n)
+{
+ if(s->a < n)
+ {
+ struct event** p;
+ unsigned a = s->a ? s->a * 2 : 8;
+ if(a < n)
+ a = n;
+ if(!(p = (struct event**)realloc(s->p, a * sizeof *p)))
+ return -1;
+ s->p = p;
+ s->a = a;
+ }
+ return 0;
+}
+
+void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
+{
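+ /* Move the hole at hole_index toward the root while the parent's
+ * timeout is later than e's, then drop e into the hole. */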
+ unsigned parent = (hole_index - 1) / 2;
+ while(hole_index && min_heap_elem_greater(s->p[parent], e))
+ {
+ (s->p[hole_index] = s->p[parent])->min_heap_idx = hole_index;
+ hole_index = parent;
+ parent = (hole_index - 1) / 2;
+ }
+ (s->p[hole_index] = e)->min_heap_idx = hole_index;
+}
+
+void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e)
+{
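+ /* min_child is the right child of hole_index in the 0-based layout:
+ * the children of node i live at indices 2i+1 and 2i+2. */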
+ unsigned min_child = 2 * (hole_index + 1);
+ while(min_child <= s->n)
+ {
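+ /* Pick the smaller child: step back to the left child when the right
+ * child does not exist (index == s->n) or the left one is smaller;
+ * the short-circuiting || keeps the comparison in bounds. */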
+ min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]);
+ if(!(min_heap_elem_greater(e, s->p[min_child])))
+ break;
+ (s->p[hole_index] = s->p[min_child])->min_heap_idx = hole_index;
+ hole_index = min_child;
+ min_child = 2 * (hole_index + 1);
+ }
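+ /* e may instead belong closer to the root (this happens when
+ * min_heap_erase fills a hole with the last element); the final
+ * shift up handles that case as well. */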
+ min_heap_shift_up_(s, hole_index, e);
+}
+
+#endif /* _MIN_HEAP_H_ */
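
As a quick sanity check of the ordering that min_heap_elem_greater induces, a second sketch (again illustrative only, assuming the patched tree) drains a heap with min_heap_pop and observes the timeouts emerging earliest-first:

    #include <assert.h>
    #include <sys/time.h>

    #include "event.h"
    #include "min_heap.h"

    int
    main(void)
    {
        min_heap_t heap;
        struct event ev[4];
        long secs[4] = { 7, 3, 9, 1 };
        long prev = 0;
        int i;

        min_heap_ctor(&heap);
        for (i = 0; i < 4; ++i) {
            min_heap_elem_init(&ev[i]);
            ev[i].ev_timeout.tv_sec = secs[i];
            ev[i].ev_timeout.tv_usec = 0;
            min_heap_push(&heap, &ev[i]);
        }

        /* pops arrive in increasing timeout order: 1, 3, 7, 9 */
        while (!min_heap_empty(&heap)) {
            struct event *e = min_heap_pop(&heap);
            assert(e->ev_timeout.tv_sec > prev);
            prev = e->ev_timeout.tv_sec;
        }

        min_heap_dtor(&heap);
        return (0);
    }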