static void
event_base_free_(struct event_base *base, int run_finalizers)
{
- int i, n_deleted=0;
+ int i;
+ size_t n_deleted=0;
struct event *ev;
struct evwatch *watcher;
/* XXXX grab the lock? If there is contention when one thread frees
 * the base, then the contending thread will be very sad soon. */
}
if (n_deleted)
- event_debug(("%s: %d events were still set in base",
+ event_debug(("%s: "EV_SIZE_FMT" events were still set in base",
__func__, n_deleted));
while (LIST_FIRST(&base->once_events)) {
static int
event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
int r, i;
- unsigned u;
+ size_t u;
struct event *ev;
/* Start out with all the EVLIST_INSERTED events. */
/* If we want to activate timer events, loop and activate each event with
* the same fd in both the timeheap and common timeouts list */
int i;
- unsigned u;
+ size_t u;
struct event *ev;
for (u = 0; u < base->timeheap.n; ++u) {
event_base_assert_ok_nolock_(struct event_base *base)
{
int i;
+ size_t u;
int count;
/* First do checks on the per-fd and per-signal lists */
evmap_check_integrity_(base);
/* Check the heap property */
- for (i = 1; i < (int)base->timeheap.n; ++i) {
- int parent = (i - 1) / 2;
+ for (u = 1; u < base->timeheap.n; ++u) {
+ size_t parent = (u - 1) / 2;
struct event *ev, *p_ev;
- ev = base->timeheap.p[i];
+ ev = base->timeheap.p[u];
p_ev = base->timeheap.p[parent];
EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
- EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
+ EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == u);
}
/* Check that the common timeouts are fine */
typedef struct min_heap
{
struct event** p;
- unsigned n, a;
+ size_t n, a;
} min_heap_t;
static inline void min_heap_ctor_(min_heap_t* s);
static inline void min_heap_elem_init_(struct event* e);
static inline int min_heap_elt_is_top_(const struct event *e);
static inline int min_heap_empty_(min_heap_t* s);
-static inline unsigned min_heap_size_(min_heap_t* s);
+static inline size_t min_heap_size_(min_heap_t* s);
static inline struct event* min_heap_top_(min_heap_t* s);
-static inline int min_heap_reserve_(min_heap_t* s, unsigned n);
+static inline int min_heap_reserve_(min_heap_t* s, size_t n);
static inline int min_heap_push_(min_heap_t* s, struct event* e);
static inline struct event* min_heap_pop_(min_heap_t* s);
static inline int min_heap_adjust_(min_heap_t *s, struct event* e);
static inline int min_heap_erase_(min_heap_t* s, struct event* e);
-static inline void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
-static inline void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e);
-static inline void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);
+static inline void min_heap_shift_up_(min_heap_t* s, size_t hole_index, struct event* e);
+static inline void min_heap_shift_up_unconditional_(min_heap_t* s, size_t hole_index, struct event* e);
+static inline void min_heap_shift_down_(min_heap_t* s, size_t hole_index, struct event* e);
#define min_heap_elem_greater(a, b) \
(evutil_timercmp(&(a)->ev_timeout, &(b)->ev_timeout, >))
void min_heap_ctor_(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
void min_heap_dtor_(min_heap_t* s) { if (s->p) mm_free(s->p); }
-void min_heap_elem_init_(struct event* e) { e->ev_timeout_pos.min_heap_idx = -1; }
-int min_heap_empty_(min_heap_t* s) { return 0u == s->n; }
-unsigned min_heap_size_(min_heap_t* s) { return s->n; }
+void min_heap_elem_init_(struct event* e) { e->ev_timeout_pos.min_heap_idx = EV_SIZE_MAX; }
+int min_heap_empty_(min_heap_t* s) { return 0 == s->n; }
+size_t min_heap_size_(min_heap_t* s) { return s->n; }
struct event* min_heap_top_(min_heap_t* s) { return s->n ? *s->p : 0; }
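With heap indices widened to size_t, the "not in any heap" sentinel above moves from (unsigned)-1 to EV_SIZE_MAX. A minimal standalone sketch of that pattern, assuming only that EV_SIZE_MAX behaves like SIZE_MAX (the struct and function names below are illustrative, not libevent's):

    #include <stddef.h>   /* size_t */
    #include <stdint.h>   /* SIZE_MAX, standing in for EV_SIZE_MAX */
    struct node { size_t heap_idx; };
    /* SIZE_MAX means "this node is in no heap", replacing the old (unsigned)-1. */
    static void node_init(struct node *n)          { n->heap_idx = SIZE_MAX; }
    static int  node_in_heap(const struct node *n) { return n->heap_idx != SIZE_MAX; }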
struct event* min_heap_pop_(min_heap_t* s)
{
if (s->n)
{
struct event* e = *s->p;
- min_heap_shift_down_(s, 0u, s->p[--s->n]);
- e->ev_timeout_pos.min_heap_idx = -1;
+ min_heap_shift_down_(s, 0, s->p[--s->n]);
+ e->ev_timeout_pos.min_heap_idx = EV_SIZE_MAX;
return e;
}
return 0;
int min_heap_erase_(min_heap_t* s, struct event* e)
{
- if (-1 != e->ev_timeout_pos.min_heap_idx)
+ if (EV_SIZE_MAX != e->ev_timeout_pos.min_heap_idx)
{
struct event *last = s->p[--s->n];
- unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
+ size_t parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
/* we replace e with the last element in the heap. We might need to
shift it upward if it is less than its parent, or downward if it is
greater than one or both its children. Since the children are known
to be less than the parent, it can't need to shift both up and
down. */
if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], last))
min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, last);
else
min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, last);
- e->ev_timeout_pos.min_heap_idx = -1;
+ e->ev_timeout_pos.min_heap_idx = EV_SIZE_MAX;
return 0;
}
return -1;
int min_heap_adjust_(min_heap_t *s, struct event *e)
{
- if (-1 == e->ev_timeout_pos.min_heap_idx) {
+ if (EV_SIZE_MAX == e->ev_timeout_pos.min_heap_idx) {
return min_heap_push_(s, e);
} else {
- unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
+ size_t parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
/* The position of e has changed; we shift it up or down
* as needed. We can't need to do both. */
if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], e))
min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, e);
else
min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, e);
return 0;
}
}
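For reference, the index arithmetic these helpers rely on for a 0-based array heap, as a small standalone sketch with size_t indices (illustrative names, not libevent code); note that (i - 1) / 2 wraps for i == 0, and the min_heap_idx > 0 checks above keep that value from ever being used:

    #include <stddef.h>
    /* 0-based binary heap: node i has parent (i - 1) / 2 and children 2i + 1 and 2i + 2. */
    static size_t heap_parent(size_t i)      { return (i - 1) / 2; }  /* meaningful only for i > 0 */
    static size_t heap_left_child(size_t i)  { return 2 * i + 1; }
    static size_t heap_right_child(size_t i) { return 2 * i + 2; }    /* written as 2 * (i + 1) in min_heap_shift_down_ */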
-int min_heap_reserve_(min_heap_t* s, unsigned n)
+int min_heap_reserve_(min_heap_t* s, size_t n)
{
if (s->a < n)
{
struct event** p;
- unsigned a = s->a ? s->a * 2 : 8;
+ size_t a = s->a ? s->a * 2 : 8;
if (a < n)
a = n;
if (!(p = (struct event**)mm_realloc(s->p, a * sizeof *p)))
return -1;
s->p = p;
s->a = a;
}
return 0;
}
-void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e)
+void min_heap_shift_up_unconditional_(min_heap_t* s, size_t hole_index, struct event* e)
{
- unsigned parent = (hole_index - 1) / 2;
+ size_t parent = (hole_index - 1) / 2;
do
{
(s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
hole_index = parent;
parent = (hole_index - 1) / 2;
} while (hole_index && min_heap_elem_greater(s->p[parent], e));
(s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
}
-void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
+void min_heap_shift_up_(min_heap_t* s, size_t hole_index, struct event* e)
{
- unsigned parent = (hole_index - 1) / 2;
+ size_t parent = (hole_index - 1) / 2;
while (hole_index && min_heap_elem_greater(s->p[parent], e))
{
(s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
hole_index = parent;
parent = (hole_index - 1) / 2;
}
(s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
}
-void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e)
+void min_heap_shift_down_(min_heap_t* s, size_t hole_index, struct event* e)
{
- unsigned min_child = 2 * (hole_index + 1);
+ size_t min_child = 2 * (hole_index + 1);
while (min_child <= s->n)
{
min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]);
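The min_child adjustment above is a branch-free way to land on the smaller child: min_child starts at the right child, 2 * (hole_index + 1), and steps back to the left child when the right child does not exist (min_child == s->n) or has the larger timeout. An equivalent, more explicit form of the same line (a sketch of the same logic, not the patch's code):

    /* Prefer the left child when the right child is out of range or larger. */
    if (min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]))
        min_child -= 1;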