1 /*
2  * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 #include "event2/event-config.h"
28 #include "evconfig-private.h"
29
30 #ifdef _WIN32
31 #include <winsock2.h>
32 #define WIN32_LEAN_AND_MEAN
33 #include <windows.h>
34 #undef WIN32_LEAN_AND_MEAN
35 #endif
36 #include <sys/types.h>
37 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
38 #include <sys/time.h>
39 #endif
40 #include <sys/queue.h>
41 #ifdef EVENT__HAVE_SYS_SOCKET_H
42 #include <sys/socket.h>
43 #endif
44 #include <stdio.h>
45 #include <stdlib.h>
46 #ifdef EVENT__HAVE_UNISTD_H
47 #include <unistd.h>
48 #endif
49 #include <ctype.h>
50 #include <errno.h>
51 #include <signal.h>
52 #include <string.h>
53 #include <time.h>
54 #include <limits.h>
55 #ifdef EVENT__HAVE_FCNTL_H
56 #include <fcntl.h>
57 #endif
58
59 #include "event2/event.h"
60 #include "event2/event_struct.h"
61 #include "event2/event_compat.h"
62 #include "event2/watch.h"
63 #include "event-internal.h"
64 #include "defer-internal.h"
65 #include "evthread-internal.h"
66 #include "event2/thread.h"
67 #include "event2/util.h"
68 #include "log-internal.h"
69 #include "evmap-internal.h"
70 #include "iocp-internal.h"
71 #include "changelist-internal.h"
72 #define HT_NO_CACHE_HASH_VALUES
73 #include "ht-internal.h"
74 #include "util-internal.h"
75
76
77 #ifdef EVENT__HAVE_WORKING_KQUEUE
78 #include "kqueue-internal.h"
79 #endif
80
81 #ifdef EVENT__HAVE_EVENT_PORTS
82 extern const struct eventop evportops;
83 #endif
84 #ifdef EVENT__HAVE_SELECT
85 extern const struct eventop selectops;
86 #endif
87 #ifdef EVENT__HAVE_POLL
88 extern const struct eventop pollops;
89 #endif
90 #ifdef EVENT__HAVE_EPOLL
91 extern const struct eventop epollops;
92 #endif
93 #ifdef EVENT__HAVE_WORKING_KQUEUE
94 extern const struct eventop kqops;
95 #endif
96 #ifdef EVENT__HAVE_DEVPOLL
97 extern const struct eventop devpollops;
98 #endif
99 #ifdef EVENT__HAVE_WEPOLL
100 extern const struct eventop wepollops;
101 #endif
102 #ifdef _WIN32
103 extern const struct eventop win32ops;
104 #endif
105
106 /* Array of backends in order of preference. */
107 static const struct eventop *eventops[] = {
108 #ifdef EVENT__HAVE_EVENT_PORTS
109         &evportops,
110 #endif
111 #ifdef EVENT__HAVE_WORKING_KQUEUE
112         &kqops,
113 #endif
114 #ifdef EVENT__HAVE_EPOLL
115         &epollops,
116 #endif
117 #ifdef EVENT__HAVE_DEVPOLL
118         &devpollops,
119 #endif
120 #ifdef EVENT__HAVE_POLL
121         &pollops,
122 #endif
123 #ifdef EVENT__HAVE_SELECT
124         &selectops,
125 #endif
126 #ifdef _WIN32
127         &win32ops,
128 #endif
129 #ifdef EVENT__HAVE_WEPOLL
130         &wepollops,
131 #endif
132         NULL
133 };
134
135 /* Global state; deprecated */
136 EVENT2_EXPORT_SYMBOL
137 struct event_base *event_global_current_base_ = NULL;
138 #define current_base event_global_current_base_
139
140 /* Global state */
141
142 static void *event_self_cbarg_ptr_ = NULL;
143
144 /* Prototypes */
145 static void     event_queue_insert_active(struct event_base *, struct event_callback *);
146 static void     event_queue_insert_active_later(struct event_base *, struct event_callback *);
147 static void     event_queue_insert_timeout(struct event_base *, struct event *);
148 static void     event_queue_insert_inserted(struct event_base *, struct event *);
149 static void     event_queue_remove_active(struct event_base *, struct event_callback *);
150 static void     event_queue_remove_active_later(struct event_base *, struct event_callback *);
151 static void     event_queue_remove_timeout(struct event_base *, struct event *);
152 static void     event_queue_remove_inserted(struct event_base *, struct event *);
153 static void event_queue_make_later_events_active(struct event_base *base);
154
155 static int evthread_make_base_notifiable_nolock_(struct event_base *base);
156 static int event_del_(struct event *ev, int blocking);
157
158 #ifdef USE_REINSERT_TIMEOUT
159 /* This code seems buggy; only turn it on if we find out what the trouble is. */
160 static void     event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
161 #endif
162
163 static int      event_haveevents(struct event_base *);
164
165 static int      event_process_active(struct event_base *);
166
167 static int      timeout_next(struct event_base *, struct timeval **);
168 static void     timeout_process(struct event_base *);
169
170 static inline void      event_signal_closure(struct event_base *, struct event *ev);
171 static inline void      event_persist_closure(struct event_base *, struct event *ev);
172
173 static int      evthread_notify_base(struct event_base *base);
174
175 static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
176     struct event *ev);
177
178 #ifndef EVENT__DISABLE_DEBUG_MODE
179 /* These functions implement a hashtable of which 'struct event *' structures
180  * have been setup or added.  We don't want to trust the content of the struct
181  * event itself, since we're trying to work through cases where an event gets
182  * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
183  */
184
185 struct event_debug_entry {
186         HT_ENTRY(event_debug_entry) node;
187         const struct event *ptr;
188         unsigned added : 1;
189 };
190
191 static inline unsigned
192 hash_debug_entry(const struct event_debug_entry *e)
193 {
194         /* We need to do this silliness to convince compilers that we
195          * honestly mean to cast e->ptr to an integer, and discard any
196          * part of it that doesn't fit in an unsigned.
197          */
198         unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
199         /* Our hashtable implementation is pretty sensitive to low bits,
200          * and every struct event is over 64 bytes in size, so we can
201          * just say >>6. */
202         return (u >> 6);
203 }
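
/* A small worked example of the shift above (the addresses are hypothetical):
 * two events allocated 192 bytes apart, say at 0x1000 and 0x10c0, hash to
 *
 *     0x1000 >> 6 == 0x40
 *     0x10c0 >> 6 == 0x43
 *
 * Since every struct event is more than 64 bytes long, neighbouring events
 * always land on distinct values once the low 6 bits are dropped, which is
 * what keeps this very cheap hash usable.
 */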
204
205 static inline int
206 eq_debug_entry(const struct event_debug_entry *a,
207     const struct event_debug_entry *b)
208 {
209         return a->ptr == b->ptr;
210 }
211
212 int event_debug_mode_on_ = 0;
213
214
215 #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
216 /**
217  * @brief debug-mode variable that is set whenever a structure which must be
218  *        shared across threads is created (if thread support is enabled).
219  *
220  *        When evthreads are initialized, this variable is checked; if it is
221  *        set to something other than zero, the evthread setup functions were
222  *        called out of order.
223  *
224  *        See: "Locks and threading" in the documentation.
225  */
226 int event_debug_created_threadable_ctx_ = 0;
227 #endif
228
229 /* Set if it's too late to enable event_debug_mode. */
230 static int event_debug_mode_too_late = 0;
231 #ifndef EVENT__DISABLE_THREAD_SUPPORT
232 static void *event_debug_map_lock_ = NULL;
233 #endif
234 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
235         HT_INITIALIZER();
236
237 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
238     eq_debug_entry)
239 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
240     eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
241
242 /* record that ev is now setup (that is, ready for an add) */
243 static void event_debug_note_setup_(const struct event *ev)
244 {
245         struct event_debug_entry *dent, find;
246
247         if (!event_debug_mode_on_)
248                 goto out;
249
250         find.ptr = ev;
251         EVLOCK_LOCK(event_debug_map_lock_, 0);
252         dent = HT_FIND(event_debug_map, &global_debug_map, &find);
253         if (dent) {
254                 dent->added = 0;
255         } else {
256                 dent = mm_malloc(sizeof(*dent));
257                 if (!dent)
258                         event_err(1,
259                             "Out of memory in debugging code");
260                 dent->ptr = ev;
261                 dent->added = 0;
262                 HT_INSERT(event_debug_map, &global_debug_map, dent);
263         }
264         EVLOCK_UNLOCK(event_debug_map_lock_, 0);
265
266 out:
267         event_debug_mode_too_late = 1;
268 }
269 /* record that ev is no longer setup */
270 static void event_debug_note_teardown_(const struct event *ev)
271 {
272         struct event_debug_entry *dent, find;
273
274         if (!event_debug_mode_on_)
275                 goto out;
276
277         find.ptr = ev;
278         EVLOCK_LOCK(event_debug_map_lock_, 0);
279         dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
280         if (dent)
281                 mm_free(dent);
282         EVLOCK_UNLOCK(event_debug_map_lock_, 0);
283
284 out:
285         event_debug_mode_too_late = 1;
286 }
287 /* record that ev is now added */
288 static void event_debug_note_add_(const struct event *ev)
289 {
290         struct event_debug_entry *dent,find;
291
292         if (!event_debug_mode_on_)
293                 goto out;
294
295         find.ptr = ev;
296         EVLOCK_LOCK(event_debug_map_lock_, 0);
297         dent = HT_FIND(event_debug_map, &global_debug_map, &find);
298         if (dent) {
299                 dent->added = 1;
300         } else {
301                 event_errx(EVENT_ERR_ABORT_,
302                     "%s: noting an add on a non-setup event %p"
303                     " (events: 0x%x, fd: "EV_SOCK_FMT
304                     ", flags: 0x%x)",
305                     __func__, ev, ev->ev_events,
306                     EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
307         }
308         EVLOCK_UNLOCK(event_debug_map_lock_, 0);
309
310 out:
311         event_debug_mode_too_late = 1;
312 }
313 /* record that ev is no longer added */
314 static void event_debug_note_del_(const struct event *ev)
315 {
316         struct event_debug_entry *dent, find;
317
318         if (!event_debug_mode_on_)
319                 goto out;
320
321         find.ptr = ev;
322         EVLOCK_LOCK(event_debug_map_lock_, 0);
323         dent = HT_FIND(event_debug_map, &global_debug_map, &find);
324         if (dent) {
325                 dent->added = 0;
326         } else {
327                 event_errx(EVENT_ERR_ABORT_,
328                     "%s: noting a del on a non-setup event %p"
329                     " (events: 0x%x, fd: "EV_SOCK_FMT
330                     ", flags: 0x%x)",
331                     __func__, ev, ev->ev_events,
332                     EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
333         }
334         EVLOCK_UNLOCK(event_debug_map_lock_, 0);
335
336 out:
337         event_debug_mode_too_late = 1;
338 }
339 /* assert that ev is setup (i.e., okay to add or inspect) */
340 static void event_debug_assert_is_setup_(const struct event *ev)
341 {
342         struct event_debug_entry *dent, find;
343
344         if (!event_debug_mode_on_)
345                 return;
346
347         find.ptr = ev;
348         EVLOCK_LOCK(event_debug_map_lock_, 0);
349         dent = HT_FIND(event_debug_map, &global_debug_map, &find);
350         if (!dent) {
351                 event_errx(EVENT_ERR_ABORT_,
352                     "%s called on a non-initialized event %p"
353                     " (events: 0x%x, fd: "EV_SOCK_FMT
354                     ", flags: 0x%x)",
355                     __func__, ev, ev->ev_events,
356                     EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
357         }
358         EVLOCK_UNLOCK(event_debug_map_lock_, 0);
359 }
360 /* assert that ev is not added (i.e., okay to tear down or set up again) */
361 static void event_debug_assert_not_added_(const struct event *ev)
362 {
363         struct event_debug_entry *dent, find;
364
365         if (!event_debug_mode_on_)
366                 return;
367
368         find.ptr = ev;
369         EVLOCK_LOCK(event_debug_map_lock_, 0);
370         dent = HT_FIND(event_debug_map, &global_debug_map, &find);
371         if (dent && dent->added) {
372                 event_errx(EVENT_ERR_ABORT_,
373                     "%s called on an already added event %p"
374                     " (events: 0x%x, fd: "EV_SOCK_FMT", "
375                     "flags: 0x%x)",
376                     __func__, ev, ev->ev_events,
377                     EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
378         }
379         EVLOCK_UNLOCK(event_debug_map_lock_, 0);
380 }
381 static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
382 {
383         if (!event_debug_mode_on_)
384                 return;
385         if (fd < 0)
386                 return;
387
388 #ifndef _WIN32
389         {
390                 int flags;
391                 if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
392                         EVUTIL_ASSERT(flags & O_NONBLOCK);
393                 }
394         }
395 #endif
396 }
397 #else
398 static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
399 static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
400 static void event_debug_note_add_(const struct event *ev) { (void)ev; }
401 static void event_debug_note_del_(const struct event *ev) { (void)ev; }
402 static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
403 static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
404 static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
405 #endif
406
407 #define EVENT_BASE_ASSERT_LOCKED(base)          \
408         EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
409
410 /* How often (in seconds) do we check for changes in wall clock time relative
411  * to monotonic time?  Set this to -1 for 'never.' */
412 #define CLOCK_SYNC_INTERVAL 5
413
414 /** Set 'tp' to the current time according to 'base'.  We must hold the lock
415  * on 'base'.  If there is a cached time, return it.  Otherwise, use
416  * clock_gettime or gettimeofday as appropriate to find out the right time.
417  * Return 0 on success, -1 on failure.
418  */
419 static int
420 gettime(struct event_base *base, struct timeval *tp)
421 {
422         EVENT_BASE_ASSERT_LOCKED(base);
423
424         if (base->tv_cache.tv_sec) {
425                 *tp = base->tv_cache;
426                 return (0);
427         }
428
429         if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
430                 return -1;
431         }
432
433         if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
434             < tp->tv_sec) {
435                 struct timeval tv;
436                 evutil_gettimeofday(&tv,NULL);
437                 evutil_timersub(&tv, tp, &base->tv_clock_diff);
438                 base->last_updated_clock_diff = tp->tv_sec;
439         }
440
441         return 0;
442 }
443
444 int
445 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
446 {
447         int r;
448         if (!base) {
449                 base = current_base;
450                 if (!current_base)
451                         return evutil_gettimeofday(tv, NULL);
452         }
453
454         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
455         if (base->tv_cache.tv_sec == 0) {
456                 r = evutil_gettimeofday(tv, NULL);
457         } else {
458                 evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
459                 r = 0;
460         }
461         EVBASE_RELEASE_LOCK(base, th_base_lock);
462         return r;
463 }
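
/* A minimal sketch of how a caller might read the cached clock from inside a
 * callback; 'base' is assumed to arrive as the callback argument:
 *
 *     static void
 *     timing_cb(evutil_socket_t fd, short what, void *arg)
 *     {
 *             struct event_base *base = arg;
 *             struct timeval now;
 *
 *             if (event_base_gettimeofday_cached(base, &now) == 0)
 *                     printf("callback ran at %ld\n", (long)now.tv_sec);
 *     }
 *
 * While callbacks are running, the value comes from the cache that is updated
 * once per loop iteration, so repeated calls avoid extra system calls.
 */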
464
465 /** Make 'base' have no current cached time. */
466 static inline void
467 clear_time_cache(struct event_base *base)
468 {
469         base->tv_cache.tv_sec = 0;
470 }
471
472 /** Replace the cached time in 'base' with the current time. */
473 static inline void
474 update_time_cache(struct event_base *base)
475 {
476         base->tv_cache.tv_sec = 0;
477         if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
478             gettime(base, &base->tv_cache);
479 }
480
481 int
482 event_base_update_cache_time(struct event_base *base)
483 {
484
485         if (!base) {
486                 base = current_base;
487                 if (!current_base)
488                         return -1;
489         }
490
491         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
492         if (base->running_loop)
493                 update_time_cache(base);
494         EVBASE_RELEASE_LOCK(base, th_base_lock);
495         return 0;
496 }
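
/* A minimal sketch of when a caller might refresh the cache explicitly: a
 * long-running callback (do_expensive_work() is an assumed helper, not a
 * libevent API) can call event_base_update_cache_time() so that anything
 * scheduled afterwards in the same iteration sees a fresh clock reading:
 *
 *     static void
 *     slow_cb(evutil_socket_t fd, short what, void *arg)
 *     {
 *             struct event_base *base = arg;
 *
 *             do_expensive_work();
 *             event_base_update_cache_time(base);
 *     }
 */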
497
498 static inline struct event *
499 event_callback_to_event(struct event_callback *evcb)
500 {
501         EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
502         return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
503 }
504
505 static inline struct event_callback *
506 event_to_event_callback(struct event *ev)
507 {
508         return &ev->ev_evcallback;
509 }
510
511 struct event_base *
512 event_init(void)
513 {
514         struct event_base *base = event_base_new_with_config(NULL);
515
516         if (base == NULL) {
517                 event_errx(1, "%s: Unable to construct event_base", __func__);
518                 return NULL;
519         }
520
521         current_base = base;
522
523         return (base);
524 }
525
526 struct event_base *
527 event_base_new(void)
528 {
529         struct event_base *base = NULL;
530         struct event_config *cfg = event_config_new();
531         if (cfg) {
532                 base = event_base_new_with_config(cfg);
533                 event_config_free(cfg);
534         }
535         return base;
536 }
537
538 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
539  * avoid. */
540 static int
541 event_config_is_avoided_method(const struct event_config *cfg,
542     const char *method)
543 {
544         struct event_config_entry *entry;
545
546         TAILQ_FOREACH(entry, &cfg->entries, next) {
547                 if (entry->avoid_method != NULL &&
548                     strcmp(entry->avoid_method, method) == 0)
549                         return (1);
550         }
551
552         return (0);
553 }
554
555 /** Return true iff 'method' is disabled according to the environment. */
556 static int
557 event_is_method_disabled(const char *name)
558 {
559         char environment[64];
560         int i;
561
562         evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
563         for (i = 8; environment[i] != '\0'; ++i)
564                 environment[i] = EVUTIL_TOUPPER_(environment[i]);
565         /* Note that evutil_getenv_() ignores the environment entirely if
566          * we're setuid */
567         return (evutil_getenv_(environment) != NULL);
568 }
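
/* The name built above is "EVENT_NO" followed by the upper-cased backend
 * name, so (on POSIX systems, where setenv() is available) a test program
 * could rule out epoll before creating its base:
 *
 *     setenv("EVENT_NOEPOLL", "1", 1);
 *     struct event_base *base = event_base_new();
 *
 * The variable's value is irrelevant; only its presence matters, and it is
 * ignored when EVENT_BASE_FLAG_IGNORE_ENV is set or the process is setuid.
 */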
569
570 int
571 event_base_get_features(const struct event_base *base)
572 {
573         return base->evsel->features;
574 }
575
576 void
577 event_enable_debug_mode(void)
578 {
579 #ifndef EVENT__DISABLE_DEBUG_MODE
580         if (event_debug_mode_on_)
581                 event_errx(1, "%s was called twice!", __func__);
582         if (event_debug_mode_too_late)
583                 event_errx(1, "%s must be called *before* creating any events "
584                     "or event_bases",__func__);
585
586         event_debug_mode_on_ = 1;
587
588         HT_INIT(event_debug_map, &global_debug_map);
589 #endif
590 }
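
/* A minimal usage sketch: as the checks above enforce, the call has to come
 * before the first event or event_base is created:
 *
 *     int main(void)
 *     {
 *             event_enable_debug_mode();
 *             struct event_base *base = event_base_new();
 *             event_base_free(base);
 *             return 0;
 *     }
 */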
591
592 void
593 event_disable_debug_mode(void)
594 {
595 #ifndef EVENT__DISABLE_DEBUG_MODE
596         struct event_debug_entry **ent, *victim;
597
598         EVLOCK_LOCK(event_debug_map_lock_, 0);
599         for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
600                 victim = *ent;
601                 ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
602                 mm_free(victim);
603         }
604         HT_CLEAR(event_debug_map, &global_debug_map);
605         EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
606
607         event_debug_mode_on_  = 0;
608 #endif
609 }
610
611 struct event_base *
612 event_base_new_with_config(const struct event_config *cfg)
613 {
614         int i;
615         struct event_base *base;
616         int should_check_environment;
617
618 #ifndef EVENT__DISABLE_DEBUG_MODE
619         event_debug_mode_too_late = 1;
620 #endif
621
622         if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
623                 event_warn("%s: calloc", __func__);
624                 return NULL;
625         }
626
627         if (cfg)
628                 base->flags = cfg->flags;
629
630         should_check_environment =
631             !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
632
633         {
634                 struct timeval tmp;
635                 int precise_time =
636                     cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
637                 int flags;
638                 if (should_check_environment && !precise_time) {
639                         precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
640                         if (precise_time) {
641                                 base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
642                         }
643                 }
644                 flags = precise_time ? EV_MONOT_PRECISE : 0;
645                 evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
646
647                 gettime(base, &tmp);
648         }
649
650         min_heap_ctor_(&base->timeheap);
651
652         base->sig.ev_signal_pair[0] = -1;
653         base->sig.ev_signal_pair[1] = -1;
654         base->th_notify_fd[0] = -1;
655         base->th_notify_fd[1] = -1;
656
657         TAILQ_INIT(&base->active_later_queue);
658
659         evmap_io_initmap_(&base->io);
660         evmap_signal_initmap_(&base->sigmap);
661         event_changelist_init_(&base->changelist);
662
663         base->evbase = NULL;
664
665         if (cfg) {
666                 memcpy(&base->max_dispatch_time,
667                     &cfg->max_dispatch_interval, sizeof(struct timeval));
668                 base->limit_callbacks_after_prio =
669                     cfg->limit_callbacks_after_prio;
670         } else {
671                 base->max_dispatch_time.tv_sec = -1;
672                 base->limit_callbacks_after_prio = 1;
673         }
674         if (cfg && cfg->max_dispatch_callbacks >= 0) {
675                 base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
676         } else {
677                 base->max_dispatch_callbacks = INT_MAX;
678         }
679         if (base->max_dispatch_callbacks == INT_MAX &&
680             base->max_dispatch_time.tv_sec == -1)
681                 base->limit_callbacks_after_prio = INT_MAX;
682
683         for (i = 0; eventops[i] && !base->evbase; i++) {
684                 if (cfg != NULL) {
685                         /* determine if this backend should be avoided */
686                         if (event_config_is_avoided_method(cfg,
687                                 eventops[i]->name))
688                                 continue;
689                         if ((eventops[i]->features & cfg->require_features)
690                             != cfg->require_features)
691                                 continue;
692                 }
693
694                 /* also obey the environment variables */
695                 if (should_check_environment &&
696                     event_is_method_disabled(eventops[i]->name))
697                         continue;
698
699                 base->evsel = eventops[i];
700
701                 base->evbase = base->evsel->init(base);
702         }
703
704         if (base->evbase == NULL) {
705                 event_warnx("%s: no event mechanism available",
706                     __func__);
707                 base->evsel = NULL;
708                 event_base_free(base);
709                 return NULL;
710         }
711
712         if (evutil_getenv_("EVENT_SHOW_METHOD"))
713                 event_msgx("libevent using: %s", base->evsel->name);
714
715         /* allocate a single active event queue */
716         if (event_base_priority_init(base, 1) < 0) {
717                 event_base_free(base);
718                 return NULL;
719         }
720
721         /* prepare for threading */
722
723 #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
724         event_debug_created_threadable_ctx_ = 1;
725 #endif
726
727 #ifndef EVENT__DISABLE_THREAD_SUPPORT
728         if (EVTHREAD_LOCKING_ENABLED() &&
729             (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
730                 int r;
731                 EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
732                 EVTHREAD_ALLOC_COND(base->current_event_cond);
733                 r = evthread_make_base_notifiable(base);
734                 if (r<0) {
735                         event_warnx("%s: Unable to make base notifiable.", __func__);
736                         event_base_free(base);
737                         return NULL;
738                 }
739         }
740 #endif
741
742 #ifdef _WIN32
743         if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
744                 event_base_start_iocp_(base, cfg->n_cpus_hint);
745 #endif
746
747         /* initialize watcher lists */
748         for (i = 0; i < EVWATCH_MAX; ++i)
749                 TAILQ_INIT(&base->watchers[i]);
750
751         return (base);
752 }
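
/* A minimal sketch of driving this constructor through an event_config; the
 * fallback to a plain event_base_new() is the caller's choice, not something
 * this function does on its own:
 *
 *     struct event_config *cfg = event_config_new();
 *     struct event_base *base = NULL;
 *
 *     if (cfg) {
 *             event_config_require_features(cfg, EV_FEATURE_O1);
 *             event_config_avoid_method(cfg, "select");
 *             base = event_base_new_with_config(cfg);
 *             event_config_free(cfg);
 *     }
 *     if (!base)
 *             base = event_base_new();
 */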
753
754 int
755 event_base_start_iocp_(struct event_base *base, int n_cpus)
756 {
757 #ifdef _WIN32
758         if (base->iocp)
759                 return 0;
760         base->iocp = event_iocp_port_launch_(n_cpus);
761         if (!base->iocp) {
762                 event_warnx("%s: Couldn't launch IOCP", __func__);
763                 return -1;
764         }
765         return 0;
766 #else
767         return -1;
768 #endif
769 }
770
771 void
772 event_base_stop_iocp_(struct event_base *base)
773 {
774 #ifdef _WIN32
775         int rv;
776
777         if (!base->iocp)
778                 return;
779         rv = event_iocp_shutdown_(base->iocp, -1);
780         EVUTIL_ASSERT(rv >= 0);
781         base->iocp = NULL;
782 #endif
783 }
784
785 static int
786 event_base_cancel_single_callback_(struct event_base *base,
787     struct event_callback *evcb,
788     int run_finalizers)
789 {
790         int result = 0;
791
792         if (evcb->evcb_flags & EVLIST_INIT) {
793                 struct event *ev = event_callback_to_event(evcb);
794                 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
795                         event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
796                         result = 1;
797                 }
798         } else {
799                 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
800                 event_callback_cancel_nolock_(base, evcb, 1);
801                 EVBASE_RELEASE_LOCK(base, th_base_lock);
802                 result = 1;
803         }
804
805         if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
806                 switch (evcb->evcb_closure) {
807                 case EV_CLOSURE_EVENT_FINALIZE:
808                 case EV_CLOSURE_EVENT_FINALIZE_FREE: {
809                         struct event *ev = event_callback_to_event(evcb);
810                         ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
811                         if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
812                                 mm_free(ev);
813                         break;
814                 }
815                 case EV_CLOSURE_CB_FINALIZE:
816                         evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
817                         break;
818                 default:
819                         break;
820                 }
821         }
822         return result;
823 }
824
825 static int event_base_free_queues_(struct event_base *base, int run_finalizers)
826 {
827         int deleted = 0, i;
828
829         for (i = 0; i < base->nactivequeues; ++i) {
830                 struct event_callback *evcb, *next;
831                 for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
832                         next = TAILQ_NEXT(evcb, evcb_active_next);
833                         deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
834                         evcb = next;
835                 }
836         }
837
838         {
839                 struct event_callback *evcb;
840                 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
841                         deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
842                 }
843         }
844
845         return deleted;
846 }
847
848 static void
849 event_base_free_(struct event_base *base, int run_finalizers)
850 {
851         int i;
852         size_t n_deleted=0;
853         struct event *ev;
854         struct evwatch *watcher;
855         /* XXXX grab the lock? If there is contention when one thread frees
856          * the base, then the contending thread will be very sad soon. */
857
858         /* event_base_free(NULL) is how to free the current_base if we
859          * made it with event_init and forgot to hold a reference to it. */
860         if (base == NULL && current_base)
861                 base = current_base;
862         /* Don't actually free NULL. */
863         if (base == NULL) {
864                 event_warnx("%s: no base to free", __func__);
865                 return;
866         }
867         /* XXX(niels) - check for internal events first */
868
869 #ifdef _WIN32
870         event_base_stop_iocp_(base);
871 #endif
872
873         /* threading fds if we have them */
874         if (base->th_notify_fd[0] != -1) {
875                 event_del(&base->th_notify);
876                 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
877                 if (base->th_notify_fd[1] != -1)
878                         EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
879                 base->th_notify_fd[0] = -1;
880                 base->th_notify_fd[1] = -1;
881                 event_debug_unassign(&base->th_notify);
882         }
883
884         /* Delete all non-internal events. */
885         evmap_delete_all_(base);
886
887         while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
888                 event_del(ev);
889                 ++n_deleted;
890         }
891         for (i = 0; i < base->n_common_timeouts; ++i) {
892                 struct common_timeout_list *ctl =
893                     base->common_timeout_queues[i];
894                 event_del(&ctl->timeout_event); /* Internal; doesn't count */
895                 event_debug_unassign(&ctl->timeout_event);
896                 for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
897                         struct event *next = TAILQ_NEXT(ev,
898                             ev_timeout_pos.ev_next_with_common_timeout);
899                         if (!(ev->ev_flags & EVLIST_INTERNAL)) {
900                                 event_del(ev);
901                                 ++n_deleted;
902                         }
903                         ev = next;
904                 }
905                 mm_free(ctl);
906         }
907         if (base->common_timeout_queues)
908                 mm_free(base->common_timeout_queues);
909
910         for (;;) {
911                 /* A finalizer can register yet another finalizer from within a
912                  * finalizer, and if that finalizer ends up in active_later_queue we
913                  * would add it to activequeues and then have events in activequeues
914                  * after this function returns, which is not what we want (we even
915                  * have an assertion for this).
916                  *
917                  * A simple case is a bufferevent with an underlying bufferevent (i.e. filters).
918                  */
919                 int i = event_base_free_queues_(base, run_finalizers);
920                 event_debug(("%s: %d events freed", __func__, i));
921                 if (!i) {
922                         break;
923                 }
924                 n_deleted += i;
925         }
926
927         if (n_deleted)
928                 event_debug(("%s: "EV_SIZE_FMT" events were still set in base",
929                         __func__, n_deleted));
930
931         while (LIST_FIRST(&base->once_events)) {
932                 struct event_once *eonce = LIST_FIRST(&base->once_events);
933                 LIST_REMOVE(eonce, next_once);
934                 mm_free(eonce);
935         }
936
937         if (base->evsel != NULL && base->evsel->dealloc != NULL)
938                 base->evsel->dealloc(base);
939
940         for (i = 0; i < base->nactivequeues; ++i)
941                 EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
942
943         EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
944         min_heap_dtor_(&base->timeheap);
945
946         mm_free(base->activequeues);
947
948         evmap_io_clear_(&base->io);
949         evmap_signal_clear_(&base->sigmap);
950         event_changelist_freemem_(&base->changelist);
951
952         EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
953         EVTHREAD_FREE_COND(base->current_event_cond);
954
955         /* Free all event watchers */
956         for (i = 0; i < EVWATCH_MAX; ++i) {
957                 while (!TAILQ_EMPTY(&base->watchers[i])) {
958                         watcher = TAILQ_FIRST(&base->watchers[i]);
959                         TAILQ_REMOVE(&base->watchers[i], watcher, next);
960                         mm_free(watcher);
961                 }
962         }
963
964         /* If we're freeing current_base, there won't be a current_base. */
965         if (base == current_base)
966                 current_base = NULL;
967         mm_free(base);
968 }
969
970 void
971 event_base_free_nofinalize(struct event_base *base)
972 {
973         event_base_free_(base, 0);
974 }
975
976 void
977 event_base_free(struct event_base *base)
978 {
979         event_base_free_(base, 1);
980 }
981
982 /* Fake eventop; used to disable the backend temporarily inside event_reinit
983  * so that we can call event_del() on an event without telling the backend.
984  */
985 static int
986 nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
987     short events, void *fdinfo)
988 {
989         return 0;
990 }
991 const struct eventop nil_eventop = {
992         "nil",
993         NULL, /* init: unused. */
994         NULL, /* add: unused. */
995         nil_backend_del, /* del: used, so needs to be killed. */
996         NULL, /* dispatch: unused. */
997         NULL, /* dealloc: unused. */
998         0, 0, 0
999 };
1000
1001 /* reinitialize the event base after a fork */
1002 int
1003 event_reinit(struct event_base *base)
1004 {
1005         const struct eventop *evsel;
1006         int res = 0;
1007         int was_notifiable = 0;
1008         int had_signal_added = 0;
1009
1010         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1011
1012         evsel = base->evsel;
1013
1014         /* check if this event mechanism requires reinit on the backend */
1015         if (evsel->need_reinit) {
1016                 /* We're going to call event_del() on our notify events (the
1017                  * ones that tell about signals and wakeup events).  But we
1018                  * don't actually want to tell the backend to change its
1019                  * state, since it might still share some resource (a kqueue,
1020                  * an epoll fd) with the parent process, and we don't want to
1021                  * delete the fds from _that_ backend, so we temporarily stub out
1022                  * the evsel with a replacement.
1023                  */
1024                 base->evsel = &nil_eventop;
1025         }
1026
1027         /* We need to re-create a new signal-notification fd and a new
1028          * thread-notification fd.  Otherwise, we'll still share those with
1029          * the parent process, which would make any notification sent to them
1030          * get received by one or both of the event loops, more or less at
1031          * random.
1032          */
1033         if (base->sig.ev_signal_added) {
1034                 event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
1035                 event_debug_unassign(&base->sig.ev_signal);
1036                 memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
1037                 had_signal_added = 1;
1038                 base->sig.ev_signal_added = 0;
1039         }
1040         if (base->sig.ev_signal_pair[0] != -1)
1041                 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
1042         if (base->sig.ev_signal_pair[1] != -1)
1043                 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
1044         if (base->th_notify_fn != NULL) {
1045                 was_notifiable = 1;
1046                 base->th_notify_fn = NULL;
1047         }
1048         if (base->th_notify_fd[0] != -1) {
1049                 event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
1050                 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
1051                 if (base->th_notify_fd[1] != -1)
1052                         EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
1053                 base->th_notify_fd[0] = -1;
1054                 base->th_notify_fd[1] = -1;
1055                 event_debug_unassign(&base->th_notify);
1056         }
1057
1058         /* Replace the original evsel. */
1059         base->evsel = evsel;
1060
1061         if (evsel->need_reinit) {
1062                 /* Reconstruct the backend through brute-force, so that we do
1063                  * not share any structures with the parent process. For some
1064                  * backends, this is necessary: epoll and kqueue, for
1065                  * instance, have events associated with a kernel
1066                  * structure. If we didn't reinitialize, we'd share that
1067                  * structure with the parent process, and any changes made by
1068                  * the parent would affect our backend's behavior (and vice
1069                  * versa).
1070                  */
1071                 if (base->evsel->dealloc != NULL)
1072                         base->evsel->dealloc(base);
1073                 base->evbase = evsel->init(base);
1074                 if (base->evbase == NULL) {
1075                         event_errx(1,
1076                            "%s: could not reinitialize event mechanism",
1077                            __func__);
1078                         res = -1;
1079                         goto done;
1080                 }
1081
1082                 /* Empty out the changelist (if any): we are starting from a
1083                  * blank slate. */
1084                 event_changelist_freemem_(&base->changelist);
1085
1086                 /* Tell the event maps to re-inform the backend about all
1087                  * pending events. This will make the signal notification
1088                  * event get re-created if necessary. */
1089                 if (evmap_reinit_(base) < 0)
1090                         res = -1;
1091         } else {
1092                 res = evsig_init_(base);
1093                 if (res == 0 && had_signal_added) {
1094                         res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
1095                         if (res == 0)
1096                                 base->sig.ev_signal_added = 1;
1097                 }
1098         }
1099
1100         /* If we were notifiable before, and nothing just exploded, become
1101          * notifiable again. */
1102         if (was_notifiable && res == 0)
1103                 res = evthread_make_base_notifiable_nolock_(base);
1104
1105 done:
1106         EVBASE_RELEASE_LOCK(base, th_base_lock);
1107         return (res);
1108 }
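
/* A minimal sketch of the intended call site: after fork(), the child (and
 * only the child) reinitializes the base before using it again.  Error
 * handling is reduced to a bare exit here:
 *
 *     pid_t pid = fork();
 *     if (pid == 0) {
 *             if (event_reinit(base) == -1)
 *                     exit(1);
 *             event_base_dispatch(base);
 *             exit(0);
 *     }
 */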
1109
1110 /* Get the monotonic time for this event_base's timer */
1111 int
1112 event_gettime_monotonic(struct event_base *base, struct timeval *tv)
1113 {
1114   int rv = -1;
1115
1116   if (base && tv) {
1117     EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1118     rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
1119     EVBASE_RELEASE_LOCK(base, th_base_lock);
1120   }
1121
1122   return rv;
1123 }
1124
1125 const char **
1126 event_get_supported_methods(void)
1127 {
1128         static const char **methods = NULL;
1129         const struct eventop **method;
1130         const char **tmp;
1131         int i = 0, k;
1132
1133         /* count all methods */
1134         for (method = &eventops[0]; *method != NULL; ++method) {
1135                 ++i;
1136         }
1137
1138         /* allocate one more than we need for the NULL pointer */
1139         tmp = mm_calloc((i + 1), sizeof(char *));
1140         if (tmp == NULL)
1141                 return (NULL);
1142
1143         /* populate the array with the supported methods */
1144         for (k = 0, i = 0; eventops[k] != NULL; ++k) {
1145                 tmp[i++] = eventops[k]->name;
1146         }
1147         tmp[i] = NULL;
1148
1149         if (methods != NULL)
1150                 mm_free((char**)methods);
1151
1152         methods = tmp;
1153
1154         return (methods);
1155 }
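
/* A minimal sketch of a caller enumerating the compiled-in backends:
 *
 *     int i;
 *     const char **methods = event_get_supported_methods();
 *
 *     for (i = 0; methods != NULL && methods[i] != NULL; ++i)
 *             printf("supported method: %s\n", methods[i]);
 *
 * Note that "supported" here means compiled in, not necessarily usable on the
 * kernel the program ends up running on.
 */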
1156
1157 struct event_config *
1158 event_config_new(void)
1159 {
1160         struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1161
1162         if (cfg == NULL)
1163                 return (NULL);
1164
1165         TAILQ_INIT(&cfg->entries);
1166         cfg->max_dispatch_interval.tv_sec = -1;
1167         cfg->max_dispatch_callbacks = INT_MAX;
1168         cfg->limit_callbacks_after_prio = 1;
1169
1170         return (cfg);
1171 }
1172
1173 static void
1174 event_config_entry_free(struct event_config_entry *entry)
1175 {
1176         if (entry->avoid_method != NULL)
1177                 mm_free((char *)entry->avoid_method);
1178         mm_free(entry);
1179 }
1180
1181 void
1182 event_config_free(struct event_config *cfg)
1183 {
1184         struct event_config_entry *entry;
1185
1186         while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1187                 TAILQ_REMOVE(&cfg->entries, entry, next);
1188                 event_config_entry_free(entry);
1189         }
1190         mm_free(cfg);
1191 }
1192
1193 int
1194 event_config_set_flag(struct event_config *cfg, int flag)
1195 {
1196         if (!cfg)
1197                 return -1;
1198         cfg->flags |= flag;
1199         return 0;
1200 }
1201
1202 int
1203 event_config_avoid_method(struct event_config *cfg, const char *method)
1204 {
1205         struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1206         if (entry == NULL)
1207                 return (-1);
1208
1209         if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1210                 mm_free(entry);
1211                 return (-1);
1212         }
1213
1214         TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1215
1216         return (0);
1217 }
1218
1219 int
1220 event_config_require_features(struct event_config *cfg,
1221     int features)
1222 {
1223         if (!cfg)
1224                 return (-1);
1225         cfg->require_features = features;
1226         return (0);
1227 }
1228
1229 int
1230 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1231 {
1232         if (!cfg)
1233                 return (-1);
1234         cfg->n_cpus_hint = cpus;
1235         return (0);
1236 }
1237
1238 int
1239 event_config_set_max_dispatch_interval(struct event_config *cfg,
1240     const struct timeval *max_interval, int max_callbacks, int min_priority)
1241 {
1242         if (max_interval)
1243                 memcpy(&cfg->max_dispatch_interval, max_interval,
1244                     sizeof(struct timeval));
1245         else
1246                 cfg->max_dispatch_interval.tv_sec = -1;
1247         cfg->max_dispatch_callbacks =
1248             max_callbacks >= 0 ? max_callbacks : INT_MAX;
1249         if (min_priority < 0)
1250                 min_priority = 0;
1251         cfg->limit_callbacks_after_prio = min_priority;
1252         return (0);
1253 }
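
/* A minimal sketch of the knob above: check for new events after at most
 * 10 msec or 5 callbacks, but leave priority-0 callbacks unthrottled:
 *
 *     struct timeval msec_10 = { 0, 10 * 1000 };
 *
 *     event_config_set_max_dispatch_interval(cfg, &msec_10, 5, 1);
 *
 * Passing min_priority == 1 means the limits only apply to callbacks whose
 * priority number is 1 or higher.
 */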
1254
1255 int
1256 event_priority_init(int npriorities)
1257 {
1258         return event_base_priority_init(current_base, npriorities);
1259 }
1260
1261 int
1262 event_base_priority_init(struct event_base *base, int npriorities)
1263 {
1264         int i, r;
1265         r = -1;
1266
1267         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1268
1269         if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1270             || npriorities >= EVENT_MAX_PRIORITIES)
1271                 goto err;
1272
1273         if (npriorities == base->nactivequeues)
1274                 goto ok;
1275
1276         if (base->nactivequeues) {
1277                 mm_free(base->activequeues);
1278                 base->nactivequeues = 0;
1279         }
1280
1281         /* Allocate our priority queues */
1282         base->activequeues = (struct evcallback_list *)
1283           mm_calloc(npriorities, sizeof(struct evcallback_list));
1284         if (base->activequeues == NULL) {
1285                 event_warn("%s: calloc", __func__);
1286                 goto err;
1287         }
1288         base->nactivequeues = npriorities;
1289
1290         for (i = 0; i < base->nactivequeues; ++i) {
1291                 TAILQ_INIT(&base->activequeues[i]);
1292         }
1293
1294 ok:
1295         r = 0;
1296 err:
1297         EVBASE_RELEASE_LOCK(base, th_base_lock);
1298         return (r);
1299 }
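
/* A minimal sketch of using priorities; 'read_cb' and its fd are assumed to
 * exist elsewhere.  Lower numbers run first, and the priority is assigned
 * before the event is added:
 *
 *     event_base_priority_init(base, 3);
 *
 *     struct event *urgent = event_new(base, fd, EV_READ | EV_PERSIST,
 *         read_cb, NULL);
 *     event_priority_set(urgent, 0);
 *     event_add(urgent, NULL);
 */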
1300
1301 int
1302 event_base_get_npriorities(struct event_base *base)
1303 {
1304
1305         int n;
1306         if (base == NULL)
1307                 base = current_base;
1308
1309         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1310         n = base->nactivequeues;
1311         EVBASE_RELEASE_LOCK(base, th_base_lock);
1312         return (n);
1313 }
1314
1315 int
1316 event_base_get_num_events(struct event_base *base, unsigned int type)
1317 {
1318         int r = 0;
1319
1320         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1321
1322         if (type & EVENT_BASE_COUNT_ACTIVE)
1323                 r += base->event_count_active;
1324
1325         if (type & EVENT_BASE_COUNT_VIRTUAL)
1326                 r += base->virtual_event_count;
1327
1328         if (type & EVENT_BASE_COUNT_ADDED)
1329                 r += base->event_count;
1330
1331         EVBASE_RELEASE_LOCK(base, th_base_lock);
1332
1333         return r;
1334 }
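
/* A minimal sketch of reading these counters, e.g. for a statistics hook:
 *
 *     int n = event_base_get_num_events(base,
 *         EVENT_BASE_COUNT_ADDED | EVENT_BASE_COUNT_ACTIVE);
 *
 * The flags can be combined, and the *_max variant below reports (and
 * optionally clears) the corresponding high-water marks.
 */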
1335
1336 int
1337 event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1338 {
1339         int r = 0;
1340
1341         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1342
1343         if (type & EVENT_BASE_COUNT_ACTIVE) {
1344                 r += base->event_count_active_max;
1345                 if (clear)
1346                         base->event_count_active_max = 0;
1347         }
1348
1349         if (type & EVENT_BASE_COUNT_VIRTUAL) {
1350                 r += base->virtual_event_count_max;
1351                 if (clear)
1352                         base->virtual_event_count_max = 0;
1353         }
1354
1355         if (type & EVENT_BASE_COUNT_ADDED) {
1356                 r += base->event_count_max;
1357                 if (clear)
1358                         base->event_count_max = 0;
1359         }
1360
1361         EVBASE_RELEASE_LOCK(base, th_base_lock);
1362
1363         return r;
1364 }
1365
1366 /* Returns true iff we're currently watching any events. */
1367 static int
1368 event_haveevents(struct event_base *base)
1369 {
1370         /* Caller must hold th_base_lock */
1371         return (base->virtual_event_count > 0 || base->event_count > 0);
1372 }
1373
1374 /* "closure" function called when processing active signal events */
1375 static inline void
1376 event_signal_closure(struct event_base *base, struct event *ev)
1377 {
1378         short ncalls;
1379         int should_break;
1380
1381         /* Allows deletes to work */
1382         ncalls = ev->ev_ncalls;
1383         if (ncalls != 0)
1384                 ev->ev_pncalls = &ncalls;
1385         EVBASE_RELEASE_LOCK(base, th_base_lock);
1386         while (ncalls) {
1387                 ncalls--;
1388                 ev->ev_ncalls = ncalls;
1389                 if (ncalls == 0)
1390                         ev->ev_pncalls = NULL;
1391                 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1392
1393                 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1394                 should_break = base->event_break;
1395                 EVBASE_RELEASE_LOCK(base, th_base_lock);
1396
1397                 if (should_break) {
1398                         if (ncalls != 0)
1399                                 ev->ev_pncalls = NULL;
1400                         return;
1401                 }
1402         }
1403 }
1404
1405 /* Common timeouts are special timeouts that are handled as queues rather than
1406  * in the minheap.  This is more efficient than the minheap if we happen to
1407  * know that we're going to get several thousands of timeout events all with
1408  * the same timeout value.
1409  *
1410  * Since all our timeout handling code assumes timevals can be copied,
1411  * assigned, etc, we can't use "magic pointer" to encode these common
1412  * timeouts.  Searching through a list to see if every timeout is common could
1413  * also get inefficient.  Instead, we take advantage of the fact that tv_usec
1414  * is 32 bits long, but only uses 20 of those bits (since it can never be over
1415  * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
1416  * of index into the event_base's array of common timeouts.
1417  */
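
/* A worked example of the encoding described above, assuming the usual
 * MICROSECONDS_MASK value of 0x000fffff: the common timeout at index 2 with a
 * 500000-usec fractional part is stored as
 *
 *     tv_usec = COMMON_TIMEOUT_MAGIC | (2 << COMMON_TIMEOUT_IDX_SHIFT) | 500000
 *             = 0x50000000 | 0x00200000 | 0x0007a120
 *             = 0x5027a120
 *
 * and the macros below take it apart again: the top nibble identifies a
 * common timeout, the next 8 bits give index 2, and the low 20 bits give the
 * real microsecond value.
 */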
1418
1419 #define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
1420 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1421 #define COMMON_TIMEOUT_IDX_SHIFT 20
1422 #define COMMON_TIMEOUT_MASK     0xf0000000
1423 #define COMMON_TIMEOUT_MAGIC    0x50000000
1424
1425 #define COMMON_TIMEOUT_IDX(tv) \
1426         (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
1427
1428 /** Return true iff 'tv' is a common timeout in 'base' */
1429 static inline int
1430 is_common_timeout(const struct timeval *tv,
1431     const struct event_base *base)
1432 {
1433         int idx;
1434         if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1435                 return 0;
1436         idx = COMMON_TIMEOUT_IDX(tv);
1437         return idx < base->n_common_timeouts;
1438 }
1439
1440 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1441  * one is a common timeout. */
1442 static inline int
1443 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1444 {
1445         return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1446             (tv2->tv_usec & ~MICROSECONDS_MASK);
1447 }
1448
1449 /** Requires that 'tv' is a common timeout.  Return the corresponding
1450  * common_timeout_list. */
1451 static inline struct common_timeout_list *
1452 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1453 {
1454         return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1455 }
1456
1457 #if 0
1458 static inline int
1459 common_timeout_ok(const struct timeval *tv,
1460     struct event_base *base)
1461 {
1462         const struct timeval *expect =
1463             &get_common_timeout_list(base, tv)->duration;
1464         return tv->tv_sec == expect->tv_sec &&
1465             tv->tv_usec == expect->tv_usec;
1466 }
1467 #endif
1468
1469 /* Add the timeout for the first event in given common timeout list to the
1470  * event_base's minheap. */
1471 static void
1472 common_timeout_schedule(struct common_timeout_list *ctl,
1473     const struct timeval *now, struct event *head)
1474 {
1475         struct timeval timeout = head->ev_timeout;
1476         timeout.tv_usec &= MICROSECONDS_MASK;
1477         event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1478 }
1479
1480 /* Callback: invoked when the timeout for a common timeout queue triggers.
1481  * This means that (at least) the first event in that queue should be run,
1482  * and the timeout should be rescheduled if there are more events. */
1483 static void
1484 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1485 {
1486         struct timeval now;
1487         struct common_timeout_list *ctl = arg;
1488         struct event_base *base = ctl->base;
1489         struct event *ev = NULL;
1490         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1491         gettime(base, &now);
1492         while (1) {
1493                 ev = TAILQ_FIRST(&ctl->events);
1494                 if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1495                     (ev->ev_timeout.tv_sec == now.tv_sec &&
1496                         (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1497                         break;
1498                 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1499                 event_active_nolock_(ev, EV_TIMEOUT, 1);
1500         }
1501         if (ev)
1502                 common_timeout_schedule(ctl, &now, ev);
1503         EVBASE_RELEASE_LOCK(base, th_base_lock);
1504 }
1505
1506 #define MAX_COMMON_TIMEOUTS 256
1507
1508 const struct timeval *
1509 event_base_init_common_timeout(struct event_base *base,
1510     const struct timeval *duration)
1511 {
1512         int i;
1513         struct timeval tv;
1514         const struct timeval *result=NULL;
1515         struct common_timeout_list *new_ctl;
1516
1517         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1518         if (duration->tv_usec > 1000000) {
1519                 memcpy(&tv, duration, sizeof(struct timeval));
1520                 if (is_common_timeout(duration, base))
1521                         tv.tv_usec &= MICROSECONDS_MASK;
1522                 tv.tv_sec += tv.tv_usec / 1000000;
1523                 tv.tv_usec %= 1000000;
1524                 duration = &tv;
1525         }
1526         for (i = 0; i < base->n_common_timeouts; ++i) {
1527                 const struct common_timeout_list *ctl =
1528                     base->common_timeout_queues[i];
1529                 if (duration->tv_sec == ctl->duration.tv_sec &&
1530                     duration->tv_usec ==
1531                     (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1532                         EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1533                         result = &ctl->duration;
1534                         goto done;
1535                 }
1536         }
1537         if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1538                 event_warnx("%s: Too many common timeouts already in use; "
1539                     "we only support %d per event_base", __func__,
1540                     MAX_COMMON_TIMEOUTS);
1541                 goto done;
1542         }
1543         if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1544                 int n = base->n_common_timeouts < 16 ? 16 :
1545                     base->n_common_timeouts*2;
1546                 struct common_timeout_list **newqueues =
1547                     mm_realloc(base->common_timeout_queues,
1548                         n*sizeof(struct common_timeout_queue *));
1549                 if (!newqueues) {
1550                         event_warn("%s: realloc",__func__);
1551                         goto done;
1552                 }
1553                 base->n_common_timeouts_allocated = n;
1554                 base->common_timeout_queues = newqueues;
1555         }
1556         new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1557         if (!new_ctl) {
1558                 event_warn("%s: calloc",__func__);
1559                 goto done;
1560         }
1561         TAILQ_INIT(&new_ctl->events);
1562         new_ctl->duration.tv_sec = duration->tv_sec;
1563         new_ctl->duration.tv_usec =
1564             duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1565             (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1566         evtimer_assign(&new_ctl->timeout_event, base,
1567             common_timeout_callback, new_ctl);
1568         new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1569         event_priority_set(&new_ctl->timeout_event, 0);
1570         new_ctl->base = base;
1571         base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1572         result = &new_ctl->duration;
1573
1574 done:
1575         if (result)
1576                 EVUTIL_ASSERT(is_common_timeout(result, base));
1577
1578         EVBASE_RELEASE_LOCK(base, th_base_lock);
1579         return result;
1580 }
1581
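/*
 * [Editor's illustrative sketch -- not part of libevent.]  Example of how an
 * application might use event_base_init_common_timeout() above: the magic
 * timeval it returns is passed to event_add() for many timers that share the
 * same duration, so they are queued in a per-duration list instead of the
 * main timeout min-heap.  Names are hypothetical; the block is wrapped in
 * "#if 0" so it does not affect this translation unit.
 */
#if 0
#include <event2/event.h>

static void example_timeout_cb(evutil_socket_t fd, short what, void *arg)
{
	/* ... handle the per-connection timeout ... */
}

static void example_setup_common_timeouts(struct event_base *base,
    struct event **timers, int n)
{
	struct timeval ten_sec = { 10, 0 };
	/* The returned pointer encodes the "common timeout" magic bits. */
	const struct timeval *common =
	    event_base_init_common_timeout(base, &ten_sec);
	int i;
	for (i = 0; i < n; ++i) {
		timers[i] = evtimer_new(base, example_timeout_cb, NULL);
		event_add(timers[i], common ? common : &ten_sec);
	}
}
#endif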
1582 /* Closure function invoked when we're activating a persistent event. */
1583 static inline void
1584 event_persist_closure(struct event_base *base, struct event *ev)
1585 {
1586         void (*evcb_callback)(evutil_socket_t, short, void *);
1587
1588         // Other fields of *ev that must be saved before we release the lock and run the callback
1589         evutil_socket_t evcb_fd;
1590         short evcb_res;
1591         void *evcb_arg;
1592
1593         /* reschedule the persistent event if we have a timeout. */
1594         if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1595                 /* If there was a timeout, we want it to run at an interval of
1596                  * ev_io_timeout after the last time it was _scheduled_ for,
1597                  * not ev_io_timeout after _now_.  If it fired for another
1598                  * reason, though, the timeout ought to start ticking _now_. */
1599                 struct timeval run_at, relative_to, delay, now;
1600                 ev_uint32_t usec_mask = 0;
1601                 EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1602                         &ev->ev_io_timeout));
1603                 gettime(base, &now);
1604                 if (is_common_timeout(&ev->ev_timeout, base)) {
1605                         delay = ev->ev_io_timeout;
1606                         usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1607                         delay.tv_usec &= MICROSECONDS_MASK;
1608                         if (ev->ev_res & EV_TIMEOUT) {
1609                                 relative_to = ev->ev_timeout;
1610                                 relative_to.tv_usec &= MICROSECONDS_MASK;
1611                         } else {
1612                                 relative_to = now;
1613                         }
1614                 } else {
1615                         delay = ev->ev_io_timeout;
1616                         if (ev->ev_res & EV_TIMEOUT) {
1617                                 relative_to = ev->ev_timeout;
1618                         } else {
1619                                 relative_to = now;
1620                         }
1621                 }
1622                 evutil_timeradd(&relative_to, &delay, &run_at);
1623                 if (evutil_timercmp(&run_at, &now, <)) {
1624                         /* Looks like we missed at least one invocation due to
1625                          * a clock jump, not running the event loop for a
1626                          * while, really slow callbacks, or
1627                          * something. Reschedule relative to now.
1628                          */
1629                         evutil_timeradd(&now, &delay, &run_at);
1630                 }
1631                 run_at.tv_usec |= usec_mask;
1632                 event_add_nolock_(ev, &run_at, 1);
1633         }
1634
1635         // Save our callback before we release the lock
1636         evcb_callback = ev->ev_callback;
1637         evcb_fd = ev->ev_fd;
1638         evcb_res = ev->ev_res;
1639         evcb_arg = ev->ev_arg;
1640
1641         // Release the lock
1642         EVBASE_RELEASE_LOCK(base, th_base_lock);
1643
1644         // Execute the callback
1645         (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
1646 }
1647
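/*
 * [Editor's illustrative sketch -- not part of libevent.]  The closure above
 * is what makes a persistent event with a timeout re-fire at a steady
 * interval without the application re-adding it.  From the application's
 * side it looks roughly like this (hypothetical names, "#if 0" so it does
 * not affect this file):
 */
#if 0
#include <event2/event.h>

static void example_tick_cb(evutil_socket_t fd, short what, void *arg)
{
	/* Runs roughly once per second; no need to re-add the event --
	 * event_persist_closure() reschedules it for us. */
}

static struct event *example_add_ticker(struct event_base *base)
{
	struct timeval one_sec = { 1, 0 };
	struct event *tick =
	    event_new(base, -1, EV_PERSIST, example_tick_cb, NULL);
	if (tick)
		event_add(tick, &one_sec);
	return tick;
}
#endif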
1648 /*
1649   Helper for event_process_active to process all the events in a single queue,
1650   releasing the lock as we go.  This function requires that the lock be held
1651   when it's invoked.  Returns -1 if we get a signal or an event_break that
1652   means we should stop processing any active events now.  Otherwise returns
1653   the number of non-internal event_callbacks that we processed.
1654 */
1655 static int
1656 event_process_active_single_queue(struct event_base *base,
1657     struct evcallback_list *activeq,
1658     int max_to_process, const struct timeval *endtime)
1659 {
1660         struct event_callback *evcb;
1661         int count = 0;
1662
1663         EVUTIL_ASSERT(activeq != NULL);
1664
1665         for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1666                 struct event *ev=NULL;
1667                 if (evcb->evcb_flags & EVLIST_INIT) {
1668                         ev = event_callback_to_event(evcb);
1669
1670                         if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1671                                 event_queue_remove_active(base, evcb);
1672                         else
1673                                 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1674                         event_debug((
1675                             "event_process_active: event: %p, %s%s%scall %p",
1676                             ev,
1677                             ev->ev_res & EV_READ ? "EV_READ " : " ",
1678                             ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1679                             ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1680                             ev->ev_callback));
1681                 } else {
1682                         event_queue_remove_active(base, evcb);
1683                         event_debug(("event_process_active: event_callback %p, "
1684                                 "closure %d, call %p",
1685                                 evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
1686                 }
1687
1688                 if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1689                         ++count;
1690
1691
1692                 base->current_event = evcb;
1693 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1694                 base->current_event_waiters = 0;
1695 #endif
1696
1697                 switch (evcb->evcb_closure) {
1698                 case EV_CLOSURE_EVENT_SIGNAL:
1699                         EVUTIL_ASSERT(ev != NULL);
1700                         event_signal_closure(base, ev);
1701                         break;
1702                 case EV_CLOSURE_EVENT_PERSIST:
1703                         EVUTIL_ASSERT(ev != NULL);
1704                         event_persist_closure(base, ev);
1705                         break;
1706                 case EV_CLOSURE_EVENT: {
1707                         void (*evcb_callback)(evutil_socket_t, short, void *);
1708                         short res;
1709                         EVUTIL_ASSERT(ev != NULL);
1710                         evcb_callback = *ev->ev_callback;
1711                         res = ev->ev_res;
1712                         EVBASE_RELEASE_LOCK(base, th_base_lock);
1713                         evcb_callback(ev->ev_fd, res, ev->ev_arg);
1714                 }
1715                 break;
1716                 case EV_CLOSURE_CB_SELF: {
1717                         void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1718                         EVBASE_RELEASE_LOCK(base, th_base_lock);
1719                         evcb_selfcb(evcb, evcb->evcb_arg);
1720                 }
1721                 break;
1722                 case EV_CLOSURE_EVENT_FINALIZE:
1723                 case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1724                         void (*evcb_evfinalize)(struct event *, void *);
1725                         int evcb_closure = evcb->evcb_closure;
1726                         EVUTIL_ASSERT(ev != NULL);
1727                         base->current_event = NULL;
1728                         evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1729                         EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1730                         EVBASE_RELEASE_LOCK(base, th_base_lock);
1731                         event_debug_note_teardown_(ev);
1732                         evcb_evfinalize(ev, ev->ev_arg);
1733                         if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1734                                 mm_free(ev);
1735                 }
1736                 break;
1737                 case EV_CLOSURE_CB_FINALIZE: {
1738                         void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1739                         base->current_event = NULL;
1740                         EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1741                         EVBASE_RELEASE_LOCK(base, th_base_lock);
1742                         evcb_cbfinalize(evcb, evcb->evcb_arg);
1743                 }
1744                 break;
1745                 default:
1746                         EVUTIL_ASSERT(0);
1747                 }
1748
1749                 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1750                 base->current_event = NULL;
1751 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1752                 if (base->current_event_waiters) {
1753                         base->current_event_waiters = 0;
1754                         EVTHREAD_COND_BROADCAST(base->current_event_cond);
1755                 }
1756 #endif
1757
1758                 if (base->event_break)
1759                         return -1;
1760                 if (count >= max_to_process)
1761                         return count;
1762                 if (count && endtime) {
1763                         struct timeval now;
1764                         update_time_cache(base);
1765                         gettime(base, &now);
1766                         if (evutil_timercmp(&now, endtime, >=))
1767                                 return count;
1768                 }
1769                 if (base->event_continue)
1770                         break;
1771         }
1772         return count;
1773 }
1774
1775 /*
1776  * Active events are stored in priority queues.  Lower priorities are always
1777  * processed before higher priorities.  Low priority events can starve high
1778  * priority ones.
1779  */
1780
1781 static int
1782 event_process_active(struct event_base *base)
1783 {
1784         /* Caller must hold th_base_lock */
1785         struct evcallback_list *activeq = NULL;
1786         int i, c = 0;
1787         const struct timeval *endtime;
1788         struct timeval tv;
1789         const int maxcb = base->max_dispatch_callbacks;
1790         const int limit_after_prio = base->limit_callbacks_after_prio;
1791         if (base->max_dispatch_time.tv_sec >= 0) {
1792                 update_time_cache(base);
1793                 gettime(base, &tv);
1794                 evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1795                 endtime = &tv;
1796         } else {
1797                 endtime = NULL;
1798         }
1799
1800         for (i = 0; i < base->nactivequeues; ++i) {
1801                 if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1802                         base->event_running_priority = i;
1803                         activeq = &base->activequeues[i];
1804                         if (i < limit_after_prio)
1805                                 c = event_process_active_single_queue(base, activeq,
1806                                     INT_MAX, NULL);
1807                         else
1808                                 c = event_process_active_single_queue(base, activeq,
1809                                     maxcb, endtime);
1810                         if (c < 0) {
1811                                 goto done;
1812                         } else if (c > 0)
1813                                 break; /* Processed a real event; do not
1814                                         * consider lower-priority events */
1815                         /* If we get here, all of the events we processed
1816                          * were internal.  Continue. */
1817                 }
1818         }
1819
1820 done:
1821         base->event_running_priority = -1;
1822
1823         return c;
1824 }
1825
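/*
 * [Editor's illustrative sketch -- not part of libevent.]  The
 * max_dispatch_time / max_dispatch_callbacks / limit_callbacks_after_prio
 * fields consulted above are normally configured through
 * event_config_set_max_dispatch_interval().  A hedged usage sketch
 * (hypothetical names, "#if 0" so it does not affect this file):
 */
#if 0
#include <event2/event.h>

static struct event_base *example_base_with_dispatch_limits(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	struct timeval msec_50 = { 0, 50000 };
	if (cfg) {
		/* Re-check the clock every 50ms or 64 callbacks, but leave
		 * priority-0 events unlimited. */
		event_config_set_max_dispatch_interval(cfg, &msec_50, 64, 1);
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
	return base;
}
#endif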
1826 /*
1827  * Wait continuously for events.  We exit only if no events are left.
1828  */
1829
1830 int
1831 event_dispatch(void)
1832 {
1833         return (event_loop(0));
1834 }
1835
1836 int
1837 event_base_dispatch(struct event_base *event_base)
1838 {
1839         return (event_base_loop(event_base, 0));
1840 }
1841
1842 const char *
1843 event_base_get_method(const struct event_base *base)
1844 {
1845         EVUTIL_ASSERT(base);
1846         return (base->evsel->name);
1847 }
1848
1849 /** Callback: used to implement event_base_loopexit by telling the event_base
1850  * that it's time to exit its loop. */
1851 static void
1852 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1853 {
1854         struct event_base *base = arg;
1855         base->event_gotterm = 1;
1856 }
1857
1858 int
1859 event_loopexit(const struct timeval *tv)
1860 {
1861         return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1862                     current_base, tv));
1863 }
1864
1865 int
1866 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1867 {
1868         return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1869                     event_base, tv));
1870 }
1871
1872 int
1873 event_loopbreak(void)
1874 {
1875         return (event_base_loopbreak(current_base));
1876 }
1877
1878 int
1879 event_base_loopbreak(struct event_base *event_base)
1880 {
1881         int r = 0;
1882         if (event_base == NULL)
1883                 return (-1);
1884
1885         EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1886         event_base->event_break = 1;
1887
1888         if (EVBASE_NEED_NOTIFY(event_base)) {
1889                 r = evthread_notify_base(event_base);
1890         } else {
1891                 r = (0);
1892         }
1893         EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1894         return r;
1895 }
1896
1897 int
1898 event_base_loopcontinue(struct event_base *event_base)
1899 {
1900         int r = 0;
1901         if (event_base == NULL)
1902                 return (-1);
1903
1904         EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1905         event_base->event_continue = 1;
1906
1907         if (EVBASE_NEED_NOTIFY(event_base)) {
1908                 r = evthread_notify_base(event_base);
1909         } else {
1910                 r = (0);
1911         }
1912         EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1913         return r;
1914 }
1915
1916 int
1917 event_base_got_break(struct event_base *event_base)
1918 {
1919         int res;
1920         EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1921         res = event_base->event_break;
1922         EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1923         return res;
1924 }
1925
1926 int
1927 event_base_got_exit(struct event_base *event_base)
1928 {
1929         int res;
1930         EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1931         res = event_base->event_gotterm;
1932         EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1933         return res;
1934 }
1935
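/*
 * [Editor's illustrative sketch -- not part of libevent.]  Difference between
 * the two exit paths above: event_base_loopexit() lets the current round of
 * active callbacks finish (optionally after a delay), while
 * event_base_loopbreak() makes event_base_loop() return as soon as the
 * currently running callback is done.  Hypothetical names, "#if 0":
 */
#if 0
#include <event2/event.h>

static void example_stop_soon(struct event_base *base)
{
	struct timeval two_sec = { 2, 0 };
	event_base_loopexit(base, &two_sec);   /* exit after ~2 seconds */
}

static void example_stop_now(struct event_base *base)
{
	event_base_loopbreak(base);            /* exit after current callback */
}
#endif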
1936 /* not thread safe */
1937
1938 int
1939 event_loop(int flags)
1940 {
1941         return event_base_loop(current_base, flags);
1942 }
1943
1944 int
1945 event_base_loop(struct event_base *base, int flags)
1946 {
1947         const struct eventop *evsel = base->evsel;
1948         struct timeval tv;
1949         struct timeval *tv_p;
1950         int res, done, retval = 0;
1951         struct evwatch_prepare_cb_info prepare_info;
1952         struct evwatch_check_cb_info check_info;
1953         struct evwatch *watcher;
1954
1955         /* Grab the lock.  We will release it inside evsel.dispatch, and again
1956          * as we invoke watchers and user callbacks. */
1957         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1958
1959         if (base->running_loop) {
1960                 event_warnx("%s: reentrant invocation.  Only one event_base_loop"
1961                     " can run on each event_base at once.", __func__);
1962                 EVBASE_RELEASE_LOCK(base, th_base_lock);
1963                 return -1;
1964         }
1965
1966         base->running_loop = 1;
1967
1968         clear_time_cache(base);
1969
1970         if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1971                 evsig_set_base_(base);
1972
1973         done = 0;
1974
1975 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1976         base->th_owner_id = EVTHREAD_GET_ID();
1977 #endif
1978
1979         base->event_gotterm = base->event_break = 0;
1980
1981         while (!done) {
1982                 base->event_continue = 0;
1983                 base->n_deferreds_queued = 0;
1984
1985                 /* Terminate the loop if we have been asked to */
1986                 if (base->event_gotterm) {
1987                         break;
1988                 }
1989
1990                 if (base->event_break) {
1991                         break;
1992                 }
1993
1994                 tv_p = &tv;
1995                 if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1996                         timeout_next(base, &tv_p);
1997                 } else {
1998                         /*
1999                          * if we have active events, we just poll new events
2000                          * without waiting.
2001                          */
2002                         evutil_timerclear(&tv);
2003                 }
2004
2005                 /* If we have no events, we just exit */
2006                 if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
2007                     !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
2008                         event_debug(("%s: no events registered.", __func__));
2009                         retval = 1;
2010                         goto done;
2011                 }
2012
2013                 event_queue_make_later_events_active(base);
2014
2015                 /* Invoke prepare watchers before polling for events */
2016                 prepare_info.timeout = tv_p;
2017                 TAILQ_FOREACH(watcher, &base->watchers[EVWATCH_PREPARE], next) {
2018                         EVBASE_RELEASE_LOCK(base, th_base_lock);
2019                         (*watcher->callback.prepare)(watcher, &prepare_info, watcher->arg);
2020                         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2021                 }
2022
2023                 clear_time_cache(base);
2024
2025                 res = evsel->dispatch(base, tv_p);
2026
2027                 if (res == -1) {
2028                         event_debug(("%s: dispatch returned unsuccessfully.",
2029                                 __func__));
2030                         retval = -1;
2031                         goto done;
2032                 }
2033
2034                 update_time_cache(base);
2035
2036                 /* Invoke check watchers after polling for events, and before
2037                  * processing them */
2038                 TAILQ_FOREACH(watcher, &base->watchers[EVWATCH_CHECK], next) {
2039                         EVBASE_RELEASE_LOCK(base, th_base_lock);
2040                         (*watcher->callback.check)(watcher, &check_info, watcher->arg);
2041                         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2042                 }
2043
2044                 timeout_process(base);
2045
2046                 if (N_ACTIVE_CALLBACKS(base)) {
2047                         int n = event_process_active(base);
2048                         if ((flags & EVLOOP_ONCE)
2049                             && N_ACTIVE_CALLBACKS(base) == 0
2050                             && n != 0)
2051                                 done = 1;
2052                 } else if (flags & EVLOOP_NONBLOCK)
2053                         done = 1;
2054         }
2055         event_debug(("%s: asked to terminate loop.", __func__));
2056
2057 done:
2058         clear_time_cache(base);
2059         base->running_loop = 0;
2060
2061         EVBASE_RELEASE_LOCK(base, th_base_lock);
2062
2063         return (retval);
2064 }
2065
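/*
 * [Editor's illustrative sketch -- not part of libevent.]  The flags handled
 * by event_base_loop() above change how long one call runs.  A catalog of
 * the common patterns (hypothetical names, "#if 0"):
 */
#if 0
#include <event2/event.h>

static void example_run_loop(struct event_base *base)
{
	/* Block until no events remain (same as event_base_dispatch()). */
	event_base_loop(base, 0);

	/* Poll once without blocking, e.g. from a game/render loop. */
	event_base_loop(base, EVLOOP_NONBLOCK);

	/* Run one iteration: wait for events, fire them, then return. */
	event_base_loop(base, EVLOOP_ONCE);

	/* Keep looping even when no events are registered yet. */
	event_base_loop(base, EVLOOP_NO_EXIT_ON_EMPTY);
}
#endif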
2066 /* One-time callback to implement event_base_once: invokes the user callback,
2067  * then deletes the allocated storage */
2068 static void
2069 event_once_cb(evutil_socket_t fd, short events, void *arg)
2070 {
2071         struct event_once *eonce = arg;
2072
2073         (*eonce->cb)(fd, events, eonce->arg);
2074         EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
2075         LIST_REMOVE(eonce, next_once);
2076         EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
2077         event_debug_unassign(&eonce->ev);
2078         mm_free(eonce);
2079 }
2080
2081 /* Not threadsafe.  Schedules a one-shot event on the current (global) event base. */
2082 int
2083 event_once(evutil_socket_t fd, short events,
2084     void (*callback)(evutil_socket_t, short, void *),
2085     void *arg, const struct timeval *tv)
2086 {
2087         return event_base_once(current_base, fd, events, callback, arg, tv);
2088 }
2089
2090 /* Schedules an event once */
2091 int
2092 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
2093     void (*callback)(evutil_socket_t, short, void *),
2094     void *arg, const struct timeval *tv)
2095 {
2096         struct event_once *eonce;
2097         int res = 0;
2098         int activate = 0;
2099
2100         if (!base)
2101                 return (-1);
2102
2103         /* We cannot support signals that just fire once, or persistent
2104          * events. */
2105         if (events & (EV_SIGNAL|EV_PERSIST))
2106                 return (-1);
2107
2108         if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
2109                 return (-1);
2110
2111         eonce->cb = callback;
2112         eonce->arg = arg;
2113
2114         if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
2115                 evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
2116
2117                 if (tv == NULL || ! evutil_timerisset(tv)) {
2118                         /* If the event is going to become active immediately,
2119                          * don't put it on the timeout queue.  This is one
2120                          * idiom for scheduling a callback, so let's make
2121                          * it fast (and order-preserving). */
2122                         activate = 1;
2123                 }
2124         } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
2125                 events &= EV_READ|EV_WRITE|EV_CLOSED;
2126
2127                 event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
2128         } else {
2129                 /* Bad event combination */
2130                 mm_free(eonce);
2131                 return (-1);
2132         }
2133
2134         if (res == 0) {
2135                 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2136                 if (activate)
2137                         event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2138                 else
2139                         res = event_add_nolock_(&eonce->ev, tv, 0);
2140
2141                 if (res != 0) {
2142                         mm_free(eonce);
                             EVBASE_RELEASE_LOCK(base, th_base_lock); /* don't return with the lock held */
2143                         return (res);
2144                 } else {
2145                         LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2146                 }
2147                 EVBASE_RELEASE_LOCK(base, th_base_lock);
2148         }
2149
2150         return (0);
2151 }
2152
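/*
 * [Editor's illustrative sketch -- not part of libevent.]  Typical use of
 * event_base_once() above: fire a callback exactly once, with libevent
 * allocating and freeing the internal event itself.  Hypothetical names,
 * "#if 0" so it does not affect this file:
 */
#if 0
#include <event2/event.h>

static void example_once_cb(evutil_socket_t fd, short what, void *arg)
{
	/* Runs once; the internal storage is freed automatically afterward. */
}

static int example_schedule_once(struct event_base *base)
{
	struct timeval half_sec = { 0, 500000 };
	/* EV_TIMEOUT only: no fd, and no EV_PERSIST/EV_SIGNAL (both rejected). */
	return event_base_once(base, -1, EV_TIMEOUT, example_once_cb, NULL,
	    &half_sec);
}
#endif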
2153 int
2154 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
2155 {
2156         if (!base)
2157                 base = current_base;
2158         if (arg == &event_self_cbarg_ptr_)
2159                 arg = ev;
2160
2161         if (!(events & EV_SIGNAL))
2162                 event_debug_assert_socket_nonblocking_(fd);
2163         event_debug_assert_not_added_(ev);
2164
2165         ev->ev_base = base;
2166
2167         ev->ev_callback = callback;
2168         ev->ev_arg = arg;
2169         ev->ev_fd = fd;
2170         ev->ev_events = events;
2171         ev->ev_res = 0;
2172         ev->ev_flags = EVLIST_INIT;
2173         ev->ev_ncalls = 0;
2174         ev->ev_pncalls = NULL;
2175
2176         if (events & EV_SIGNAL) {
2177                 if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
2178                         event_warnx("%s: EV_SIGNAL is not compatible with "
2179                             "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2180                         return -1;
2181                 }
2182                 ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2183         } else {
2184                 if (events & EV_PERSIST) {
2185                         evutil_timerclear(&ev->ev_io_timeout);
2186                         ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2187                 } else {
2188                         ev->ev_closure = EV_CLOSURE_EVENT;
2189                 }
2190         }
2191
2192         min_heap_elem_init_(ev);
2193
2194         if (base != NULL) {
2195                 /* by default, we put new events into the middle priority */
2196                 ev->ev_pri = base->nactivequeues / 2;
2197         }
2198
2199         event_debug_note_setup_(ev);
2200
2201         return 0;
2202 }
2203
2204 int
2205 event_base_set(struct event_base *base, struct event *ev)
2206 {
2207         /* Only innocent events may be assigned to a different base */
2208         if (ev->ev_flags != EVLIST_INIT)
2209                 return (-1);
2210
2211         event_debug_assert_is_setup_(ev);
2212
2213         ev->ev_base = base;
2214         ev->ev_pri = base->nactivequeues/2;
2215
2216         return (0);
2217 }
2218
2219 void
2220 event_set(struct event *ev, evutil_socket_t fd, short events,
2221           void (*callback)(evutil_socket_t, short, void *), void *arg)
2222 {
2223         int r;
2224         r = event_assign(ev, current_base, fd, events, callback, arg);
2225         EVUTIL_ASSERT(r == 0);
2226 }
2227
2228 void *
2229 event_self_cbarg(void)
2230 {
2231         return &event_self_cbarg_ptr_;
2232 }
2233
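/*
 * [Editor's illustrative sketch -- not part of libevent.]  event_self_cbarg()
 * above returns a sentinel that event_assign()/event_new() replace with the
 * event itself, which is handy for one-shot events that free themselves.
 * Hypothetical names, "#if 0":
 */
#if 0
#include <event2/event.h>

static void example_selfarg_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event *self = arg;   /* the event we were created as */
	event_free(self);           /* safe for a non-persistent, fired event */
}

static struct event *example_new_self_freeing_timer(struct event_base *base)
{
	struct timeval one_sec = { 1, 0 };
	struct event *ev =
	    evtimer_new(base, example_selfarg_cb, event_self_cbarg());
	if (ev)
		evtimer_add(ev, &one_sec);
	return ev;
}
#endif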
2234 struct event *
2235 event_base_get_running_event(struct event_base *base)
2236 {
2237         struct event *ev = NULL;
2238         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2239         if (EVBASE_IN_THREAD(base)) {
2240                 struct event_callback *evcb = base->current_event;
2241                 if (evcb && (evcb->evcb_flags & EVLIST_INIT))
2242                         ev = event_callback_to_event(evcb);
2243         }
2244         EVBASE_RELEASE_LOCK(base, th_base_lock);
2245         return ev;
2246 }
2247
2248 struct event *
2249 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2250 {
2251         struct event *ev;
2252         ev = mm_malloc(sizeof(struct event));
2253         if (ev == NULL)
2254                 return (NULL);
2255         if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2256                 mm_free(ev);
2257                 return (NULL);
2258         }
2259
2260         return (ev);
2261 }
2262
2263 void
2264 event_free(struct event *ev)
2265 {
2266         /* This assertion is disabled so that events which have already been
2267          * finalized remain a valid target for event_free(). */
2268         // event_debug_assert_is_setup_(ev);
2269
2270         /* make sure that this event won't be coming back to haunt us. */
2271         event_del(ev);
2272         event_debug_note_teardown_(ev);
2273         mm_free(ev);
2274
2275 }
2276
2277 void
2278 event_debug_unassign(struct event *ev)
2279 {
2280         event_debug_assert_not_added_(ev);
2281         event_debug_note_teardown_(ev);
2282
2283         ev->ev_flags &= ~EVLIST_INIT;
2284 }
2285
2286 #define EVENT_FINALIZE_FREE_ 0x10000
2287 static int
2288 event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2289 {
2290         ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2291             EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2292
2293         event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2294         ev->ev_closure = closure;
2295         ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2296         event_active_nolock_(ev, EV_FINALIZE, 1);
2297         ev->ev_flags |= EVLIST_FINALIZING;
2298         return 0;
2299 }
2300
2301 static int
2302 event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2303 {
2304         int r;
2305         struct event_base *base = ev->ev_base;
2306         if (EVUTIL_FAILURE_CHECK(!base)) {
2307                 event_warnx("%s: event has no event_base set.", __func__);
2308                 return -1;
2309         }
2310
2311         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2312         r = event_finalize_nolock_(base, flags, ev, cb);
2313         EVBASE_RELEASE_LOCK(base, th_base_lock);
2314         return r;
2315 }
2316
2317 int
2318 event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2319 {
2320         return event_finalize_impl_(flags, ev, cb);
2321 }
2322
2323 int
2324 event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2325 {
2326         return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2327 }
2328
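/*
 * [Editor's illustrative sketch -- not part of libevent.]  event_finalize() /
 * event_free_finalize() above are the multithread-friendly way to tear an
 * event down: the event stops being pending immediately, and the supplied
 * finalizer runs from the event loop once the event's own callback can no
 * longer be executing.  Hypothetical names, "#if 0":
 */
#if 0
#include <event2/event.h>
#include <stdlib.h>

struct example_conn {
	struct event *ev;
	/* ...application state... */
};

static void example_conn_finalize_cb(struct event *ev, void *arg)
{
	/* arg is the ev_arg the event was created with (the conn below);
	 * ev's callback can no longer be running, so freeing is safe. */
	free(arg);
}

static void example_conn_close(struct example_conn *conn)
{
	/* Assumes conn->ev was created with conn as its callback argument,
	 * e.g. event_new(base, fd, EV_READ|EV_PERSIST, some_cb, conn).
	 * This also frees conn->ev itself after the finalizer runs. */
	event_free_finalize(0, conn->ev, example_conn_finalize_cb);
}
#endif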
2329 void
2330 event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2331 {
2332         struct event *ev = NULL;
2333         if (evcb->evcb_flags & EVLIST_INIT) {
2334                 ev = event_callback_to_event(evcb);
2335                 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2336         } else {
2337                 event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2338         }
2339
2340         evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2341         evcb->evcb_cb_union.evcb_cbfinalize = cb;
2342         event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2343         evcb->evcb_flags |= EVLIST_FINALIZING;
2344 }
2345
2346 void
2347 event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2348 {
2349         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2350         event_callback_finalize_nolock_(base, flags, evcb, cb);
2351         EVBASE_RELEASE_LOCK(base, th_base_lock);
2352 }
2353
2354 /** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
2355  * callback will be invoked on *one of them*, after they have *all* been
2356  * finalized. */
2357 int
2358 event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2359 {
2360         int n_pending = 0, i;
2361
2362         if (base == NULL)
2363                 base = current_base;
2364
2365         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2366
2367         event_debug(("%s: %d events finalizing", __func__, n_cbs));
2368
2369         /* At most one can be currently executing; the rest we just
2370          * cancel... But we always make sure that the finalize callback
2371          * runs. */
2372         for (i = 0; i < n_cbs; ++i) {
2373                 struct event_callback *evcb = evcbs[i];
2374                 if (evcb == base->current_event) {
2375                         event_callback_finalize_nolock_(base, 0, evcb, cb);
2376                         ++n_pending;
2377                 } else {
2378                         event_callback_cancel_nolock_(base, evcb, 0);
2379                 }
2380         }
2381
2382         if (n_pending == 0) {
2383                 /* Just do the first one. */
2384                 event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2385         }
2386
2387         EVBASE_RELEASE_LOCK(base, th_base_lock);
2388         return 0;
2389 }
2390
2391 /*
2392  * Sets the priority of an event.  If the event is already active,
2393  * changing its priority will fail.
2394  */
2395
2396 int
2397 event_priority_set(struct event *ev, int pri)
2398 {
2399         event_debug_assert_is_setup_(ev);
2400
2401         if (ev->ev_flags & EVLIST_ACTIVE)
2402                 return (-1);
2403         if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2404                 return (-1);
2405
2406         ev->ev_pri = pri;
2407
2408         return (0);
2409 }
2410
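/*
 * [Editor's illustrative sketch -- not part of libevent.]  Priorities must be
 * set up on the base first; lower numbers are dispatched first and can starve
 * higher numbers (see event_process_active() above).  Hypothetical names,
 * "#if 0":
 */
#if 0
#include <event2/event.h>

static void example_setup_priorities(struct event_base *base,
    struct event *urgent, struct event *bulk)
{
	event_base_priority_init(base, 2);  /* priorities 0 and 1 */
	event_priority_set(urgent, 0);      /* runs before priority 1 */
	event_priority_set(bulk, 1);
}
#endif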
2411 /*
2412  * Checks if a specific event is pending or scheduled.
2413  */
2414
2415 int
2416 event_pending(const struct event *ev, short event, struct timeval *tv)
2417 {
2418         int flags = 0;
2419
2420         if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2421                 event_warnx("%s: event has no event_base set.", __func__);
2422                 return 0;
2423         }
2424
2425         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2426         event_debug_assert_is_setup_(ev);
2427
2428         if (ev->ev_flags & EVLIST_INSERTED)
2429                 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2430         if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2431                 flags |= ev->ev_res;
2432         if (ev->ev_flags & EVLIST_TIMEOUT)
2433                 flags |= EV_TIMEOUT;
2434
2435         event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2436
2437         /* See if there is a timeout that we should report */
2438         if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2439                 struct timeval tmp = ev->ev_timeout;
2440                 tmp.tv_usec &= MICROSECONDS_MASK;
2441                 /* correctly remap to real time */
2442                 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2443         }
2444
2445         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2446
2447         return (flags & event);
2448 }
2449
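/*
 * [Editor's illustrative sketch -- not part of libevent.]  event_pending()
 * above can both test what an event is waiting for and recover the absolute
 * expiry time of its timeout.  Hypothetical names, "#if 0":
 */
#if 0
#include <event2/event.h>
#include <stdio.h>

static void example_report_pending(const struct event *ev)
{
	struct timeval expires;
	if (event_pending(ev, EV_READ | EV_WRITE, NULL))
		printf("still waiting for I/O\n");
	if (event_pending(ev, EV_TIMEOUT, &expires))
		printf("timeout due at %ld.%06ld\n",
		    (long)expires.tv_sec, (long)expires.tv_usec);
}
#endif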
2450 int
2451 event_initialized(const struct event *ev)
2452 {
2453         if (!(ev->ev_flags & EVLIST_INIT))
2454                 return 0;
2455
2456         return 1;
2457 }
2458
2459 void
2460 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2461 {
2462         event_debug_assert_is_setup_(event);
2463
2464         if (base_out)
2465                 *base_out = event->ev_base;
2466         if (fd_out)
2467                 *fd_out = event->ev_fd;
2468         if (events_out)
2469                 *events_out = event->ev_events;
2470         if (callback_out)
2471                 *callback_out = event->ev_callback;
2472         if (arg_out)
2473                 *arg_out = event->ev_arg;
2474 }
2475
2476 size_t
2477 event_get_struct_event_size(void)
2478 {
2479         return sizeof(struct event);
2480 }
2481
2482 evutil_socket_t
2483 event_get_fd(const struct event *ev)
2484 {
2485         event_debug_assert_is_setup_(ev);
2486         return ev->ev_fd;
2487 }
2488
2489 struct event_base *
2490 event_get_base(const struct event *ev)
2491 {
2492         event_debug_assert_is_setup_(ev);
2493         return ev->ev_base;
2494 }
2495
2496 short
2497 event_get_events(const struct event *ev)
2498 {
2499         event_debug_assert_is_setup_(ev);
2500         return ev->ev_events;
2501 }
2502
2503 event_callback_fn
2504 event_get_callback(const struct event *ev)
2505 {
2506         event_debug_assert_is_setup_(ev);
2507         return ev->ev_callback;
2508 }
2509
2510 void *
2511 event_get_callback_arg(const struct event *ev)
2512 {
2513         event_debug_assert_is_setup_(ev);
2514         return ev->ev_arg;
2515 }
2516
2517 int
2518 event_get_priority(const struct event *ev)
2519 {
2520         event_debug_assert_is_setup_(ev);
2521         return ev->ev_pri;
2522 }
2523
2524 int
2525 event_add(struct event *ev, const struct timeval *tv)
2526 {
2527         int res;
2528
2529         if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2530                 event_warnx("%s: event has no event_base set.", __func__);
2531                 return -1;
2532         }
2533
2534         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2535
2536         res = event_add_nolock_(ev, tv, 0);
2537
2538         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2539
2540         return (res);
2541 }
2542
2543 /* Helper callback: wake an event_base from another thread.  This version
2544  * works by writing a byte to one end of a socketpair, so that the event_base
2545  * listening on the other end will wake up as the corresponding event
2546  * triggers */
2547 static int
2548 evthread_notify_base_default(struct event_base *base)
2549 {
2550         char buf[1];
2551         int r;
2552         buf[0] = (char) 0;
2553 #ifdef _WIN32
2554         r = send(base->th_notify_fd[1], buf, 1, 0);
2555 #else
2556         r = write(base->th_notify_fd[1], buf, 1);
2557 #endif
2558         return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2559 }
2560
2561 #ifdef EVENT__HAVE_EVENTFD
2562 /* Helper callback: wake an event_base from another thread.  This version
2563  * assumes that you have a working eventfd() implementation. */
2564 static int
2565 evthread_notify_base_eventfd(struct event_base *base)
2566 {
2567         ev_uint64_t msg = 1;
2568         int r;
2569         do {
2570                 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2571         } while (r < 0 && errno == EAGAIN);
2572
2573         return (r < 0) ? -1 : 0;
2574 }
2575 #endif
2576
2577
2578 /** Tell the thread currently running the event_loop for base (if any) that it
2579  * needs to stop waiting in its dispatch function (if it is) and process all
2580  * active callbacks. */
2581 static int
2582 evthread_notify_base(struct event_base *base)
2583 {
2584         EVENT_BASE_ASSERT_LOCKED(base);
2585         if (!base->th_notify_fn)
2586                 return -1;
2587         if (base->is_notify_pending)
2588                 return 0;
2589         base->is_notify_pending = 1;
2590         return base->th_notify_fn(base);
2591 }
2592
2593 /* Implementation function to remove a timeout on a currently pending event.
2594  */
2595 int
2596 event_remove_timer_nolock_(struct event *ev)
2597 {
2598         struct event_base *base = ev->ev_base;
2599
2600         EVENT_BASE_ASSERT_LOCKED(base);
2601         event_debug_assert_is_setup_(ev);
2602
2603         event_debug(("event_remove_timer_nolock: event: %p", ev));
2604
2605         /* If it's not pending on a timeout, we don't need to do anything. */
2606         if (ev->ev_flags & EVLIST_TIMEOUT) {
2607                 event_queue_remove_timeout(base, ev);
2608                 evutil_timerclear(&ev->ev_io_timeout);
2609         }
2610
2611         return (0);
2612 }
2613
2614 int
2615 event_remove_timer(struct event *ev)
2616 {
2617         int res;
2618
2619         if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2620                 event_warnx("%s: event has no event_base set.", __func__);
2621                 return -1;
2622         }
2623
2624         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2625
2626         res = event_remove_timer_nolock_(ev);
2627
2628         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2629
2630         return (res);
2631 }
2632
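/*
 * [Editor's illustrative sketch -- not part of libevent.]  event_remove_timer()
 * above drops only the timeout part of a pending event, leaving any I/O or
 * signal registration in place.  Hypothetical names, "#if 0":
 */
#if 0
#include <event2/event.h>

static void example_cancel_idle_timeout(struct event *read_ev)
{
	/* read_ev was added with a timeout, e.g. event_add(read_ev, &thirty_sec);
	 * keep waiting for EV_READ, but never time out. */
	event_remove_timer(read_ev);
}
#endif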
2633 /* Implementation function to add an event.  Works just like event_add,
2634  * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
2635  * we treat tv as an absolute time, not as an interval to add to the current
2636  * time */
2637 int
2638 event_add_nolock_(struct event *ev, const struct timeval *tv,
2639     int tv_is_absolute)
2640 {
2641         struct event_base *base = ev->ev_base;
2642         int res = 0;
2643         int notify = 0;
2644
2645         EVENT_BASE_ASSERT_LOCKED(base);
2646         event_debug_assert_is_setup_(ev);
2647
2648         event_debug((
2649                  "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2650                  ev,
2651                  EV_SOCK_ARG(ev->ev_fd),
2652                  ev->ev_events & EV_READ ? "EV_READ " : " ",
2653                  ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2654                  ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2655                  tv ? "EV_TIMEOUT " : " ",
2656                  ev->ev_callback));
2657
2658         EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2659
2660         if (ev->ev_flags & EVLIST_FINALIZING) {
2661                 /* XXXX debug */
2662                 return (-1);
2663         }
2664
2665         /*
2666          * prepare for timeout insertion further below, if we get a
2667          * failure on any step, we should not change any state.
2668          */
2669         if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2670                 if (min_heap_reserve_(&base->timeheap,
2671                         1 + min_heap_size_(&base->timeheap)) == -1)
2672                         return (-1);  /* ENOMEM == errno */
2673         }
2674
2675         /* If the main thread is currently executing a signal event's
2676          * callback, and we are not the main thread, then we want to wait
2677          * until the callback is done before we mess with the event, or else
2678          * we can race on ev_ncalls and ev_pncalls below. */
2679 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2680         if (base->current_event == event_to_event_callback(ev) &&
2681             (ev->ev_events & EV_SIGNAL)
2682             && !EVBASE_IN_THREAD(base)) {
2683                 ++base->current_event_waiters;
2684                 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2685         }
2686 #endif
2687
2688         if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2689             !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2690                 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2691                         res = evmap_io_add_(base, ev->ev_fd, ev);
2692                 else if (ev->ev_events & EV_SIGNAL)
2693                         res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2694                 if (res != -1)
2695                         event_queue_insert_inserted(base, ev);
2696                 if (res == 1) {
2697                         /* evmap says we need to notify the main thread. */
2698                         notify = 1;
2699                         res = 0;
2700                 }
2701         }
2702
2703         /*
2704          * we should change the timeout state only if the previous event
2705          * addition succeeded.
2706          */
2707         if (res != -1 && tv != NULL) {
2708                 struct timeval now;
2709                 int common_timeout;
2710 #ifdef USE_REINSERT_TIMEOUT
2711                 int was_common;
2712                 int old_timeout_idx;
2713 #endif
2714
2715                 /*
2716                  * for persistent timeout events, we remember the
2717                  * timeout value and re-add the event.
2718                  *
2719                  * If tv_is_absolute, this was already set.
2720                  */
2721                 if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2722                         ev->ev_io_timeout = *tv;
2723
2724 #ifndef USE_REINSERT_TIMEOUT
2725                 if (ev->ev_flags & EVLIST_TIMEOUT) {
2726                         event_queue_remove_timeout(base, ev);
2727                 }
2728 #endif
2729
2730                 /* Check if it is active due to a timeout.  Rescheduling
2731                  * this timeout before the callback can be executed
2732                  * removes it from the active list. */
2733                 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2734                     (ev->ev_res & EV_TIMEOUT)) {
2735                         if (ev->ev_events & EV_SIGNAL) {
2736                                 /* See if we are just active executing
2737                                  * this event in a loop
2738                                  */
2739                                 if (ev->ev_ncalls && ev->ev_pncalls) {
2740                                         /* Abort loop */
2741                                         *ev->ev_pncalls = 0;
2742                                 }
2743                         }
2744
2745                         event_queue_remove_active(base, event_to_event_callback(ev));
2746                 }
2747
2748                 gettime(base, &now);
2749
2750                 common_timeout = is_common_timeout(tv, base);
2751 #ifdef USE_REINSERT_TIMEOUT
2752                 was_common = is_common_timeout(&ev->ev_timeout, base);
2753                 old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2754 #endif
2755
2756                 if (tv_is_absolute) {
2757                         ev->ev_timeout = *tv;
2758                 } else if (common_timeout) {
2759                         struct timeval tmp = *tv;
2760                         tmp.tv_usec &= MICROSECONDS_MASK;
2761                         evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2762                         ev->ev_timeout.tv_usec |=
2763                             (tv->tv_usec & ~MICROSECONDS_MASK);
2764                 } else {
2765                         evutil_timeradd(&now, tv, &ev->ev_timeout);
2766                 }
2767
2768                 event_debug((
2769                          "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2770                          ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2771
2772 #ifdef USE_REINSERT_TIMEOUT
2773                 event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2774 #else
2775                 event_queue_insert_timeout(base, ev);
2776 #endif
2777
2778                 if (common_timeout) {
2779                         struct common_timeout_list *ctl =
2780                             get_common_timeout_list(base, &ev->ev_timeout);
2781                         if (ev == TAILQ_FIRST(&ctl->events)) {
2782                                 common_timeout_schedule(ctl, &now, ev);
2783                         }
2784                 } else {
2785                         struct event* top = NULL;
2786                         /* See if the earliest timeout is now earlier than it
2787                          * was before: if so, we will need to tell the main
2788                          * thread to wake up earlier than it would otherwise.
2789                          * We double check the timeout of the top element to
2790                          * handle time distortions due to system suspension.
2791                          */
2792                         if (min_heap_elt_is_top_(ev))
2793                                 notify = 1;
2794                         else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2795                                          evutil_timercmp(&top->ev_timeout, &now, <))
2796                                 notify = 1;
2797                 }
2798         }
2799
2800         /* if we are not in the right thread, we need to wake up the loop */
2801         if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2802                 evthread_notify_base(base);
2803
2804         event_debug_note_add_(ev);
2805
2806         return (res);
2807 }
2808
2809 static int
2810 event_del_(struct event *ev, int blocking)
2811 {
2812         int res;
2813         struct event_base *base = ev->ev_base;
2814
2815         if (EVUTIL_FAILURE_CHECK(!base)) {
2816                 event_warnx("%s: event has no event_base set.", __func__);
2817                 return -1;
2818         }
2819
2820         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2821         res = event_del_nolock_(ev, blocking);
2822         EVBASE_RELEASE_LOCK(base, th_base_lock);
2823
2824         return (res);
2825 }
2826
2827 int
2828 event_del(struct event *ev)
2829 {
2830         return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2831 }
2832
2833 int
2834 event_del_block(struct event *ev)
2835 {
2836         return event_del_(ev, EVENT_DEL_BLOCK);
2837 }
2838
2839 int
2840 event_del_noblock(struct event *ev)
2841 {
2842         return event_del_(ev, EVENT_DEL_NOBLOCK);
2843 }
2844
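/*
 * [Editor's illustrative sketch -- not part of libevent.]  The three wrappers
 * above differ only in whether they wait for a callback that is currently
 * running in another thread: event_del() decides automatically (it waits
 * unless the event was created with EV_FINALIZE), event_del_block() always
 * waits, and event_del_noblock() never waits.  Hypothetical names, "#if 0":
 */
#if 0
#include <event2/event.h>
#include <stdlib.h>

static void example_teardown(struct event *ev, void *state)
{
	/* After event_del_block() returns, ev's callback is guaranteed not
	 * to be running anywhere, so it is safe to free its argument. */
	event_del_block(ev);
	free(state);
	event_free(ev);
}
#endif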
2845 /** Helper for event_del: always called with th_base_lock held.
2846  *
2847  * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2848  * EVEN_IF_FINALIZING} values. See those for more information.
2849  */
2850 int
2851 event_del_nolock_(struct event *ev, int blocking)
2852 {
2853         struct event_base *base;
2854         int res = 0, notify = 0;
2855
2856         event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2857                 ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2858
2859         /* An event without a base has not been added */
2860         if (ev->ev_base == NULL)
2861                 return (-1);
2862
2863         EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2864
2865         if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2866                 if (ev->ev_flags & EVLIST_FINALIZING) {
2867                         /* XXXX Debug */
2868                         return 0;
2869                 }
2870         }
2871
2872         base = ev->ev_base;
2873
2874         EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2875
2876         /* See if we are just active executing this event in a loop */
2877         if (ev->ev_events & EV_SIGNAL) {
2878                 if (ev->ev_ncalls && ev->ev_pncalls) {
2879                         /* Abort loop */
2880                         *ev->ev_pncalls = 0;
2881                 }
2882         }
2883
2884         if (ev->ev_flags & EVLIST_TIMEOUT) {
2885                 /* NOTE: We never need to notify the main thread because of a
2886                  * deleted timeout event: all that could happen if we don't is
2887                  * that the dispatch loop might wake up too early.  But the
2888                  * point of notifying the main thread _is_ to wake up the
2889                  * dispatch loop early anyway, so we wouldn't gain anything by
2890                  * doing it.
2891                  */
2892                 event_queue_remove_timeout(base, ev);
2893         }
2894
2895         if (ev->ev_flags & EVLIST_ACTIVE)
2896                 event_queue_remove_active(base, event_to_event_callback(ev));
2897         else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2898                 event_queue_remove_active_later(base, event_to_event_callback(ev));
2899
2900         if (ev->ev_flags & EVLIST_INSERTED) {
2901                 event_queue_remove_inserted(base, ev);
2902                 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2903                         res = evmap_io_del_(base, ev->ev_fd, ev);
2904                 else
2905                         res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2906                 if (res == 1) {
2907                         /* evmap says we need to notify the main thread. */
2908                         notify = 1;
2909                         res = 0;
2910                 }
2911                 /* If we do not have events, let's notify event base so it can
2912                  * exit without waiting */
2913                 if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
2914                         notify = 1;
2915         }
2916
2917         /* if we are not in the right thread, we need to wake up the loop */
2918         if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2919                 evthread_notify_base(base);
2920
2921         event_debug_note_del_(ev);
2922
2923         /* If the main thread is currently executing this event's callback,
2924          * and we are not the main thread, then we want to wait until the
2925          * callback is done before returning. That way, when this function
2926          * returns, it will be safe to free the user-supplied argument.
2927          */
2928 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2929         if (blocking != EVENT_DEL_NOBLOCK &&
2930             base->current_event == event_to_event_callback(ev) &&
2931             !EVBASE_IN_THREAD(base) &&
2932             (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2933                 ++base->current_event_waiters;
2934                 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2935         }
2936 #endif
2937
2938         return (res);
2939 }
2940
2941 void
2942 event_active(struct event *ev, int res, short ncalls)
2943 {
2944         if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2945                 event_warnx("%s: event has no event_base set.", __func__);
2946                 return;
2947         }
2948
2949         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2950
2951         event_debug_assert_is_setup_(ev);
2952
2953         event_active_nolock_(ev, res, ncalls);
2954
2955         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2956 }
2957
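/*
 * [Editor's illustrative sketch -- not part of libevent.]  event_active()
 * above lets another thread (or the same thread) make an event run as if its
 * conditions had triggered; the loop is woken up if needed.  Hypothetical
 * names, "#if 0":
 */
#if 0
#include <event2/event.h>

static void example_force_run(struct event *ev)
{
	/* Pretend the fd became readable; ncalls only matters for signal
	 * events, so 0 is fine here. */
	event_active(ev, EV_READ, 0);
}
#endif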
2958
2959 void
2960 event_active_nolock_(struct event *ev, int res, short ncalls)
2961 {
2962         struct event_base *base;
2963
2964         event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2965                 ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2966
2967         base = ev->ev_base;
2968         EVENT_BASE_ASSERT_LOCKED(base);
2969
2970         if (ev->ev_flags & EVLIST_FINALIZING) {
2971                 /* XXXX debug */
2972                 return;
2973         }
2974
2975         switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2976         default:
2977         case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2978                 EVUTIL_ASSERT(0);
2979                 break;
2980         case EVLIST_ACTIVE:
2981                 /* We get different kinds of events, add them together */
2982                 ev->ev_res |= res;
2983                 return;
2984         case EVLIST_ACTIVE_LATER:
2985                 ev->ev_res |= res;
2986                 break;
2987         case 0:
2988                 ev->ev_res = res;
2989                 break;
2990         }
2991
2992         if (ev->ev_pri < base->event_running_priority)
2993                 base->event_continue = 1;
2994
2995         if (ev->ev_events & EV_SIGNAL) {
2996 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2997                 if (base->current_event == event_to_event_callback(ev) &&
2998                     !EVBASE_IN_THREAD(base)) {
2999                         ++base->current_event_waiters;
3000                         EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
3001                 }
3002 #endif
3003                 ev->ev_ncalls = ncalls;
3004                 ev->ev_pncalls = NULL;
3005         }
3006
3007         event_callback_activate_nolock_(base, event_to_event_callback(ev));
3008 }
3009
3010 void
3011 event_active_later_(struct event *ev, int res)
3012 {
3013         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
3014         event_active_later_nolock_(ev, res);
3015         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
3016 }
3017
3018 void
3019 event_active_later_nolock_(struct event *ev, int res)
3020 {
3021         struct event_base *base = ev->ev_base;
3022         EVENT_BASE_ASSERT_LOCKED(base);
3023
3024         if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3025                 /* We get different kinds of events, add them together */
3026                 ev->ev_res |= res;
3027                 return;
3028         }
3029
3030         ev->ev_res = res;
3031
3032         event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
3033 }
3034
3035 int
3036 event_callback_activate_(struct event_base *base,
3037     struct event_callback *evcb)
3038 {
3039         int r;
3040         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3041         r = event_callback_activate_nolock_(base, evcb);
3042         EVBASE_RELEASE_LOCK(base, th_base_lock);
3043         return r;
3044 }
3045
3046 int
3047 event_callback_activate_nolock_(struct event_base *base,
3048     struct event_callback *evcb)
3049 {
3050         int r = 1;
3051
3052         if (evcb->evcb_flags & EVLIST_FINALIZING)
3053                 return 0;
3054
3055         switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3056         default:
3057                 EVUTIL_ASSERT(0);
3058                 EVUTIL_FALLTHROUGH;
3059         case EVLIST_ACTIVE_LATER:
3060                 event_queue_remove_active_later(base, evcb);
3061                 r = 0;
3062                 break;
3063         case EVLIST_ACTIVE:
3064                 return 0;
3065         case 0:
3066                 break;
3067         }
3068
3069         event_queue_insert_active(base, evcb);
3070
3071         if (EVBASE_NEED_NOTIFY(base))
3072                 evthread_notify_base(base);
3073
3074         return r;
3075 }
3076
3077 int
3078 event_callback_activate_later_nolock_(struct event_base *base,
3079     struct event_callback *evcb)
3080 {
3081         if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
3082                 return 0;
3083
3084         event_queue_insert_active_later(base, evcb);
3085         if (EVBASE_NEED_NOTIFY(base))
3086                 evthread_notify_base(base);
3087         return 1;
3088 }
3089
3090 void
3091 event_callback_init_(struct event_base *base,
3092     struct event_callback *cb)
3093 {
3094         memset(cb, 0, sizeof(*cb));
3095         cb->evcb_pri = base->nactivequeues - 1;
3096 }
3097
3098 int
3099 event_callback_cancel_(struct event_base *base,
3100     struct event_callback *evcb)
3101 {
3102         int r;
3103         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3104         r = event_callback_cancel_nolock_(base, evcb, 0);
3105         EVBASE_RELEASE_LOCK(base, th_base_lock);
3106         return r;
3107 }
3108
3109 int
3110 event_callback_cancel_nolock_(struct event_base *base,
3111     struct event_callback *evcb, int even_if_finalizing)
3112 {
3113         if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3114                 return 0;
3115
3116         if (evcb->evcb_flags & EVLIST_INIT)
3117                 return event_del_nolock_(event_callback_to_event(evcb),
3118                     even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3119
3120         switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3121         default:
3122         case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3123                 EVUTIL_ASSERT(0);
3124                 break;
3125         case EVLIST_ACTIVE:
3126                 /* The callback is already active; just remove it from the active queue. */
3127                 event_queue_remove_active(base, evcb);
3128                 return 0;
3129         case EVLIST_ACTIVE_LATER:
3130                 event_queue_remove_active_later(base, evcb);
3131                 break;
3132         case 0:
3133                 break;
3134         }
3135
3136         return 0;
3137 }
3138
3139 void
3140 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3141 {
3142         memset(cb, 0, sizeof(*cb));
3143         cb->evcb_cb_union.evcb_selfcb = fn;
3144         cb->evcb_arg = arg;
3145         cb->evcb_pri = priority;
3146         cb->evcb_closure = EV_CLOSURE_CB_SELF;
3147 }
3148
3149 void
3150 event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3151 {
3152         cb->evcb_pri = priority;
3153 }
3154
3155 void
3156 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3157 {
3158         if (!base)
3159                 base = current_base;
3160         event_callback_cancel_(base, cb);
3161 }
3162
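/* Descriptive note: cap on how many deferred callbacks may be made
 * immediately active at once.  event_deferred_cb_schedule_() below switches
 * to the "active later" queue once more than MAX_DEFERREDS_QUEUED deferreds
 * are pending, presumably so a burst of deferred work cannot monopolize a
 * single loop iteration. */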
3163 #define MAX_DEFERREDS_QUEUED 32
3164 int
3165 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3166 {
3167         int r = 1;
3168         if (!base)
3169                 base = current_base;
3170         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3171         if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3172                 r = event_callback_activate_later_nolock_(base, cb);
3173         } else {
3174                 r = event_callback_activate_nolock_(base, cb);
3175                 if (r) {
3176                         ++base->n_deferreds_queued;
3177                 }
3178         }
3179         EVBASE_RELEASE_LOCK(base, th_base_lock);
3180         return r;
3181 }
3182
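/* Descriptive note: compute how long the event loop may block waiting for
 * I/O.  On return, *tv_p is NULL when no timer is pending; otherwise *tv_p
 * holds the time remaining until the earliest timeout (cleared to zero if
 * that timeout has already elapsed).  Returns 0 on success, -1 if the clock
 * could not be read. */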
3183 static int
3184 timeout_next(struct event_base *base, struct timeval **tv_p)
3185 {
3186         /* Caller must hold th_base_lock */
3187         struct timeval now;
3188         struct event *ev;
3189         struct timeval *tv = *tv_p;
3190         int res = 0;
3191
3192         ev = min_heap_top_(&base->timeheap);
3193
3194         if (ev == NULL) {
3195                 /* if no time-based events are active, wait for I/O */
3196                 *tv_p = NULL;
3197                 goto out;
3198         }
3199
3200         if (gettime(base, &now) == -1) {
3201                 res = -1;
3202                 goto out;
3203         }
3204
3205         if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3206                 evutil_timerclear(tv);
3207                 goto out;
3208         }
3209
3210         evutil_timersub(&ev->ev_timeout, &now, tv);
3211
3212         EVUTIL_ASSERT(tv->tv_sec >= 0);
3213         EVUTIL_ASSERT(tv->tv_usec >= 0);
3214         event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3215
3216 out:
3217         return (res);
3218 }
3219
3220 /* Activate every event whose timeout has elapsed. */
3221 static void
3222 timeout_process(struct event_base *base)
3223 {
3224         /* Caller must hold lock. */
3225         struct timeval now;
3226         struct event *ev;
3227
3228         if (min_heap_empty_(&base->timeheap)) {
3229                 return;
3230         }
3231
3232         gettime(base, &now);
3233
3234         while ((ev = min_heap_top_(&base->timeheap))) {
3235                 if (evutil_timercmp(&ev->ev_timeout, &now, >))
3236                         break;
3237
3238                 /* remove this event from its queues */
3239                 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3240
3241                 event_debug(("timeout_process: event: %p, call %p",
3242                          ev, ev->ev_callback));
3243                 event_active_nolock_(ev, EV_TIMEOUT, 1);
3244         }
3245 }
3246
3247 #ifndef MAX
3248 #define MAX(a,b) (((a)>(b))?(a):(b))
3249 #endif
3250
3251 #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3252
3253 /* These are a fancy way to spell
3254      if (~flags & EVLIST_INTERNAL)
3255          base->event_count--/++;
3256 */
3257 #define DECR_EVENT_COUNT(base,flags) \
3258         ((base)->event_count -= !((flags) & EVLIST_INTERNAL))
3259 #define INCR_EVENT_COUNT(base,flags) do {                                       \
3260         ((base)->event_count += !((flags) & EVLIST_INTERNAL));                  \
3261         MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);          \
3262 } while (0)
3263
3264 static void
3265 event_queue_remove_inserted(struct event_base *base, struct event *ev)
3266 {
3267         EVENT_BASE_ASSERT_LOCKED(base);
3268         if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3269                 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3270                     ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3271                 return;
3272         }
3273         DECR_EVENT_COUNT(base, ev->ev_flags);
3274         ev->ev_flags &= ~EVLIST_INSERTED;
3275 }
3276 static void
3277 event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3278 {
3279         EVENT_BASE_ASSERT_LOCKED(base);
3280         if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3281                 event_errx(1, "%s: %p not on queue %x", __func__,
3282                            evcb, EVLIST_ACTIVE);
3283                 return;
3284         }
3285         DECR_EVENT_COUNT(base, evcb->evcb_flags);
3286         evcb->evcb_flags &= ~EVLIST_ACTIVE;
3287         base->event_count_active--;
3288
3289         TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3290             evcb, evcb_active_next);
3291 }
3292 static void
3293 event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3294 {
3295         EVENT_BASE_ASSERT_LOCKED(base);
3296         if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3297                 event_errx(1, "%s: %p not on queue %x", __func__,
3298                            evcb, EVLIST_ACTIVE_LATER);
3299                 return;
3300         }
3301         DECR_EVENT_COUNT(base, evcb->evcb_flags);
3302         evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3303         base->event_count_active--;
3304
3305         TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3306 }
3307 static void
3308 event_queue_remove_timeout(struct event_base *base, struct event *ev)
3309 {
3310         EVENT_BASE_ASSERT_LOCKED(base);
3311         if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3312                 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3313                     ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3314                 return;
3315         }
3316         DECR_EVENT_COUNT(base, ev->ev_flags);
3317         ev->ev_flags &= ~EVLIST_TIMEOUT;
3318
3319         if (is_common_timeout(&ev->ev_timeout, base)) {
3320                 struct common_timeout_list *ctl =
3321                     get_common_timeout_list(base, &ev->ev_timeout);
3322                 TAILQ_REMOVE(&ctl->events, ev,
3323                     ev_timeout_pos.ev_next_with_common_timeout);
3324         } else {
3325                 min_heap_erase_(&base->timeheap, ev);
3326         }
3327 }
3328
3329 #ifdef USE_REINSERT_TIMEOUT
3330 /* Remove and reinsert 'ev' into the timeout queue. */
3331 static void
3332 event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3333     int was_common, int is_common, int old_timeout_idx)
3334 {
3335         struct common_timeout_list *ctl;
3336         if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3337                 event_queue_insert_timeout(base, ev);
3338                 return;
3339         }
3340
3341         switch ((was_common<<1) | is_common) {
3342         case 3: /* Changing from one common timeout to another */
3343                 ctl = base->common_timeout_queues[old_timeout_idx];
3344                 TAILQ_REMOVE(&ctl->events, ev,
3345                     ev_timeout_pos.ev_next_with_common_timeout);
3346                 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3347                 insert_common_timeout_inorder(ctl, ev);
3348                 break;
3349         case 2: /* Was common; is no longer common */
3350                 ctl = base->common_timeout_queues[old_timeout_idx];
3351                 TAILQ_REMOVE(&ctl->events, ev,
3352                     ev_timeout_pos.ev_next_with_common_timeout);
3353                 min_heap_push_(&base->timeheap, ev);
3354                 break;
3355         case 1: /* Wasn't common; has become common. */
3356                 min_heap_erase_(&base->timeheap, ev);
3357                 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3358                 insert_common_timeout_inorder(ctl, ev);
3359                 break;
3360         case 0: /* was in heap; is still on heap. */
3361                 min_heap_adjust_(&base->timeheap, ev);
3362                 break;
3363         default:
3364                 EVUTIL_ASSERT(0); /* unreachable */
3365                 break;
3366         }
3367 }
3368 #endif
3369
3370 /* Add 'ev' to the common timeout list 'ctl', keeping the list sorted by timeout. */
3371 static void
3372 insert_common_timeout_inorder(struct common_timeout_list *ctl,
3373     struct event *ev)
3374 {
3375         struct event *e;
3376         /* By all logic, we should just be able to append 'ev' to the end of
3377          * ctl->events, since the timeout on each 'ev' is set to {the common
3378          * timeout} + {the time when we add the event}, and so the events
3379          * should arrive in order of their timeouts.  But just in case
3380          * there's some wacky threading issue going on, we do a search from
3381          * the end of the list to find the right insertion point.
3382          */
3383         TAILQ_FOREACH_REVERSE(e, &ctl->events,
3384             event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3385                 /* This timercmp is a little sneaky, since both ev and e have
3386                  * magic values in tv_usec.  Fortunately, they ought to have
3387                  * the _same_ magic values in tv_usec.  Let's assert for that.
3388                  */
3389                 EVUTIL_ASSERT(
3390                         is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3391                 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3392                         TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3393                             ev_timeout_pos.ev_next_with_common_timeout);
3394                         return;
3395                 }
3396         }
3397         TAILQ_INSERT_HEAD(&ctl->events, ev,
3398             ev_timeout_pos.ev_next_with_common_timeout);
3399 }
3400
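/* Illustrative sketch (not part of libevent itself): the common-timeout
 * machinery above is driven by the public event_base_init_common_timeout()
 * API.  Events added with the returned "magic" timeval share one ordered
 * queue instead of each occupying a min-heap slot.  Names below are
 * hypothetical. */
#if 0
void example_common_timeout(struct event_base *base, struct event *ev)
{
	struct timeval five_seconds = { 5, 0 };
	const struct timeval *common =
	    event_base_init_common_timeout(base, &five_seconds);
	if (common)
		event_add(ev, common);	/* queued in a common_timeout_list */
}
#endif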
3401 static void
3402 event_queue_insert_inserted(struct event_base *base, struct event *ev)
3403 {
3404         EVENT_BASE_ASSERT_LOCKED(base);
3405
3406         if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3407                 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3408                     ev, EV_SOCK_ARG(ev->ev_fd));
3409                 return;
3410         }
3411
3412         INCR_EVENT_COUNT(base, ev->ev_flags);
3413
3414         ev->ev_flags |= EVLIST_INSERTED;
3415 }
3416
3417 static void
3418 event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3419 {
3420         EVENT_BASE_ASSERT_LOCKED(base);
3421
3422         if (evcb->evcb_flags & EVLIST_ACTIVE) {
3423                 /* Double insertion is possible for active events */
3424                 return;
3425         }
3426
3427         INCR_EVENT_COUNT(base, evcb->evcb_flags);
3428
3429         evcb->evcb_flags |= EVLIST_ACTIVE;
3430
3431         base->event_count_active++;
3432         MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3433         EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3434         TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3435             evcb, evcb_active_next);
3436 }
3437
3438 static void
3439 event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3440 {
3441         EVENT_BASE_ASSERT_LOCKED(base);
3442         if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3443                 /* Double insertion is possible */
3444                 return;
3445         }
3446
3447         INCR_EVENT_COUNT(base, evcb->evcb_flags);
3448         evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3449         base->event_count_active++;
3450         MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3451         EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3452         TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3453 }
3454
3455 static void
3456 event_queue_insert_timeout(struct event_base *base, struct event *ev)
3457 {
3458         EVENT_BASE_ASSERT_LOCKED(base);
3459
3460         if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3461                 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3462                     ev, EV_SOCK_ARG(ev->ev_fd));
3463                 return;
3464         }
3465
3466         INCR_EVENT_COUNT(base, ev->ev_flags);
3467
3468         ev->ev_flags |= EVLIST_TIMEOUT;
3469
3470         if (is_common_timeout(&ev->ev_timeout, base)) {
3471                 struct common_timeout_list *ctl =
3472                     get_common_timeout_list(base, &ev->ev_timeout);
3473                 insert_common_timeout_inorder(ctl, ev);
3474         } else {
3475                 min_heap_push_(&base->timeheap, ev);
3476         }
3477 }
3478
3479 static void
3480 event_queue_make_later_events_active(struct event_base *base)
3481 {
3482         struct event_callback *evcb;
3483         EVENT_BASE_ASSERT_LOCKED(base);
3484
3485         while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3486                 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3487                 evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3488                 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3489                 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3490                 base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3491         }
3492 }
3493
3494 /* Functions for debugging */
3495
3496 const char *
3497 event_get_version(void)
3498 {
3499         return (EVENT__VERSION);
3500 }
3501
3502 ev_uint32_t
3503 event_get_version_number(void)
3504 {
3505         return (EVENT__NUMERIC_VERSION);
3506 }
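/* Illustrative note: EVENT__NUMERIC_VERSION packs the release into a single
 * integer so callers can compare versions numerically.  By libevent's usual
 * convention the major, minor and patch levels occupy the top three bytes
 * (e.g. 0x02010c00 for 2.1.12); verify the exact encoding against
 * event2/event.h.  A hypothetical decoding sketch: */
#if 0
void example_print_version(void)
{
	ev_uint32_t v = event_get_version_number();
	printf("libevent %u.%u.%u\n",
	    (unsigned)(v >> 24) & 0xff,
	    (unsigned)(v >> 16) & 0xff,
	    (unsigned)(v >> 8) & 0xff);
}
#endif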
3507
3508 /*
3509  * No thread-safe interface needed - the information should be the same
3510  * for all threads.
3511  */
3512
3513 const char *
3514 event_get_method(void)
3515 {
3516         return (current_base->evsel->name);
3517 }
3518
3519 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3520 static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3521 static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3522 static void (*mm_free_fn_)(void *p) = NULL;
3523
3524 void *
3525 event_mm_malloc_(size_t sz)
3526 {
3527         if (sz == 0)
3528                 return NULL;
3529
3530         if (mm_malloc_fn_)
3531                 return mm_malloc_fn_(sz);
3532         else
3533                 return malloc(sz);
3534 }
3535
3536 void *
3537 event_mm_calloc_(size_t count, size_t size)
3538 {
3539         if (count == 0 || size == 0)
3540                 return NULL;
3541
3542         if (mm_malloc_fn_) {
3543                 size_t sz = count * size;
3544                 void *p = NULL;
3545                 if (count > EV_SIZE_MAX / size)
3546                         goto error;
3547                 p = mm_malloc_fn_(sz);
3548                 if (p)
3549                         return memset(p, 0, sz);
3550         } else {
3551                 void *p = calloc(count, size);
3552 #ifdef _WIN32
3553                 /* Windows calloc doesn't reliably set ENOMEM */
3554                 if (p == NULL)
3555                         goto error;
3556 #endif
3557                 return p;
3558         }
3559
3560 error:
3561         errno = ENOMEM;
3562         return NULL;
3563 }
3564
3565 char *
3566 event_mm_strdup_(const char *str)
3567 {
3568         if (!str) {
3569                 errno = EINVAL;
3570                 return NULL;
3571         }
3572
3573         if (mm_malloc_fn_) {
3574                 size_t ln = strlen(str);
3575                 void *p = NULL;
3576                 if (ln == EV_SIZE_MAX)
3577                         goto error;
3578                 p = mm_malloc_fn_(ln+1);
3579                 if (p)
3580                         return memcpy(p, str, ln+1);
3581         } else
3582 #ifdef _WIN32
3583                 return _strdup(str);
3584 #else
3585                 return strdup(str);
3586 #endif
3587
3588 error:
3589         errno = ENOMEM;
3590         return NULL;
3591 }
3592
3593 void *
3594 event_mm_realloc_(void *ptr, size_t sz)
3595 {
3596         if (mm_realloc_fn_)
3597                 return mm_realloc_fn_(ptr, sz);
3598         else
3599                 return realloc(ptr, sz);
3600 }
3601
3602 void
3603 event_mm_free_(void *ptr)
3604 {
3605         if (mm_free_fn_)
3606                 mm_free_fn_(ptr);
3607         else
3608                 free(ptr);
3609 }
3610
3611 void
3612 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3613                         void *(*realloc_fn)(void *ptr, size_t sz),
3614                         void (*free_fn)(void *ptr))
3615 {
3616         mm_malloc_fn_ = malloc_fn;
3617         mm_realloc_fn_ = realloc_fn;
3618         mm_free_fn_ = free_fn;
3619 }
3620 #endif
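/* Illustrative sketch (not part of libevent itself): replacing the allocator
 * should happen before any other libevent call, since memory obtained through
 * the old functions would otherwise be released through the new ones.  The
 * wrapper names below are hypothetical. */
#if 0
static void *my_malloc(size_t sz) { return malloc(sz); }
static void *my_realloc(void *p, size_t sz) { return realloc(p, sz); }
static void my_free(void *p) { free(p); }

void example_install_allocator(void)
{
	event_set_mem_functions(my_malloc, my_realloc, my_free);
	/* ... only now create event bases, events, buffers, ... */
}
#endif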
3621
3622 #ifdef EVENT__HAVE_EVENTFD
3623 static void
3624 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3625 {
3626         ev_uint64_t msg;
3627         ev_ssize_t r;
3628         struct event_base *base = arg;
3629
3630         r = read(fd, (void*) &msg, sizeof(msg));
3631         if (r<0 && errno != EAGAIN) {
3632                 event_sock_warn(fd, "Error reading from eventfd");
3633         }
3634         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3635         base->is_notify_pending = 0;
3636         EVBASE_RELEASE_LOCK(base, th_base_lock);
3637 }
3638 #endif
3639
3640 static void
3641 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3642 {
3643         unsigned char buf[1024];
3644         struct event_base *base = arg;
3645 #ifdef _WIN32
3646         while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3647                 ;
3648 #else
3649         while (read(fd, (char*)buf, sizeof(buf)) > 0)
3650                 ;
3651 #endif
3652
3653         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3654         base->is_notify_pending = 0;
3655         EVBASE_RELEASE_LOCK(base, th_base_lock);
3656 }
3657
3658 int
3659 evthread_make_base_notifiable(struct event_base *base)
3660 {
3661         int r;
3662         if (!base)
3663                 return -1;
3664
3665         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3666         r = evthread_make_base_notifiable_nolock_(base);
3667         EVBASE_RELEASE_LOCK(base, th_base_lock);
3668         return r;
3669 }
3670
3671 static int
3672 evthread_make_base_notifiable_nolock_(struct event_base *base)
3673 {
3674         void (*cb)(evutil_socket_t, short, void *);
3675         int (*notify)(struct event_base *);
3676
3677         if (base->th_notify_fn != NULL) {
3678                 /* The base is already notifiable: we're doing fine. */
3679                 return 0;
3680         }
3681
3682 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3683         if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3684                 base->th_notify_fn = event_kq_notify_base_;
3685                 /* No need to add an event here; the backend can wake
3686                  * itself up just fine. */
3687                 return 0;
3688         }
3689 #endif
3690
3691 #ifdef EVENT__HAVE_EVENTFD
3692         base->th_notify_fd[0] = evutil_eventfd_(0,
3693             EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3694         if (base->th_notify_fd[0] >= 0) {
3695                 base->th_notify_fd[1] = -1;
3696                 notify = evthread_notify_base_eventfd;
3697                 cb = evthread_notify_drain_eventfd;
3698         } else
3699 #endif
3700         if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3701                 notify = evthread_notify_base_default;
3702                 cb = evthread_notify_drain_default;
3703         } else {
3704                 return -1;
3705         }
3706
3707         base->th_notify_fn = notify;
3708
3709         /* prepare an event that we can use for wakeup */
3710         event_assign(&base->th_notify, base, base->th_notify_fd[0],
3711                                  EV_READ|EV_PERSIST, cb, base);
3712
3713         /* we need to mark this as an internal event */
3714         base->th_notify.ev_flags |= EVLIST_INTERNAL;
3715         event_priority_set(&base->th_notify, 0);
3716
3717         return event_add_nolock_(&base->th_notify, NULL, 0);
3718 }
3719
3720 int
3721 event_base_foreach_event_nolock_(struct event_base *base,
3722     event_base_foreach_event_cb fn, void *arg)
3723 {
3724         int r, i;
3725         size_t u;
3726         struct event *ev;
3727
3728         /* Start out with all the EVLIST_INSERTED events. */
3729         if ((r = evmap_foreach_event_(base, fn, arg)))
3730                 return r;
3731
3732         /* Okay, now we deal with those events that have timeouts and are in
3733          * the min-heap. */
3734         for (u = 0; u < base->timeheap.n; ++u) {
3735                 ev = base->timeheap.p[u];
3736                 if (ev->ev_flags & EVLIST_INSERTED) {
3737                         /* we already processed this one */
3738                         continue;
3739                 }
3740                 if ((r = fn(base, ev, arg)))
3741                         return r;
3742         }
3743
3744         /* Now for the events in one of the common-timeout
3745          * queues (not the min-heap). */
3746         for (i = 0; i < base->n_common_timeouts; ++i) {
3747                 struct common_timeout_list *ctl =
3748                     base->common_timeout_queues[i];
3749                 TAILQ_FOREACH(ev, &ctl->events,
3750                     ev_timeout_pos.ev_next_with_common_timeout) {
3751                         if (ev->ev_flags & EVLIST_INSERTED) {
3752                                 /* we already processed this one */
3753                                 continue;
3754                         }
3755                         if ((r = fn(base, ev, arg)))
3756                                 return r;
3757                 }
3758         }
3759
3760         /* Finally, we deal with all the active events that we haven't touched
3761          * yet. */
3762         for (i = 0; i < base->nactivequeues; ++i) {
3763                 struct event_callback *evcb;
3764                 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3765                         if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3766                                 /* This isn't an event (EVLIST_INIT clear), or
3767                                  * we already processed it (EVLIST_INSERTED or
3768                                  * EVLIST_TIMEOUT set). */
3769                                 continue;
3770                         }
3771                         ev = event_callback_to_event(evcb);
3772                         if ((r = fn(base, ev, arg)))
3773                                 return r;
3774                 }
3775         }
3776
3777         return 0;
3778 }
3779
3780 /* Helper for event_base_dump_events: called on each event in the event base;
3781  * dumps only the inserted events. */
3782 static int
3783 dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3784 {
3785         FILE *output = arg;
3786         const char *gloss = (e->ev_events & EV_SIGNAL) ?
3787             "sig" : "fd ";
3788
3789         if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3790                 return 0;
3791
3792         fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
3793             (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3794             (e->ev_events&EV_READ)?" Read":"",
3795             (e->ev_events&EV_WRITE)?" Write":"",
3796             (e->ev_events&EV_CLOSED)?" EOF":"",
3797             (e->ev_events&EV_SIGNAL)?" Signal":"",
3798             (e->ev_events&EV_PERSIST)?" Persist":"",
3799             (e->ev_events&EV_ET)?" ET":"",
3800             (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3801         if (e->ev_flags & EVLIST_TIMEOUT) {
3802                 struct timeval tv;
3803                 tv.tv_sec = e->ev_timeout.tv_sec;
3804                 tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3805                 evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3806                 fprintf(output, " Timeout=%ld.%06d",
3807                     (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3808         }
3809         fputc('\n', output);
3810
3811         return 0;
3812 }
3813
3814 /* Helper for event_base_dump_events: called on each event in the event base;
3815  * dumps only the active events. */
3816 static int
3817 dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3818 {
3819         FILE *output = arg;
3820         const char *gloss = (e->ev_events & EV_SIGNAL) ?
3821             "sig" : "fd ";
3822
3823         if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3824                 return 0;
3825
3826         fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3827             (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3828             (e->ev_res&EV_READ)?" Read":"",
3829             (e->ev_res&EV_WRITE)?" Write":"",
3830             (e->ev_res&EV_CLOSED)?" EOF":"",
3831             (e->ev_res&EV_SIGNAL)?" Signal":"",
3832             (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3833             (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3834             (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3835
3836         return 0;
3837 }
3838
3839 int
3840 event_base_foreach_event(struct event_base *base,
3841     event_base_foreach_event_cb fn, void *arg)
3842 {
3843         int r;
3844         if ((!fn) || (!base)) {
3845                 return -1;
3846         }
3847         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3848         r = event_base_foreach_event_nolock_(base, fn, arg);
3849         EVBASE_RELEASE_LOCK(base, th_base_lock);
3850         return r;
3851 }
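/* Illustrative sketch (not part of libevent itself): a minimal callback for
 * event_base_foreach_event() that counts the events in a base.  Returning
 * nonzero from the callback stops the iteration early, as the _nolock_
 * helper above shows.  Names below are hypothetical. */
#if 0
static int
count_events_cb(const struct event_base *base, const struct event *e,
    void *arg)
{
	int *count = arg;
	(void)base;
	(void)e;
	++*count;
	return 0;	/* 0 = keep iterating; nonzero stops the walk */
}

void example_count(struct event_base *base)
{
	int n = 0;
	event_base_foreach_event(base, count_events_cb, &n);
}
#endif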
3852
3853
3854 void
3855 event_base_dump_events(struct event_base *base, FILE *output)
3856 {
3857         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3858         fprintf(output, "Inserted events:\n");
3859         event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3860
3861         fprintf(output, "Active events:\n");
3862         event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3863         EVBASE_RELEASE_LOCK(base, th_base_lock);
3864 }
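/* Illustrative usage (hypothetical): when debugging a stuck loop, the whole
 * event table can be dumped to a stream, e.g.:
 *
 *	event_base_dump_events(base, stderr);
 */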
3865
3866 void
3867 event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3868 {
3869         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3870
3871         /* Activate any non-timer events */
3872         if (!(events & EV_TIMEOUT)) {
3873                 evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3874         } else {
3875                 /* If we want to activate timer events, loop and activate each event with
3876                  * the same fd in both the timeheap and the common-timeout lists */
3877                 int i;
3878                 size_t u;
3879                 struct event *ev;
3880
3881                 for (u = 0; u < base->timeheap.n; ++u) {
3882                         ev = base->timeheap.p[u];
3883                         if (ev->ev_fd == fd) {
3884                                 event_active_nolock_(ev, EV_TIMEOUT, 1);
3885                         }
3886                 }
3887
3888                 for (i = 0; i < base->n_common_timeouts; ++i) {
3889                         struct common_timeout_list *ctl = base->common_timeout_queues[i];
3890                         TAILQ_FOREACH(ev, &ctl->events,
3891                                 ev_timeout_pos.ev_next_with_common_timeout) {
3892                                 if (ev->ev_fd == fd) {
3893                                         event_active_nolock_(ev, EV_TIMEOUT, 1);
3894                                 }
3895                         }
3896                 }
3897         }
3898
3899         EVBASE_RELEASE_LOCK(base, th_base_lock);
3900 }
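/* Illustrative sketch (not part of libevent itself): event_base_active_by_fd()
 * activates every event watching a given fd, which can be handy when the
 * application learns out-of-band that a descriptor needs servicing.  The
 * function name below is hypothetical. */
#if 0
void example_force_readable(struct event_base *base, evutil_socket_t fd)
{
	/* Run the read callbacks of all events registered on 'fd'. */
	event_base_active_by_fd(base, fd, EV_READ);
}
#endif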
3901
3902 void
3903 event_base_active_by_signal(struct event_base *base, int sig)
3904 {
3905         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3906         evmap_signal_active_(base, sig, 1);
3907         EVBASE_RELEASE_LOCK(base, th_base_lock);
3908 }
3909
3910
3911 void
3912 event_base_add_virtual_(struct event_base *base)
3913 {
3914         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3915         base->virtual_event_count++;
3916         MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3917         EVBASE_RELEASE_LOCK(base, th_base_lock);
3918 }
3919
3920 void
3921 event_base_del_virtual_(struct event_base *base)
3922 {
3923         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3924         EVUTIL_ASSERT(base->virtual_event_count > 0);
3925         base->virtual_event_count--;
3926         if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3927                 evthread_notify_base(base);
3928         EVBASE_RELEASE_LOCK(base, th_base_lock);
3929 }
3930
3931 static void
3932 event_free_debug_globals_locks(void)
3933 {
3934 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3935 #ifndef EVENT__DISABLE_DEBUG_MODE
3936         if (event_debug_map_lock_ != NULL) {
3937                 EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3938                 event_debug_map_lock_ = NULL;
3939                 evthreadimpl_disable_lock_debugging_();
3940         }
3941 #endif /* EVENT__DISABLE_DEBUG_MODE */
3942 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3943         return;
3944 }
3945
3946 static void
3947 event_free_debug_globals(void)
3948 {
3949         event_free_debug_globals_locks();
3950 }
3951
3952 static void
3953 event_free_evsig_globals(void)
3954 {
3955         evsig_free_globals_();
3956 }
3957
3958 static void
3959 event_free_evutil_globals(void)
3960 {
3961         evutil_free_globals_();
3962 }
3963
3964 static void
3965 event_free_globals(void)
3966 {
3967         event_free_debug_globals();
3968         event_free_evsig_globals();
3969         event_free_evutil_globals();
3970 }
3971
3972 void
3973 libevent_global_shutdown(void)
3974 {
3975         event_disable_debug_mode();
3976         event_free_globals();
3977 }
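/* Descriptive note: libevent_global_shutdown() releases global state (debug
 * structures, signal and evutil globals).  It is intended to be called at
 * most once, at process shutdown, after every event_base and event the
 * program created has been freed; no libevent API should be used afterwards. */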
3978
3979 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3980 int
3981 event_global_setup_locks_(const int enable_locks)
3982 {
3983 #ifndef EVENT__DISABLE_DEBUG_MODE
3984         EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3985 #endif
3986         if (evsig_global_setup_locks_(enable_locks) < 0)
3987                 return -1;
3988         if (evutil_global_setup_locks_(enable_locks) < 0)
3989                 return -1;
3990         if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3991                 return -1;
3992         return 0;
3993 }
3994 #endif
3995
3996 void
3997 event_base_assert_ok_(struct event_base *base)
3998 {
3999         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
4000         event_base_assert_ok_nolock_(base);
4001         EVBASE_RELEASE_LOCK(base, th_base_lock);
4002 }
4003
4004 void
4005 event_base_assert_ok_nolock_(struct event_base *base)
4006 {
4007         int i;
4008         size_t u;
4009         int count;
4010
4011         /* First do checks on the per-fd and per-signal lists */
4012         evmap_check_integrity_(base);
4013
4014         /* Check the heap property */
4015         for (u = 1; u < base->timeheap.n; ++u) {
4016                 size_t parent = (u - 1) / 2;
4017                 struct event *ev, *p_ev;
4018                 ev = base->timeheap.p[u];
4019                 p_ev = base->timeheap.p[parent];
4020                 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
4021                 EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
4022                 EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == u);
4023         }
4024
4025         /* Check that the common timeouts are fine */
4026         for (i = 0; i < base->n_common_timeouts; ++i) {
4027                 struct common_timeout_list *ctl = base->common_timeout_queues[i];
4028                 struct event *last=NULL, *ev;
4029
4030                 EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
4031
4032                 TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
4033                         if (last)
4034                                 EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
4035                         EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
4036                         EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
4037                         EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
4038                         last = ev;
4039                 }
4040         }
4041
4042         /* Check the active queues. */
4043         count = 0;
4044         for (i = 0; i < base->nactivequeues; ++i) {
4045                 struct event_callback *evcb;
4046                 EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
4047                 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
4048                         EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
4049                         EVUTIL_ASSERT(evcb->evcb_pri == i);
4050                         ++count;
4051                 }
4052         }
4053
4054         {
4055                 struct event_callback *evcb;
4056                 TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
4057                         EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
4058                         ++count;
4059                 }
4060         }
4061         EVUTIL_ASSERT(count == base->event_count_active);
4062 }