/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
27 #include "event2/event-config.h"
28 #include "evconfig-private.h"
32 #define WIN32_LEAN_AND_MEAN
34 #undef WIN32_LEAN_AND_MEAN
36 #include <sys/types.h>
37 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
40 #include <sys/queue.h>
41 #ifdef EVENT__HAVE_SYS_SOCKET_H
42 #include <sys/socket.h>
46 #ifdef EVENT__HAVE_UNISTD_H
55 #ifdef EVENT__HAVE_FCNTL_H
59 #include "event2/event.h"
60 #include "event2/event_struct.h"
61 #include "event2/event_compat.h"
62 #include "event2/watch.h"
63 #include "event-internal.h"
64 #include "defer-internal.h"
65 #include "evthread-internal.h"
66 #include "event2/thread.h"
67 #include "event2/util.h"
68 #include "log-internal.h"
69 #include "evmap-internal.h"
70 #include "iocp-internal.h"
71 #include "changelist-internal.h"
72 #define HT_NO_CACHE_HASH_VALUES
73 #include "ht-internal.h"
74 #include "util-internal.h"
#ifdef EVENT__HAVE_WORKING_KQUEUE
#include "kqueue-internal.h"
#endif

#ifdef EVENT__HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef EVENT__HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef EVENT__HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef EVENT__HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef EVENT__HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef EVENT__HAVE_WEPOLL
extern const struct eventop wepollops;
#endif
#ifdef _WIN32
extern const struct eventop win32ops;
#endif
/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef EVENT__HAVE_EPOLL
	&epollops,
#endif
#ifdef EVENT__HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef EVENT__HAVE_POLL
	&pollops,
#endif
#ifdef EVENT__HAVE_SELECT
	&selectops,
#endif
#ifdef _WIN32
	&win32ops,
#endif
#ifdef EVENT__HAVE_WEPOLL
	&wepollops,
#endif
	NULL
};
/* Global state; deprecated */
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

/* Global state */

static void *event_self_cbarg_ptr_ = NULL;
/* Prototypes */
static void	event_queue_insert_active(struct event_base *, struct event_callback *);
static void	event_queue_insert_active_later(struct event_base *, struct event_callback *);
static void	event_queue_insert_timeout(struct event_base *, struct event *);
static void	event_queue_insert_inserted(struct event_base *, struct event *);
static void	event_queue_remove_active(struct event_base *, struct event_callback *);
static void	event_queue_remove_active_later(struct event_base *, struct event_callback *);
static void	event_queue_remove_timeout(struct event_base *, struct event *);
static void	event_queue_remove_inserted(struct event_base *, struct event *);
static void	event_queue_make_later_events_active(struct event_base *base);

static int	evthread_make_base_notifiable_nolock_(struct event_base *base);
static int	event_del_(struct event *ev, int blocking);

#ifdef USE_REINSERT_TIMEOUT
/* This code seems buggy; only turn it on if we find out what the trouble is. */
static void	event_queue_reinsert_timeout(struct event_base *, struct event *, int was_common, int is_common, int old_timeout_idx);
#endif

static int	event_haveevents(struct event_base *);

static int	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);

static inline void	event_signal_closure(struct event_base *, struct event *ev);
static inline void	event_persist_closure(struct event_base *, struct event *ev);

static int	evthread_notify_base(struct event_base *base);

static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev);
#ifndef EVENT__DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been setup or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
 */

struct event_debug_entry {
	HT_ENTRY(event_debug_entry) node;
	const struct event *ptr;
	unsigned added : 1;
};
static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
{
	/* We need to do this silliness to convince compilers that we
	 * honestly mean to cast e->ptr to an integer, and discard any
	 * part of it that doesn't fit in an unsigned.
	 */
	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
	/* Our hashtable implementation is pretty sensitive to low bits,
	 * and every struct event is over 64 bytes in size, so we can
	 * just say >>6. */
	return (u >> 6);
}

static inline int
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
{
	return a->ptr == b->ptr;
}
int event_debug_mode_on_ = 0;

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
/**
 * @brief debug mode variable which is set for any function/structure that
 *        needs to be shared across threads (if thread support is enabled).
 *
 * When and if evthreads are initialized, this variable will be evaluated,
 * and if set to something other than zero, this means the evthread setup
 * functions were called out of order.
 *
 * See: "Locks and threading" in the documentation.
 */
int event_debug_created_threadable_ctx_ = 0;
#endif

/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *event_debug_map_lock_ = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
	HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
/* record that ev is now setup (that is, ready for an add) */
static void event_debug_note_setup_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent) {
		dent->added = 0;
	} else {
		dent = mm_malloc(sizeof(*dent));
		if (!dent)
			event_err(1,
			    "Out of memory in debugging code");
		dent->ptr = ev;
		dent->added = 0;
		HT_INSERT(event_debug_map, &global_debug_map, dent);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* record that ev is no longer setup */
static void event_debug_note_teardown_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
	if (dent)
		mm_free(dent);
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* record that ev is now added */
static void event_debug_note_add_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent) {
		dent->added = 1;
	} else {
		event_errx(EVENT_ERR_ABORT_,
		    "%s: noting an add on a non-setup event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* record that ev is no longer added */
static void event_debug_note_del_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent) {
		dent->added = 0;
	} else {
		event_errx(EVENT_ERR_ABORT_,
		    "%s: noting a del on a non-setup event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* assert that ev is setup (i.e., okay to add or inspect) */
static void event_debug_assert_is_setup_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		return;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (!dent) {
		event_errx(EVENT_ERR_ABORT_,
		    "%s called on a non-initialized event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
}
/* assert that ev is not added (i.e., okay to tear down or set up again) */
static void event_debug_assert_not_added_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		return;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent && dent->added) {
		event_errx(EVENT_ERR_ABORT_,
		    "%s called on an already added event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT", "
		    "flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
}
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
{
	if (!event_debug_mode_on_)
		return;
	if (fd < 0)
		return;

#ifdef O_NONBLOCK
	{
		int flags;
		if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
			EVUTIL_ASSERT(flags & O_NONBLOCK);
		}
	}
#endif
}
#else
static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
static void event_debug_note_add_(const struct event *ev) { (void)ev; }
static void event_debug_note_del_(const struct event *ev) { (void)ev; }
static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
#endif
#define EVENT_BASE_ASSERT_LOCKED(base)		\
	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL 5
/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
		return (-1);
	}

	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
	    < tp->tv_sec) {
		struct timeval tv;
		evutil_gettimeofday(&tv, NULL);
		evutil_timersub(&tv, tp, &base->tv_clock_diff);
		base->last_updated_clock_diff = tp->tv_sec;
	}

	return (0);
}
int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int r;
	if (!base) {
		base = current_base;
		if (!current_base)
			return evutil_gettimeofday(tv, NULL);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec == 0) {
		r = evutil_gettimeofday(tv, NULL);
	} else {
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
		r = 0;
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}

/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
		gettime(base, &base->tv_cache);
}
int
event_base_update_cache_time(struct event_base *base)
{
	if (!base) {
		base = current_base;
		if (!current_base)
			return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->running_loop)
		update_time_cache(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}
static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}

static inline struct event_callback *
event_to_event_callback(struct event *ev)
{
	return &ev->ev_evcallback;
}
struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new_with_config(NULL);

	if (base == NULL) {
		event_errx(1, "%s: Unable to construct event_base", __func__);
		return NULL;
	}

	current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	if (cfg) {
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
	return base;
}
/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
static int
event_config_is_avoided_method(const struct event_config *cfg,
    const char *method)
{
	struct event_config_entry *entry;

	TAILQ_FOREACH(entry, &cfg->entries, next) {
		if (entry->avoid_method != NULL &&
		    strcmp(entry->avoid_method, method) == 0)
			return (1);
	}

	return (0);
}
/** Return true iff 'method' is disabled according to the environment. */
static int
event_is_method_disabled(const char *name)
{
	char environment[64];
	int i;

	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
	for (i = 8; environment[i] != '\0'; ++i)
		environment[i] = EVUTIL_TOUPPER_(environment[i]);
	/* Note that evutil_getenv_() ignores the environment entirely if
	 * we're setuid */
	return (evutil_getenv_(environment) != NULL);
}
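
/* Example of the check above: for the "select" backend this builds
 * "EVENT_NOselect", then upcases everything after the 8-character
 * "EVENT_NO" prefix, so setting EVENT_NOSELECT in the environment
 * disables that backend. */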
int
event_base_get_features(const struct event_base *base)
{
	return base->evsel->features;
}
void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_)
		event_errx(1, "%s was called twice!", __func__);
	if (event_debug_mode_too_late)
		event_errx(1, "%s must be called *before* creating any events "
		    "or event_bases", __func__);

	event_debug_mode_on_ = 1;

	HT_INIT(event_debug_map, &global_debug_map);
#endif
}
void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	struct event_debug_entry **ent, *victim;

	EVLOCK_LOCK(event_debug_map_lock_, 0);
	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
		victim = *ent;
		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

	event_debug_mode_on_ = 0;
#endif
}
struct event_base *
event_base_new_with_config(const struct event_config *cfg)
{
	int i;
	struct event_base *base;
	int should_check_environment;

#ifndef EVENT__DISABLE_DEBUG_MODE
	event_debug_mode_too_late = 1;
#endif

	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
		event_warn("%s: calloc", __func__);
		return NULL;
	}

	if (cfg)
		base->flags = cfg->flags;

	should_check_environment =
	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));

	{
		struct timeval tmp;
		int precise_time =
		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
		int flags;
		if (should_check_environment && !precise_time) {
			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
			if (precise_time)
				base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
		}
		flags = precise_time ? EV_MONOT_PRECISE : 0;
		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);

		gettime(base, &tmp);
	}

	min_heap_ctor_(&base->timeheap);

	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;
	base->th_notify_fd[0] = -1;
	base->th_notify_fd[1] = -1;

	TAILQ_INIT(&base->active_later_queue);

	evmap_io_initmap_(&base->io);
	evmap_signal_initmap_(&base->sigmap);
	event_changelist_init_(&base->changelist);

	base->evbase = NULL;

	if (cfg) {
		memcpy(&base->max_dispatch_time,
		    &cfg->max_dispatch_interval, sizeof(struct timeval));
		base->limit_callbacks_after_prio =
		    cfg->limit_callbacks_after_prio;
	} else {
		base->max_dispatch_time.tv_sec = -1;
		base->limit_callbacks_after_prio = 1;
	}
	if (cfg && cfg->max_dispatch_callbacks >= 0) {
		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
	} else {
		base->max_dispatch_callbacks = INT_MAX;
	}
	if (base->max_dispatch_callbacks == INT_MAX &&
	    base->max_dispatch_time.tv_sec == -1)
		base->limit_callbacks_after_prio = INT_MAX;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		if (cfg != NULL) {
			/* determine if this backend should be avoided */
			if (event_config_is_avoided_method(cfg,
				eventops[i]->name))
				continue;
			if ((eventops[i]->features & cfg->require_features)
			    != cfg->require_features)
				continue;
		}

		/* also obey the environment variables */
		if (should_check_environment &&
		    event_is_method_disabled(eventops[i]->name))
			continue;

		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL) {
		event_warnx("%s: no event mechanism available",
		    __func__);
		base->evsel = NULL;
		event_base_free(base);
		return NULL;
	}

	if (evutil_getenv_("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	if (event_base_priority_init(base, 1) < 0) {
		event_base_free(base);
		return NULL;
	}
	/* prepare for threading */

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
	event_debug_created_threadable_ctx_ = 1;
#endif

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (EVTHREAD_LOCKING_ENABLED() &&
	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
		int r;
		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
		EVTHREAD_ALLOC_COND(base->current_event_cond);
		r = evthread_make_base_notifiable(base);
		if (r < 0) {
			event_warnx("%s: Unable to make base notifiable.", __func__);
			event_base_free(base);
			return NULL;
		}
	}
#endif

#ifdef _WIN32
	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
		event_base_start_iocp_(base, cfg->n_cpus_hint);
#endif

	/* initialize watcher lists */
	for (i = 0; i < EVWATCH_MAX; ++i)
		TAILQ_INIT(&base->watchers[i]);

	return (base);
}
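
/* Sketch of how the config path above is typically exercised by a caller
 * (error handling omitted); all of these are public libevent APIs:
 *
 *     struct event_config *cfg = event_config_new();
 *     event_config_avoid_method(cfg, "select");
 *     event_config_require_features(cfg, EV_FEATURE_ET);
 *     struct event_base *base = event_base_new_with_config(cfg);
 *     event_config_free(cfg);
 */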
int
event_base_start_iocp_(struct event_base *base, int n_cpus)
{
#ifdef _WIN32
	if (base->iocp)
		return 0;
	base->iocp = event_iocp_port_launch_(n_cpus);
	if (!base->iocp) {
		event_warnx("%s: Couldn't launch IOCP", __func__);
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}

void
event_base_stop_iocp_(struct event_base *base)
{
#ifdef _WIN32
	int rv;

	if (!base->iocp)
		return;
	rv = event_iocp_shutdown_(base->iocp, -1);
	EVUTIL_ASSERT(rv >= 0);
	base->iocp = NULL;
#endif
}
static int
event_base_cancel_single_callback_(struct event_base *base,
    struct event_callback *evcb,
    int run_finalizers)
{
	int result = 0;

	if (evcb->evcb_flags & EVLIST_INIT) {
		struct event *ev = event_callback_to_event(evcb);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
			result = 1;
		}
	} else {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		event_callback_cancel_nolock_(base, evcb, 1);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		result = 1;
	}

	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			struct event *ev = event_callback_to_event(evcb);
			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
			break;
		}
		case EV_CLOSURE_CB_FINALIZE:
			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
			break;
		default:
			break;
		}
	}
	return result;
}
static int event_base_free_queues_(struct event_base *base, int run_finalizers)
{
	int deleted = 0, i;

	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb, *next;
		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
			next = TAILQ_NEXT(evcb, evcb_active_next);
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
			evcb = next;
		}
	}

	{
		struct event_callback *evcb;
		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
		}
	}

	return deleted;
}
static void
event_base_free_(struct event_base *base, int run_finalizers)
{
	int i;
	size_t n_deleted = 0;
	struct event *ev;
	struct evwatch *watcher;
	/* XXXX grab the lock? If there is contention when one thread frees
	 * the base, then the contending thread will be very sad soon. */

	/* event_base_free(NULL) is how to free the current_base if we
	 * made it with event_init and forgot to hold a reference to it. */
	if (base == NULL && current_base)
		base = current_base;
	/* Don't actually free NULL. */
	if (base == NULL) {
		event_warnx("%s: no base to free", __func__);
		return;
	}
	/* XXX(niels) - check for internal events first */

#ifdef _WIN32
	event_base_stop_iocp_(base);
#endif

	/* threading fds if we have them */
	if (base->th_notify_fd[0] != -1) {
		event_del(&base->th_notify);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Delete all non-internal events. */
	evmap_delete_all_(base);

	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		event_del(&ctl->timeout_event); /* Internal; doesn't count */
		event_debug_unassign(&ctl->timeout_event);
		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
			struct event *next = TAILQ_NEXT(ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
		mm_free(ctl);
	}
	if (base->common_timeout_queues)
		mm_free(base->common_timeout_queues);

	for (;;) {
		/* A finalizer can register yet another finalizer from within
		 * a finalizer, and if that one lands in active_later_queue it
		 * can move to activequeues, leaving events in activequeues
		 * after this function returns, which is not what we want (we
		 * even have an assertion for this).
		 *
		 * A simple case is a bufferevent with an underlying
		 * bufferevent (i.e. filters).
		 */
		int i = event_base_free_queues_(base, run_finalizers);
		event_debug(("%s: %d events freed", __func__, i));
		if (!i)
			break;
		n_deleted += i;
	}

	if (n_deleted)
		event_debug(("%s: "EV_SIZE_FMT" events were still set in base",
			__func__, n_deleted));

	while (LIST_FIRST(&base->once_events)) {
		struct event_once *eonce = LIST_FIRST(&base->once_events);
		LIST_REMOVE(eonce, next_once);
		mm_free(eonce);
	}

	if (base->evsel != NULL && base->evsel->dealloc != NULL)
		base->evsel->dealloc(base);

	for (i = 0; i < base->nactivequeues; ++i)
		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));

	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
	min_heap_dtor_(&base->timeheap);

	mm_free(base->activequeues);

	evmap_io_clear_(&base->io);
	evmap_signal_clear_(&base->sigmap);
	event_changelist_freemem_(&base->changelist);

	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
	EVTHREAD_FREE_COND(base->current_event_cond);

	/* Free all event watchers */
	for (i = 0; i < EVWATCH_MAX; ++i) {
		while (!TAILQ_EMPTY(&base->watchers[i])) {
			watcher = TAILQ_FIRST(&base->watchers[i]);
			TAILQ_REMOVE(&base->watchers[i], watcher, next);
			mm_free(watcher);
		}
	}

	/* If we're freeing current_base, there won't be a current_base. */
	if (base == current_base)
		current_base = NULL;
	mm_free(base);
}

void
event_base_free_nofinalize(struct event_base *base)
{
	event_base_free_(base, 0);
}

void
event_base_free(struct event_base *base)
{
	event_base_free_(base, 1);
}
/* Fake eventop; used to disable the backend temporarily inside event_reinit
 * so that we can call event_del() on an event without telling the backend.
 */
static int
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
    short events, void *fdinfo)
{
	return 0;
}

const struct eventop nil_eventop = {
	"nil",
	NULL, /* init: unused. */
	NULL, /* add: unused. */
	nil_backend_del, /* del: used, so needs to be killed. */
	NULL, /* dispatch: unused. */
	NULL, /* dealloc: unused. */
	0, 0, 0
};
/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel;
	int res = 0;
	int was_notifiable = 0;
	int had_signal_added = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	evsel = base->evsel;

	/* check if this event mechanism requires reinit on the backend */
	if (evsel->need_reinit) {
		/* We're going to call event_del() on our notify events (the
		 * ones that tell about signals and wakeup events).  But we
		 * don't actually want to tell the backend to change its
		 * state, since it might still share some resource (a kqueue,
		 * an epoll fd) with the parent process, and we don't want to
		 * delete the fds from _that_ backend, so we temporarily stub
		 * out the evsel with a replacement.
		 */
		base->evsel = &nil_eventop;
	}

	/* We need to re-create a new signal-notification fd and a new
	 * thread-notification fd.  Otherwise, we'll still share those with
	 * the parent process, which would make any notification sent to them
	 * get received by one or both of the event loops, more or less at
	 * random.
	 */
	if (base->sig.ev_signal_added) {
		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
		event_debug_unassign(&base->sig.ev_signal);
		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
		had_signal_added = 1;
		base->sig.ev_signal_added = 0;
	}
	if (base->sig.ev_signal_pair[0] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
	if (base->sig.ev_signal_pair[1] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
	if (base->th_notify_fn != NULL) {
		was_notifiable = 1;
		base->th_notify_fn = NULL;
	}
	if (base->th_notify_fd[0] != -1) {
		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Replace the original evsel. */
	base->evsel = evsel;

	if (evsel->need_reinit) {
		/* Reconstruct the backend through brute-force, so that we do
		 * not share any structures with the parent process. For some
		 * backends, this is necessary: epoll and kqueue, for
		 * instance, have events associated with a kernel
		 * structure. If we didn't reinitialize, we'd share that
		 * structure with the parent process, and any changes made by
		 * the parent would affect our backend's behavior (and vice
		 * versa).
		 */
		if (base->evsel->dealloc != NULL)
			base->evsel->dealloc(base);
		base->evbase = evsel->init(base);
		if (base->evbase == NULL) {
			event_errx(1,
			    "%s: could not reinitialize event mechanism",
			    __func__);
			res = -1;
			goto done;
		}

		/* Empty out the changelist (if any): we are starting from a
		 * blank slate. */
		event_changelist_freemem_(&base->changelist);

		/* Tell the event maps to re-inform the backend about all
		 * pending events. This will make the signal notification
		 * event get re-created if necessary. */
		if (evmap_reinit_(base) < 0)
			res = -1;
	} else {
		res = evsig_init_(base);
		if (res == 0 && had_signal_added) {
			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
			if (res == 0)
				base->sig.ev_signal_added = 1;
		}
	}

	/* If we were notifiable before, and nothing just exploded, become
	 * notifiable again. */
	if (was_notifiable && res == 0)
		res = evthread_make_base_notifiable_nolock_(base);

done:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (res);
}
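
/* Typical use: after fork(), the child calls event_reinit(base) before
 * running the loop again, so it stops sharing backend state (an epoll fd,
 * a kqueue, the signal/thread notification fds) with the parent. */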
/* Get the monotonic time for this event_base's timer */
int
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
{
	int rv = -1;

	if (base && tv) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return rv;
}
const char **
event_get_supported_methods(void)
{
	static const char **methods = NULL;
	const struct eventop **method;
	const char **tmp;
	int i = 0, k;

	/* count all methods */
	for (method = &eventops[0]; *method != NULL; ++method) {
		++i;
	}

	/* allocate one more than we need for the NULL pointer */
	tmp = mm_calloc((i + 1), sizeof(char *));
	if (tmp == NULL)
		return (NULL);

	/* populate the array with the supported methods */
	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
		tmp[i++] = eventops[k]->name;
	}
	tmp[i] = NULL;

	if (methods != NULL)
		mm_free((char **)methods);

	methods = tmp;

	return (methods);
}
struct event_config *
event_config_new(void)
{
	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));

	if (cfg == NULL)
		return (NULL);

	TAILQ_INIT(&cfg->entries);
	cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks = INT_MAX;
	cfg->limit_callbacks_after_prio = 1;

	return (cfg);
}
static void
event_config_entry_free(struct event_config_entry *entry)
{
	if (entry->avoid_method != NULL)
		mm_free((char *)entry->avoid_method);
	mm_free(entry);
}

void
event_config_free(struct event_config *cfg)
{
	struct event_config_entry *entry;

	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
		TAILQ_REMOVE(&cfg->entries, entry, next);
		event_config_entry_free(entry);
	}
	mm_free(cfg);
}
int
event_config_set_flag(struct event_config *cfg, int flag)
{
	if (!cfg)
		return -1;
	cfg->flags |= flag;
	return 0;
}

int
event_config_avoid_method(struct event_config *cfg, const char *method)
{
	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
	if (entry == NULL)
		return (-1);

	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
		mm_free(entry);
		return (-1);
	}

	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);

	return (0);
}

int
event_config_require_features(struct event_config *cfg,
    int features)
{
	if (!cfg)
		return (-1);
	cfg->require_features = features;
	return (0);
}

int
event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
{
	if (!cfg)
		return (-1);
	cfg->n_cpus_hint = cpus;
	return (0);
}
int
event_config_set_max_dispatch_interval(struct event_config *cfg,
    const struct timeval *max_interval, int max_callbacks, int min_priority)
{
	if (max_interval)
		memcpy(&cfg->max_dispatch_interval, max_interval,
		    sizeof(struct timeval));
	else
		cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks =
	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
	if (min_priority < 0)
		min_priority = 0;
	cfg->limit_callbacks_after_prio = min_priority;
	return (0);
}
int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i, r;
	r = -1;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
	    || npriorities >= EVENT_MAX_PRIORITIES)
		goto err;

	if (npriorities == base->nactivequeues)
		goto ok;

	if (base->nactivequeues) {
		mm_free(base->activequeues);
		base->nactivequeues = 0;
	}

	/* Allocate our priority queues */
	base->activequeues = (struct evcallback_list *)
	    mm_calloc(npriorities, sizeof(struct evcallback_list));
	if (base->activequeues == NULL) {
		event_warn("%s: calloc", __func__);
		goto err;
	}
	base->nactivequeues = npriorities;

	for (i = 0; i < base->nactivequeues; ++i) {
		TAILQ_INIT(&base->activequeues[i]);
	}

ok:
	r = 0;
err:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (r);
}
int
event_base_get_npriorities(struct event_base *base)
{
	int n;
	if (base == NULL)
		base = current_base;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	n = base->nactivequeues;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (n);
}
int
event_base_get_num_events(struct event_base *base, unsigned int type)
{
	int r = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (type & EVENT_BASE_COUNT_ACTIVE)
		r += base->event_count_active;

	if (type & EVENT_BASE_COUNT_VIRTUAL)
		r += base->virtual_event_count;

	if (type & EVENT_BASE_COUNT_ADDED)
		r += base->event_count;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return r;
}
int
event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
{
	int r = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (type & EVENT_BASE_COUNT_ACTIVE) {
		r += base->event_count_active_max;
		if (clear)
			base->event_count_active_max = 0;
	}

	if (type & EVENT_BASE_COUNT_VIRTUAL) {
		r += base->virtual_event_count_max;
		if (clear)
			base->virtual_event_count_max = 0;
	}

	if (type & EVENT_BASE_COUNT_ADDED) {
		r += base->event_count_max;
		if (clear)
			base->event_count_max = 0;
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return r;
}
/* Returns true iff we're currently watching any events. */
static int
event_haveevents(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	return (base->virtual_event_count > 0 || base->event_count > 0);
}
1374 /* "closure" function called when processing active signal events */
1376 event_signal_closure(struct event_base *base, struct event *ev)
1381 /* Allows deletes to work */
1382 ncalls = ev->ev_ncalls;
1384 ev->ev_pncalls = &ncalls;
1385 EVBASE_RELEASE_LOCK(base, th_base_lock);
1388 ev->ev_ncalls = ncalls;
1390 ev->ev_pncalls = NULL;
1391 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1393 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1394 should_break = base->event_break;
1395 EVBASE_RELEASE_LOCK(base, th_base_lock);
1399 ev->ev_pncalls = NULL;
/* Common timeouts are special timeouts that are handled as queues rather than
 * in the minheap.  This is more efficient than the minheap if we happen to
 * know that we're going to get several thousands of timeout events all with
 * the same timeout value.
 *
 * Since all our timeout handling code assumes timevals can be copied,
 * assigned, etc, we can't use "magic pointer" to encode these common
 * timeouts.  Searching through a list to see if every timeout is common could
 * also get inefficient.  Instead, we take advantage of the fact that tv_usec
 * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
 */

#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
#define COMMON_TIMEOUT_IDX_SHIFT 20
#define COMMON_TIMEOUT_MASK     0xf0000000
#define COMMON_TIMEOUT_MAGIC    0x50000000

#define COMMON_TIMEOUT_IDX(tv) \
	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
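
/* Worked example of the encoding above: a common timeout at index 3 with a
 * 500000-microsecond fraction stores
 *     tv_usec = COMMON_TIMEOUT_MAGIC | (3 << COMMON_TIMEOUT_IDX_SHIFT) | 500000
 *             = 0x50000000 | 0x00300000 | 0x0007a120 = 0x5037a120,
 * and COMMON_TIMEOUT_IDX() recovers the 3 by masking and shifting. */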
/** Return true iff 'tv' is a common timeout in 'base' */
static inline int
is_common_timeout(const struct timeval *tv,
    const struct event_base *base)
{
	int idx;
	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
		return 0;
	idx = COMMON_TIMEOUT_IDX(tv);
	return idx < base->n_common_timeouts;
}

/* True iff tv1 and tv2 have the same common-timeout index, or if neither
 * one is a common timeout. */
static inline int
is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
{
	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
	    (tv2->tv_usec & ~MICROSECONDS_MASK);
}
/** Requires that 'tv' is a common timeout. Return the corresponding
 * common_timeout_list. */
static inline struct common_timeout_list *
get_common_timeout_list(struct event_base *base, const struct timeval *tv)
{
	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
}

#if 0
static inline int
common_timeout_ok(const struct timeval *tv,
    struct event_base *base)
{
	const struct timeval *expect =
	    &get_common_timeout_list(base, tv)->duration;
	return tv->tv_sec == expect->tv_sec &&
	    tv->tv_usec == expect->tv_usec;
}
#endif
/* Add the timeout for the first event in given common timeout list to the
 * event_base's minheap. */
static void
common_timeout_schedule(struct common_timeout_list *ctl,
    const struct timeval *now, struct event *head)
{
	struct timeval timeout = head->ev_timeout;
	timeout.tv_usec &= MICROSECONDS_MASK;
	event_add_nolock_(&ctl->timeout_event, &timeout, 1);
}
/* Callback: invoked when the timeout for a common timeout queue triggers.
 * This means that (at least) the first event in that queue should be run,
 * and the timeout should be rescheduled if there are more events. */
static void
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
{
	struct timeval now;
	struct common_timeout_list *ctl = arg;
	struct event_base *base = ctl->base;
	struct event *ev = NULL;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	gettime(base, &now);
	while (1) {
		ev = TAILQ_FIRST(&ctl->events);
		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
		    (ev->ev_timeout.tv_sec == now.tv_sec &&
			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
			break;
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
		event_active_nolock_(ev, EV_TIMEOUT, 1);
	}
	if (ev)
		common_timeout_schedule(ctl, &now, ev);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#define MAX_COMMON_TIMEOUTS 256

const struct timeval *
event_base_init_common_timeout(struct event_base *base,
    const struct timeval *duration)
{
	int i;
	struct timeval tv;
	const struct timeval *result = NULL;
	struct common_timeout_list *new_ctl;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (duration->tv_usec > 1000000) {
		memcpy(&tv, duration, sizeof(struct timeval));
		if (is_common_timeout(duration, base))
			tv.tv_usec &= MICROSECONDS_MASK;
		tv.tv_sec += tv.tv_usec / 1000000;
		tv.tv_usec %= 1000000;
		duration = &tv;
	}
	for (i = 0; i < base->n_common_timeouts; ++i) {
		const struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		if (duration->tv_sec == ctl->duration.tv_sec &&
		    duration->tv_usec ==
		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
			result = &ctl->duration;
			goto done;
		}
	}
	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
		event_warnx("%s: Too many common timeouts already in use; "
		    "we only support %d per event_base", __func__,
		    MAX_COMMON_TIMEOUTS);
		goto done;
	}
	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
		int n = base->n_common_timeouts < 16 ? 16 :
		    base->n_common_timeouts*2;
		struct common_timeout_list **newqueues =
		    mm_realloc(base->common_timeout_queues,
			n*sizeof(struct common_timeout_list *));
		if (!newqueues) {
			event_warn("%s: realloc", __func__);
			goto done;
		}
		base->n_common_timeouts_allocated = n;
		base->common_timeout_queues = newqueues;
	}
	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
	if (!new_ctl) {
		event_warn("%s: calloc", __func__);
		goto done;
	}
	TAILQ_INIT(&new_ctl->events);
	new_ctl->duration.tv_sec = duration->tv_sec;
	new_ctl->duration.tv_usec =
	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
	evtimer_assign(&new_ctl->timeout_event, base,
	    common_timeout_callback, new_ctl);
	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
	event_priority_set(&new_ctl->timeout_event, 0);
	new_ctl->base = base;
	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
	result = &new_ctl->duration;

done:
	if (result)
		EVUTIL_ASSERT(is_common_timeout(result, base));

	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return result;
}
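
/* Sketch of the intended use of this API: register the shared duration once,
 * then add many events with the returned "magic" timeval so they all land in
 * one queue instead of the minheap:
 *
 *     struct timeval five_seconds = { 5, 0 };
 *     const struct timeval *tv =
 *         event_base_init_common_timeout(base, &five_seconds);
 *     event_add(ev1, tv);
 *     event_add(ev2, tv);
 */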
/* Closure function invoked when we're activating a persistent event. */
static inline void
event_persist_closure(struct event_base *base, struct event *ev)
{
	void (*evcb_callback)(evutil_socket_t, short, void *);

	// Other fields of *ev that must be stored before executing
	evutil_socket_t evcb_fd;
	short evcb_res;
	void *evcb_arg;

	/* reschedule the persistent event if we have a timeout. */
	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
		/* If there was a timeout, we want it to run at an interval of
		 * ev_io_timeout after the last time it was _scheduled_ for,
		 * not ev_io_timeout after _now_.  If it fired for another
		 * reason, though, the timeout ought to start ticking _now_. */
		struct timeval run_at, relative_to, delay, now;
		ev_uint32_t usec_mask = 0;
		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
			&ev->ev_io_timeout));
		gettime(base, &now);
		if (is_common_timeout(&ev->ev_timeout, base)) {
			delay = ev->ev_io_timeout;
			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
			delay.tv_usec &= MICROSECONDS_MASK;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
				relative_to.tv_usec &= MICROSECONDS_MASK;
			} else {
				relative_to = now;
			}
		} else {
			delay = ev->ev_io_timeout;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
			} else {
				relative_to = now;
			}
		}
		evutil_timeradd(&relative_to, &delay, &run_at);
		if (evutil_timercmp(&run_at, &now, <)) {
			/* Looks like we missed at least one invocation due to
			 * a clock jump, not running the event loop for a
			 * while, really slow callbacks, or
			 * something. Reschedule relative to now.
			 */
			evutil_timeradd(&now, &delay, &run_at);
		}
		run_at.tv_usec |= usec_mask;
		event_add_nolock_(ev, &run_at, 1);
	}

	// Save our callback before we release the lock
	evcb_callback = ev->ev_callback;
	evcb_fd = ev->ev_fd;
	evcb_res = ev->ev_res;
	evcb_arg = ev->ev_arg;

	// Release the lock
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	// Execute the callback
	(evcb_callback)(evcb_fd, evcb_res, evcb_arg);
}
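
/* Illustrative timeline for the rescheduling rule above (made-up numbers):
 * a persistent event with a 1.0 s ev_io_timeout scheduled for time T fires
 * late at T+1.02 s with EV_TIMEOUT set; the next run_at becomes T+2.0 s
 * (relative to the old schedule), not T+2.02 s, so the period does not
 * drift.  If it fires because the fd became ready instead, the next timeout
 * starts ticking a full 1.0 s from now. */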
/*
  Helper for event_process_active to process all the events in a single queue,
  releasing the lock as we go.  This function requires that the lock be held
  when it's invoked.  Returns -1 if we get a signal or an event_break that
  means we should stop processing any active events now.  Otherwise returns
  the number of non-internal event_callbacks that we processed.
*/
static int
event_process_active_single_queue(struct event_base *base,
    struct evcallback_list *activeq,
    int max_to_process, const struct timeval *endtime)
{
	struct event_callback *evcb;
	int count = 0;

	EVUTIL_ASSERT(activeq != NULL);

	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
		struct event *ev = NULL;
		if (evcb->evcb_flags & EVLIST_INIT) {
			ev = event_callback_to_event(evcb);

			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
				event_queue_remove_active(base, evcb);
			else
				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
			event_debug((
			    "event_process_active: event: %p, %s%s%scall %p",
			    ev,
			    ev->ev_res & EV_READ ? "EV_READ " : " ",
			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
			    ev->ev_callback));
		} else {
			event_queue_remove_active(base, evcb);
			event_debug(("event_process_active: event_callback %p, "
				"closure %d, call %p",
				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
		}

		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
			++count;

		base->current_event = evcb;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		base->current_event_waiters = 0;
#endif

		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_SIGNAL:
			EVUTIL_ASSERT(ev != NULL);
			event_signal_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT_PERSIST:
			EVUTIL_ASSERT(ev != NULL);
			event_persist_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT: {
			void (*evcb_callback)(evutil_socket_t, short, void *);
			short res;
			EVUTIL_ASSERT(ev != NULL);
			evcb_callback = *ev->ev_callback;
			res = ev->ev_res;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_callback(ev->ev_fd, res, ev->ev_arg);
		}
		break;
		case EV_CLOSURE_CB_SELF: {
			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_selfcb(evcb, evcb->evcb_arg);
		}
		break;
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			void (*evcb_evfinalize)(struct event *, void *);
			int evcb_closure = evcb->evcb_closure;
			EVUTIL_ASSERT(ev != NULL);
			base->current_event = NULL;
			evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			event_debug_note_teardown_(ev);
			evcb_evfinalize(ev, ev->ev_arg);
			if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
		}
		break;
		case EV_CLOSURE_CB_FINALIZE: {
			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
			base->current_event = NULL;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_cbfinalize(evcb, evcb->evcb_arg);
		}
		break;
		default:
			EVUTIL_ASSERT(0);
		}

		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		base->current_event = NULL;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		if (base->current_event_waiters) {
			base->current_event_waiters = 0;
			EVTHREAD_COND_BROADCAST(base->current_event_cond);
		}
#endif

		if (base->event_break)
			return -1;
		if (count >= max_to_process)
			return count;
		if (count && endtime) {
			struct timeval now;
			update_time_cache(base);
			gettime(base, &now);
			if (evutil_timercmp(&now, endtime, >=))
				return count;
		}
		if (base->event_continue)
			break;
	}
	return count;
}
/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */

static int
event_process_active(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	struct evcallback_list *activeq = NULL;
	int i, c = 0;
	const struct timeval *endtime;
	struct timeval tv;
	const int maxcb = base->max_dispatch_callbacks;
	const int limit_after_prio = base->limit_callbacks_after_prio;
	if (base->max_dispatch_time.tv_sec >= 0) {
		update_time_cache(base);
		gettime(base, &tv);
		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
		endtime = &tv;
	} else {
		endtime = NULL;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
			base->event_running_priority = i;
			activeq = &base->activequeues[i];
			if (i < limit_after_prio)
				c = event_process_active_single_queue(base, activeq,
				    INT_MAX, NULL);
			else
				c = event_process_active_single_queue(base, activeq,
				    maxcb, endtime);
			if (c < 0) {
				goto done;
			} else if (c > 0)
				break; /* Processed a real event; do not
					* consider lower-priority events */
			/* If we get here, all of the events we processed
			 * were internal.  Continue. */
		}
	}

done:
	base->event_running_priority = -1;

	return c;
}
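
/* Example of the starvation rule above: with three priorities, a steady
 * stream of priority-0 callbacks keeps c > 0 on every pass, so the loop
 * breaks before queues 1 and 2 are examined; lower-numbered (higher
 * priority) queues must drain before higher-numbered ones run. */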
/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}
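
/* Minimal usage sketch for the dispatch API (on_readable is a hypothetical
 * user callback):
 *
 *     struct event_base *base = event_base_new();
 *     struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST,
 *         on_readable, NULL);
 *     event_add(ev, NULL);
 *     event_base_dispatch(base);   // runs until no events remain
 *     event_free(ev);
 *     event_base_free(base);
 */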
const char *
event_base_get_method(const struct event_base *base)
{
	EVUTIL_ASSERT(base);
	return (base->evsel->name);
}
/** Callback: used to implement event_base_loopexit by telling the event_base
 * that it's time to exit its loop. */
static void
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}

int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}
int
event_base_loopbreak(struct event_base *event_base)
{
	int r = 0;
	if (event_base == NULL)
		return (-1);

	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	event_base->event_break = 1;

	if (EVBASE_NEED_NOTIFY(event_base)) {
		r = evthread_notify_base(event_base);
	} else {
		r = (0);
	}
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return r;
}

int
event_base_loopcontinue(struct event_base *event_base)
{
	int r = 0;
	if (event_base == NULL)
		return (-1);

	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	event_base->event_continue = 1;

	if (EVBASE_NEED_NOTIFY(event_base)) {
		r = evthread_notify_base(event_base);
	} else {
		r = (0);
	}
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return r;
}

int
event_base_got_break(struct event_base *event_base)
{
	int res;
	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	res = event_base->event_break;
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return res;
}

int
event_base_got_exit(struct event_base *event_base)
{
	int res;
	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	res = event_base->event_gotterm;
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return res;
}
/* not thread safe */

int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}
int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done, retval = 0;
	struct evwatch_prepare_cb_info prepare_info;
	struct evwatch_check_cb_info check_info;
	struct evwatch *watcher;

	/* Grab the lock.  We will release it inside evsel.dispatch, and again
	 * as we invoke watchers and user callbacks. */
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (base->running_loop) {
		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
		    " can run on each event_base at once.", __func__);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		return -1;
	}

	base->running_loop = 1;

	clear_time_cache(base);

	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
		evsig_set_base_(base);

	done = 0;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	base->th_owner_id = EVTHREAD_GET_ID();
#endif

	base->event_gotterm = base->event_break = 0;

	while (!done) {
		base->event_continue = 0;
		base->n_deferreds_queued = 0;

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			break;
		}

		if (base->event_break) {
			break;
		}

		tv_p = &tv;
		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
			event_debug(("%s: no events registered.", __func__));
			retval = 1;
			goto done;
		}

		event_queue_make_later_events_active(base);

		/* Invoke prepare watchers before polling for events */
		prepare_info.timeout = tv_p;
		TAILQ_FOREACH(watcher, &base->watchers[EVWATCH_PREPARE], next) {
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			(*watcher->callback.prepare)(watcher, &prepare_info, watcher->arg);
			EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		}

		clear_time_cache(base);

		res = evsel->dispatch(base, tv_p);

		if (res == -1) {
			event_debug(("%s: dispatch returned unsuccessfully.",
				__func__));
			retval = -1;
			goto done;
		}

		update_time_cache(base);

		/* Invoke check watchers after polling for events, and before
		 * processing them */
		TAILQ_FOREACH(watcher, &base->watchers[EVWATCH_CHECK], next) {
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			(*watcher->callback.check)(watcher, &check_info, watcher->arg);
			EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		}

		timeout_process(base);

		if (N_ACTIVE_CALLBACKS(base)) {
			int n = event_process_active(base);
			if ((flags & EVLOOP_ONCE)
			    && N_ACTIVE_CALLBACKS(base) == 0
			    && n != 0)
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}
	event_debug(("%s: asked to terminate loop.", __func__));

done:
	clear_time_cache(base);
	base->running_loop = 0;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return (retval);
}
/* One-time callback to implement event_base_once: invokes the user callback,
 * then deletes the allocated storage */
static void
event_once_cb(evutil_socket_t fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
	LIST_REMOVE(eonce, next_once);
	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
	event_debug_unassign(&eonce->ev);
	mm_free(eonce);
}
/* not threadsafe, event scheduled once. */
int
event_once(evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}
/* Schedules an event once */
int
event_base_once(struct event_base *base, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	int res = 0;
	int activate = 0;

	if (!base)
		return (-1);

	/* We cannot support signals that just fire once, or persistent
	 * events. */
	if (events & (EV_SIGNAL|EV_PERSIST))
		return (-1);

	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);

		if (tv == NULL || ! evutil_timerisset(tv)) {
			/* If the event is going to become active immediately,
			 * don't put it on the timeout queue.  This is one
			 * idiom for scheduling a callback, so let's make
			 * it fast (and order-preserving). */
			activate = 1;
		}
	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
		events &= EV_READ|EV_WRITE|EV_CLOSED;

		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		mm_free(eonce);
		return (-1);
	}

	if (res == 0) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		if (activate)
			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
		else
			res = event_add_nolock_(&eonce->ev, tv, 0);

		if (res != 0) {
			mm_free(eonce);
			return (res);
		} else {
			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
		}
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return (0);
}
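
/* Example use of the one-shot API (my_cb/my_arg are placeholder names):
 * schedule a callback two seconds from now; the internal event_once storage
 * is freed by event_once_cb after the callback runs.
 *
 *     struct timeval two_sec = { 2, 0 };
 *     event_base_once(base, -1, EV_TIMEOUT, my_cb, my_arg, &two_sec);
 */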
int
event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
{
	if (!base)
		base = current_base;
	if (arg == &event_self_cbarg_ptr_)
		arg = ev;

	if (!(events & EV_SIGNAL))
		event_debug_assert_socket_nonblocking_(fd);
	event_debug_assert_not_added_(ev);

	ev->ev_base = base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	if (events & EV_SIGNAL) {
		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
			event_warnx("%s: EV_SIGNAL is not compatible with "
			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
			return -1;
		}
		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
	} else {
		if (events & EV_PERSIST) {
			evutil_timerclear(&ev->ev_io_timeout);
			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
		} else {
			ev->ev_closure = EV_CLOSURE_EVENT;
		}
	}

	min_heap_elem_init_(ev);

	if (base != NULL) {
		/* by default, we put new events into the middle priority */
		ev->ev_pri = base->nactivequeues / 2;
	}

	event_debug_note_setup_(ev);

	return 0;
}
int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	event_debug_assert_is_setup_(ev);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}
void
event_set(struct event *ev, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *), void *arg)
{
	int r;
	r = event_assign(ev, current_base, fd, events, callback, arg);
	EVUTIL_ASSERT(r == 0);
}
void *
event_self_cbarg(void)
{
	return &event_self_cbarg_ptr_;
}
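
/* Usage note: event_self_cbarg() lets an event's callback receive the event
 * itself as its argument; event_assign() patches the sentinel in above.
 *
 *     struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST,
 *         cb, event_self_cbarg());
 */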
struct event *
event_base_get_running_event(struct event_base *base)
{
	struct event *ev = NULL;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (EVBASE_IN_THREAD(base)) {
		struct event_callback *evcb = base->current_event;
		if (evcb->evcb_flags & EVLIST_INIT)
			ev = event_callback_to_event(evcb);
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return ev;
}
struct event *
event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
{
	struct event *ev;
	ev = mm_malloc(sizeof(struct event));
	if (ev == NULL)
		return (NULL);
	if (event_assign(ev, base, fd, events, cb, arg) < 0) {
		mm_free(ev);
		return (NULL);
	}

	return (ev);
}
void
event_free(struct event *ev)
{
	/* This assert is disabled, so that events which have been finalized
	 * remain a valid target for event_free(). */
	// event_debug_assert_is_setup_(ev);

	/* make sure that this event won't be coming back to haunt us. */
	event_del(ev);
	event_debug_note_teardown_(ev);
	mm_free(ev);
}
void
event_debug_unassign(struct event *ev)
{
	event_debug_assert_not_added_(ev);
	event_debug_note_teardown_(ev);

	ev->ev_flags &= ~EVLIST_INIT;
}
#define EVENT_FINALIZE_FREE_ 0x10000
static int
event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;

	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	ev->ev_closure = closure;
	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
	event_active_nolock_(ev, EV_FINALIZE, 1);
	ev->ev_flags |= EVLIST_FINALIZING;
	return 0;
}
static int
event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	int r;
	struct event_base *base = ev->ev_base;
	if (EVUTIL_FAILURE_CHECK(!base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = event_finalize_nolock_(base, flags, ev, cb);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}

int
event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	return event_finalize_impl_(flags, ev, cb);
}

int
event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
}
void
event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
{
	struct event *ev = NULL;
	if (evcb->evcb_flags & EVLIST_INIT) {
		ev = event_callback_to_event(evcb);
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	} else {
		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
	}

	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
	evcb->evcb_cb_union.evcb_cbfinalize = cb;
	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
	evcb->evcb_flags |= EVLIST_FINALIZING;
}

void
event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_callback_finalize_nolock_(base, flags, evcb, cb);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
2354 /** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided
2355 * callback will be invoked on *one of them*, after they have *all* stopped executing.
2358 event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2360 int n_pending = 0, i;
2363 base = current_base;
2365 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2367 event_debug(("%s: %d events finalizing", __func__, n_cbs));
2369 /* At most one can be currently executing; the rest we just
2370 * cancel... But we always make sure that the finalize callback runs. */
2372 for (i = 0; i < n_cbs; ++i) {
2373 struct event_callback *evcb = evcbs[i];
2374 if (evcb == base->current_event) {
2375 event_callback_finalize_nolock_(base, 0, evcb, cb);
2378 event_callback_cancel_nolock_(base, evcb, 0);
2382 if (n_pending == 0) {
2383 /* Just do the first one. */
2384 event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2387 EVBASE_RELEASE_LOCK(base, th_base_lock);
2392 * Sets the priority of an event; if the event is already active,
2393 * changing the priority will fail.
2397 event_priority_set(struct event *ev, int pri)
2399 event_debug_assert_is_setup_(ev);
2401 if (ev->ev_flags & EVLIST_ACTIVE)
2403 if (pri < 0 || pri >= ev->ev_base->nactivequeues)
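/* Editor's sketch: priorities require the base to have been set up with
 * more than one active queue, and must be assigned while the event is not
 * active. A minimal sequence, assuming two priority levels: */
#if 0
event_base_priority_init(base, 2);	/* queues 0 (most urgent) and 1 */
event_priority_set(ev, 0);		/* call before event_add() */
event_add(ev, NULL);
#endif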
2412 * Checks if a specific event is pending or scheduled.
2416 event_pending(const struct event *ev, short event, struct timeval *tv)
2420 if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2421 event_warnx("%s: event has no event_base set.", __func__);
2425 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2426 event_debug_assert_is_setup_(ev);
2428 if (ev->ev_flags & EVLIST_INSERTED)
2429 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2430 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2431 flags |= ev->ev_res;
2432 if (ev->ev_flags & EVLIST_TIMEOUT)
2433 flags |= EV_TIMEOUT;
2435 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2437 /* See if there is a timeout that we should report */
2438 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2439 struct timeval tmp = ev->ev_timeout;
2440 tmp.tv_usec &= MICROSECONDS_MASK;
2441 /* correctly remap to real time */
2442 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2445 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2447 return (flags & event);
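/* Editor's sketch: querying pending status. The return value is the subset
 * of the requested flags that is pending; if EV_TIMEOUT is requested and
 * pending, *tv gets the absolute expiry time, remapped as above. */
#if 0
struct timeval expiry;
if (event_pending(ev, EV_READ | EV_TIMEOUT, &expiry)) {
	/* ev is still pending for read and/or its timeout */
}
#endif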
2451 event_initialized(const struct event *ev)
2453 if (!(ev->ev_flags & EVLIST_INIT))
2460 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2462 event_debug_assert_is_setup_(event);
2465 *base_out = event->ev_base;
2467 *fd_out = event->ev_fd;
2469 *events_out = event->ev_events;
2471 *callback_out = event->ev_callback;
2473 *arg_out = event->ev_arg;
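/* Editor's sketch: recovering an event's configuration, e.g. before
 * re-assigning it with different flags: */
#if 0
struct event_base *b;
evutil_socket_t fd;
short what;
event_callback_fn cb;
void *arg;
event_get_assignment(ev, &b, &fd, &what, &cb, &arg);
#endif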
2477 event_get_struct_event_size(void)
2479 return sizeof(struct event);
2483 event_get_fd(const struct event *ev)
2485 event_debug_assert_is_setup_(ev);
2490 event_get_base(const struct event *ev)
2492 event_debug_assert_is_setup_(ev);
2497 event_get_events(const struct event *ev)
2499 event_debug_assert_is_setup_(ev);
2500 return ev->ev_events;
2504 event_get_callback(const struct event *ev)
2506 event_debug_assert_is_setup_(ev);
2507 return ev->ev_callback;
2511 event_get_callback_arg(const struct event *ev)
2513 event_debug_assert_is_setup_(ev);
2518 event_get_priority(const struct event *ev)
2520 event_debug_assert_is_setup_(ev);
2525 event_add(struct event *ev, const struct timeval *tv)
2529 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2530 event_warnx("%s: event has no event_base set.", __func__);
2534 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2536 res = event_add_nolock_(ev, tv, 0);
2538 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
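/* Editor's sketch: from the caller's side, tv is a relative interval here
 * (tv_is_absolute is 0), and a NULL tv arms the event with no timeout: */
#if 0
struct timeval five_sec = { 5, 0 };
event_add(ev, &five_sec);	/* EV_TIMEOUT after ~5s of inactivity */
event_add(ev2, NULL);		/* pending until its fd or signal fires */
#endif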
2543 /* Helper callback: wake an event_base from another thread. This version
2544 * works by writing a byte to one end of a socketpair, so that the event_base
2545 * listening on the other end will wake up as the corresponding event triggers. */
2548 evthread_notify_base_default(struct event_base *base)
2554 r = send(base->th_notify_fd[1], buf, 1, 0);
2556 r = write(base->th_notify_fd[1], buf, 1);
2558 return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2561 #ifdef EVENT__HAVE_EVENTFD
2562 /* Helper callback: wake an event_base from another thread. This version
2563 * assumes that you have a working eventfd() implementation. */
2565 evthread_notify_base_eventfd(struct event_base *base)
2567 ev_uint64_t msg = 1;
2570 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2571 } while (r < 0 && errno == EAGAIN);
2573 return (r < 0) ? -1 : 0;
2578 /** Tell the thread currently running the event_loop for base (if any) that it
2579 * needs to stop waiting in its dispatch function (if it is) and process all
2580 * active callbacks. */
2582 evthread_notify_base(struct event_base *base)
2584 EVENT_BASE_ASSERT_LOCKED(base);
2585 if (!base->th_notify_fn)
2587 if (base->is_notify_pending)
2589 base->is_notify_pending = 1;
2590 return base->th_notify_fn(base);
2593 /* Implementation function to remove a timeout on a currently pending event.
2596 event_remove_timer_nolock_(struct event *ev)
2598 struct event_base *base = ev->ev_base;
2600 EVENT_BASE_ASSERT_LOCKED(base);
2601 event_debug_assert_is_setup_(ev);
2603 event_debug(("event_remove_timer_nolock: event: %p", ev));
2605 /* If it's not pending on a timeout, we don't need to do anything. */
2606 if (ev->ev_flags & EVLIST_TIMEOUT) {
2607 event_queue_remove_timeout(base, ev);
2608 evutil_timerclear(&ev->ev_io_timeout);
2615 event_remove_timer(struct event *ev)
2619 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2620 event_warnx("%s: event has no event_base set.", __func__);
2624 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2626 res = event_remove_timer_nolock_(ev);
2628 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
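/* Editor's sketch: event_remove_timer() drops only the pending timeout;
 * the event remains pending for its I/O or signal condition: */
#if 0
struct timeval deadline = { 10, 0 };
event_add(read_ev, &deadline);	/* read, with a 10s deadline */
event_remove_timer(read_ev);	/* deadline gone; still waiting for read */
#endif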
2633 /* Implementation function to add an event. Works just like event_add,
2634 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
2635 * we treat tv as an absolute time, not as an interval to add to the current time. */
2638 event_add_nolock_(struct event *ev, const struct timeval *tv,
2641 struct event_base *base = ev->ev_base;
2645 EVENT_BASE_ASSERT_LOCKED(base);
2646 event_debug_assert_is_setup_(ev);
2649 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2651 EV_SOCK_ARG(ev->ev_fd),
2652 ev->ev_events & EV_READ ? "EV_READ " : " ",
2653 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2654 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2655 tv ? "EV_TIMEOUT " : " ",
2658 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2660 if (ev->ev_flags & EVLIST_FINALIZING) {
2666 * prepare for timeout insertion further below; if we get a
2667 * failure on any step, we should not change any state.
2669 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2670 if (min_heap_reserve_(&base->timeheap,
2671 1 + min_heap_size_(&base->timeheap)) == -1)
2672 return (-1); /* ENOMEM == errno */
2675 /* If the main thread is currently executing a signal event's
2676 * callback, and we are not the main thread, then we want to wait
2677 * until the callback is done before we mess with the event, or else
2678 * we can race on ev_ncalls and ev_pncalls below. */
2679 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2680 if (base->current_event == event_to_event_callback(ev) &&
2681 (ev->ev_events & EV_SIGNAL)
2682 && !EVBASE_IN_THREAD(base)) {
2683 ++base->current_event_waiters;
2684 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2688 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2689 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2690 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2691 res = evmap_io_add_(base, ev->ev_fd, ev);
2692 else if (ev->ev_events & EV_SIGNAL)
2693 res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2695 event_queue_insert_inserted(base, ev);
2697 /* evmap says we need to notify the main thread. */
2704 * we should change the timeout state only if the previous event
2705 * addition succeeded.
2707 if (res != -1 && tv != NULL) {
2710 #ifdef USE_REINSERT_TIMEOUT
2712 int old_timeout_idx;
2716 * for persistent timeout events, we remember the
2717 * timeout value and re-add the event.
2719 * If tv_is_absolute, this was already set.
2721 if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2722 ev->ev_io_timeout = *tv;
2724 #ifndef USE_REINSERT_TIMEOUT
2725 if (ev->ev_flags & EVLIST_TIMEOUT) {
2726 event_queue_remove_timeout(base, ev);
2730 /* Check if it is active due to a timeout. Rescheduling
2731 * this timeout before the callback can be executed
2732 * removes it from the active list. */
2733 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2734 (ev->ev_res & EV_TIMEOUT)) {
2735 if (ev->ev_events & EV_SIGNAL) {
2736 /* See if we are just active executing
2737 * this event in a loop
2739 if (ev->ev_ncalls && ev->ev_pncalls) {
2741 *ev->ev_pncalls = 0;
2745 event_queue_remove_active(base, event_to_event_callback(ev));
2748 gettime(base, &now);
2750 common_timeout = is_common_timeout(tv, base);
2751 #ifdef USE_REINSERT_TIMEOUT
2752 was_common = is_common_timeout(&ev->ev_timeout, base);
2753 old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2756 if (tv_is_absolute) {
2757 ev->ev_timeout = *tv;
2758 } else if (common_timeout) {
2759 struct timeval tmp = *tv;
2760 tmp.tv_usec &= MICROSECONDS_MASK;
2761 evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2762 ev->ev_timeout.tv_usec |=
2763 (tv->tv_usec & ~MICROSECONDS_MASK);
2765 evutil_timeradd(&now, tv, &ev->ev_timeout);
2769 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2770 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2772 #ifdef USE_REINSERT_TIMEOUT
2773 event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2775 event_queue_insert_timeout(base, ev);
2778 if (common_timeout) {
2779 struct common_timeout_list *ctl =
2780 get_common_timeout_list(base, &ev->ev_timeout);
2781 if (ev == TAILQ_FIRST(&ctl->events)) {
2782 common_timeout_schedule(ctl, &now, ev);
2785 struct event* top = NULL;
2786 /* See if the earliest timeout is now earlier than it
2787 * was before: if so, we will need to tell the main
2788 * thread to wake up earlier than it would otherwise.
2789 * We double check the timeout of the top element to
2790 * handle time distortions due to system suspension.
2792 if (min_heap_elt_is_top_(ev))
2794 else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2795 evutil_timercmp(&top->ev_timeout, &now, <))
2800 /* if we are not in the right thread, we need to wake up the loop */
2801 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2802 evthread_notify_base(base);
2804 event_debug_note_add_(ev);
2810 event_del_(struct event *ev, int blocking)
2813 struct event_base *base = ev->ev_base;
2815 if (EVUTIL_FAILURE_CHECK(!base)) {
2816 event_warnx("%s: event has no event_base set.", __func__);
2820 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2821 res = event_del_nolock_(ev, blocking);
2822 EVBASE_RELEASE_LOCK(base, th_base_lock);
2828 event_del(struct event *ev)
2830 return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2834 event_del_block(struct event *ev)
2836 return event_del_(ev, EVENT_DEL_BLOCK);
2840 event_del_noblock(struct event *ev)
2842 return event_del_(ev, EVENT_DEL_NOBLOCK);
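/* Editor's sketch of the three deletion flavors wrapped above: */
#if 0
event_del(ev);		/* AUTOBLOCK: waits for a callback running in another
			 * thread unless the event was added with EV_FINALIZE */
event_del_block(ev);	/* always waits for a running callback to finish */
event_del_noblock(ev);	/* never waits; the callback may still be running */
#endif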
2845 /** Helper for event_del: always called with th_base_lock held.
2847 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2848 * EVEN_IF_FINALIZING} values. See those for more information.
2851 event_del_nolock_(struct event *ev, int blocking)
2853 struct event_base *base;
2854 int res = 0, notify = 0;
2856 event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2857 ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2859 /* An event without a base has not been added */
2860 if (ev->ev_base == NULL)
2863 EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2865 if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2866 if (ev->ev_flags & EVLIST_FINALIZING) {
2874 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2876 /* See if we are just active executing this event in a loop */
2877 if (ev->ev_events & EV_SIGNAL) {
2878 if (ev->ev_ncalls && ev->ev_pncalls) {
2880 *ev->ev_pncalls = 0;
2884 if (ev->ev_flags & EVLIST_TIMEOUT) {
2885 /* NOTE: We never need to notify the main thread because of a
2886 * deleted timeout event: all that could happen if we don't is
2887 * that the dispatch loop might wake up too early. But the
2888 * point of notifying the main thread _is_ to wake up the
2889 * dispatch loop early anyway, so we wouldn't gain anything by doing it. */
2892 event_queue_remove_timeout(base, ev);
2895 if (ev->ev_flags & EVLIST_ACTIVE)
2896 event_queue_remove_active(base, event_to_event_callback(ev));
2897 else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2898 event_queue_remove_active_later(base, event_to_event_callback(ev));
2900 if (ev->ev_flags & EVLIST_INSERTED) {
2901 event_queue_remove_inserted(base, ev);
2902 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2903 res = evmap_io_del_(base, ev->ev_fd, ev);
2905 res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2907 /* evmap says we need to notify the main thread. */
2911 /* If we do not have events, let's notify the event base so it can
2912 * exit without waiting */
2913 if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
2917 /* if we are not in the right thread, we need to wake up the loop */
2918 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2919 evthread_notify_base(base);
2921 event_debug_note_del_(ev);
2923 /* If the main thread is currently executing this event's callback,
2924 * and we are not the main thread, then we want to wait until the
2925 * callback is done before returning. That way, when this function
2926 * returns, it will be safe to free the user-supplied argument.
2928 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2929 if (blocking != EVENT_DEL_NOBLOCK &&
2930 base->current_event == event_to_event_callback(ev) &&
2931 !EVBASE_IN_THREAD(base) &&
2932 (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2933 ++base->current_event_waiters;
2934 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2942 event_active(struct event *ev, int res, short ncalls)
2944 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2945 event_warnx("%s: event has no event_base set.", __func__);
2949 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2951 event_debug_assert_is_setup_(ev);
2953 event_active_nolock_(ev, res, ncalls);
2955 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
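/* Editor's sketch: manual activation from any thread. res becomes the
 * ev_res seen by the callback; ncalls matters only for signal events: */
#if 0
event_active(ev, EV_READ, 1);	/* run ev's callback once, as if readable */
#endif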
2960 event_active_nolock_(struct event *ev, int res, short ncalls)
2962 struct event_base *base;
2964 event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2965 ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2968 EVENT_BASE_ASSERT_LOCKED(base);
2970 if (ev->ev_flags & EVLIST_FINALIZING) {
2975 switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2977 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2981 /* We get different kinds of events, add them together */
2984 case EVLIST_ACTIVE_LATER:
2992 if (ev->ev_pri < base->event_running_priority)
2993 base->event_continue = 1;
2995 if (ev->ev_events & EV_SIGNAL) {
2996 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2997 if (base->current_event == event_to_event_callback(ev) &&
2998 !EVBASE_IN_THREAD(base)) {
2999 ++base->current_event_waiters;
3000 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
3003 ev->ev_ncalls = ncalls;
3004 ev->ev_pncalls = NULL;
3007 event_callback_activate_nolock_(base, event_to_event_callback(ev));
3011 event_active_later_(struct event *ev, int res)
3013 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
3014 event_active_later_nolock_(ev, res);
3015 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
3019 event_active_later_nolock_(struct event *ev, int res)
3021 struct event_base *base = ev->ev_base;
3022 EVENT_BASE_ASSERT_LOCKED(base);
3024 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3025 /* We get different kinds of events, add them together */
3032 event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
3036 event_callback_activate_(struct event_base *base,
3037 struct event_callback *evcb)
3040 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3041 r = event_callback_activate_nolock_(base, evcb);
3042 EVBASE_RELEASE_LOCK(base, th_base_lock);
3047 event_callback_activate_nolock_(struct event_base *base,
3048 struct event_callback *evcb)
3052 if (evcb->evcb_flags & EVLIST_FINALIZING)
3055 switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3059 case EVLIST_ACTIVE_LATER:
3060 event_queue_remove_active_later(base, evcb);
3069 event_queue_insert_active(base, evcb);
3071 if (EVBASE_NEED_NOTIFY(base))
3072 evthread_notify_base(base);
3078 event_callback_activate_later_nolock_(struct event_base *base,
3079 struct event_callback *evcb)
3081 if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
3084 event_queue_insert_active_later(base, evcb);
3085 if (EVBASE_NEED_NOTIFY(base))
3086 evthread_notify_base(base);
3091 event_callback_init_(struct event_base *base,
3092 struct event_callback *cb)
3094 memset(cb, 0, sizeof(*cb));
3095 cb->evcb_pri = base->nactivequeues - 1;
3099 event_callback_cancel_(struct event_base *base,
3100 struct event_callback *evcb)
3103 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3104 r = event_callback_cancel_nolock_(base, evcb, 0);
3105 EVBASE_RELEASE_LOCK(base, th_base_lock);
3110 event_callback_cancel_nolock_(struct event_base *base,
3111 struct event_callback *evcb, int even_if_finalizing)
3113 if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3116 if (evcb->evcb_flags & EVLIST_INIT)
3117 return event_del_nolock_(event_callback_to_event(evcb),
3118 even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3120 switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3122 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3126 /* Remove it from the active queue. */
3127 event_queue_remove_active(base, evcb);
3129 case EVLIST_ACTIVE_LATER:
3130 event_queue_remove_active_later(base, evcb);
3140 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3142 memset(cb, 0, sizeof(*cb));
3143 cb->evcb_cb_union.evcb_selfcb = fn;
3145 cb->evcb_pri = priority;
3146 cb->evcb_closure = EV_CLOSURE_CB_SELF;
3150 event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3152 cb->evcb_pri = priority;
3156 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3159 base = current_base;
3160 event_callback_cancel_(base, cb);
3163 #define MAX_DEFERREDS_QUEUED 32
3165 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3169 base = current_base;
3170 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3171 if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3172 r = event_callback_activate_later_nolock_(base, cb);
3174 r = event_callback_activate_nolock_(base, cb);
3176 ++base->n_deferreds_queued;
3179 EVBASE_RELEASE_LOCK(base, th_base_lock);
3184 timeout_next(struct event_base *base, struct timeval **tv_p)
3186 /* Caller must hold th_base_lock */
3189 struct timeval *tv = *tv_p;
3192 ev = min_heap_top_(&base->timeheap);
3195 /* if no time-based events are active, wait for I/O */
3200 if (gettime(base, &now) == -1) {
3205 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3206 evutil_timerclear(tv);
3210 evutil_timersub(&ev->ev_timeout, &now, tv);
3212 EVUTIL_ASSERT(tv->tv_sec >= 0);
3213 EVUTIL_ASSERT(tv->tv_usec >= 0);
3214 event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3220 /* Activate every event whose timeout has elapsed. */
3222 timeout_process(struct event_base *base)
3224 /* Caller must hold lock. */
3228 if (min_heap_empty_(&base->timeheap)) {
3232 gettime(base, &now);
3234 while ((ev = min_heap_top_(&base->timeheap))) {
3235 if (evutil_timercmp(&ev->ev_timeout, &now, >))
3238 /* delete this event from the I/O queues */
3239 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3241 event_debug(("timeout_process: event: %p, call %p",
3242 ev, ev->ev_callback));
3243 event_active_nolock_(ev, EV_TIMEOUT, 1);
3248 #define MAX(a,b) (((a)>(b))?(a):(b))
3251 #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3253 /* These are a fancy way to spell
3254 if (~flags & EVLIST_INTERNAL)
3255 base->event_count--/++;
3257 #define DECR_EVENT_COUNT(base,flags) \
3258 ((base)->event_count -= !((flags) & EVLIST_INTERNAL))
3259 #define INCR_EVENT_COUNT(base,flags) do { \
3260 ((base)->event_count += !((flags) & EVLIST_INTERNAL)); \
3261 MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \
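/* Editor's note (illustrative): with flags == 0 the increment expands to
 * (base)->event_count += 1, while flags containing EVLIST_INTERNAL adds 0,
 * so internal bookkeeping events never show up in the public counters. */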
3265 event_queue_remove_inserted(struct event_base *base, struct event *ev)
3267 EVENT_BASE_ASSERT_LOCKED(base);
3268 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3269 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3270 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3273 DECR_EVENT_COUNT(base, ev->ev_flags);
3274 ev->ev_flags &= ~EVLIST_INSERTED;
3277 event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3279 EVENT_BASE_ASSERT_LOCKED(base);
3280 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3281 event_errx(1, "%s: %p not on queue %x", __func__,
3282 evcb, EVLIST_ACTIVE);
3285 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3286 evcb->evcb_flags &= ~EVLIST_ACTIVE;
3287 base->event_count_active--;
3289 TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3290 evcb, evcb_active_next);
3293 event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3295 EVENT_BASE_ASSERT_LOCKED(base);
3296 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3297 event_errx(1, "%s: %p not on queue %x", __func__,
3298 evcb, EVLIST_ACTIVE_LATER);
3301 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3302 evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3303 base->event_count_active--;
3305 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3308 event_queue_remove_timeout(struct event_base *base, struct event *ev)
3310 EVENT_BASE_ASSERT_LOCKED(base);
3311 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3312 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3313 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3316 DECR_EVENT_COUNT(base, ev->ev_flags);
3317 ev->ev_flags &= ~EVLIST_TIMEOUT;
3319 if (is_common_timeout(&ev->ev_timeout, base)) {
3320 struct common_timeout_list *ctl =
3321 get_common_timeout_list(base, &ev->ev_timeout);
3322 TAILQ_REMOVE(&ctl->events, ev,
3323 ev_timeout_pos.ev_next_with_common_timeout);
3325 min_heap_erase_(&base->timeheap, ev);
3329 #ifdef USE_REINSERT_TIMEOUT
3330 /* Remove and reinsert 'ev' into the timeout queue. */
3332 event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3333 int was_common, int is_common, int old_timeout_idx)
3335 struct common_timeout_list *ctl;
3336 if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3337 event_queue_insert_timeout(base, ev);
3341 switch ((was_common<<1) | is_common) {
3342 case 3: /* Changing from one common timeout to another */
3343 ctl = base->common_timeout_queues[old_timeout_idx];
3344 TAILQ_REMOVE(&ctl->events, ev,
3345 ev_timeout_pos.ev_next_with_common_timeout);
3346 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3347 insert_common_timeout_inorder(ctl, ev);
3349 case 2: /* Was common; is no longer common */
3350 ctl = base->common_timeout_queues[old_timeout_idx];
3351 TAILQ_REMOVE(&ctl->events, ev,
3352 ev_timeout_pos.ev_next_with_common_timeout);
3353 min_heap_push_(&base->timeheap, ev);
3355 case 1: /* Wasn't common; has become common. */
3356 min_heap_erase_(&base->timeheap, ev);
3357 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3358 insert_common_timeout_inorder(ctl, ev);
3360 case 0: /* was in heap; is still on heap. */
3361 min_heap_adjust_(&base->timeheap, ev);
3364 EVUTIL_ASSERT(0); /* unreachable */
3370 /* Add 'ev' to the common timeout list 'ctl', keeping the list sorted by timeout. */
3372 insert_common_timeout_inorder(struct common_timeout_list *ctl,
3376 /* By all logic, we should just be able to append 'ev' to the end of
3377 * ctl->events, since the timeout on each 'ev' is set to {the common
3378 * timeout} + {the time when we add the event}, and so the events
3379 * should arrive in order of their timeouts. But just in case
3380 * there's some wacky threading issue going on, we do a search from
3381 * the end of 'ctl->events' to find the right insertion point.
3383 TAILQ_FOREACH_REVERSE(e, &ctl->events,
3384 event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3385 /* This timercmp is a little sneaky, since both ev and e have
3386 * magic values in tv_usec. Fortunately, they ought to have
3387 * the _same_ magic values in tv_usec. Let's assert for that.
3390 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3391 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3392 TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3393 ev_timeout_pos.ev_next_with_common_timeout);
3397 TAILQ_INSERT_HEAD(&ctl->events, ev,
3398 ev_timeout_pos.ev_next_with_common_timeout);
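/* Editor's sketch: callers opt into these O(1) common-timeout queues via
 * the public API; the returned timeval carries the magic tv_usec bits that
 * is_common_timeout() checks: */
#if 0
const struct timeval tick = { 1, 0 };
const struct timeval *common =
    event_base_init_common_timeout(base, &tick);
event_add(timer_ev, common);	/* queued on a common_timeout_list */
#endif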
3402 event_queue_insert_inserted(struct event_base *base, struct event *ev)
3404 EVENT_BASE_ASSERT_LOCKED(base);
3406 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3407 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3408 ev, EV_SOCK_ARG(ev->ev_fd));
3412 INCR_EVENT_COUNT(base, ev->ev_flags);
3414 ev->ev_flags |= EVLIST_INSERTED;
3418 event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3420 EVENT_BASE_ASSERT_LOCKED(base);
3422 if (evcb->evcb_flags & EVLIST_ACTIVE) {
3423 /* Double insertion is possible for active events */
3427 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3429 evcb->evcb_flags |= EVLIST_ACTIVE;
3431 base->event_count_active++;
3432 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3433 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3434 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3435 evcb, evcb_active_next);
3439 event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3441 EVENT_BASE_ASSERT_LOCKED(base);
3442 if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3443 /* Double insertion is possible */
3447 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3448 evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3449 base->event_count_active++;
3450 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3451 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3452 TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3456 event_queue_insert_timeout(struct event_base *base, struct event *ev)
3458 EVENT_BASE_ASSERT_LOCKED(base);
3460 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3461 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3462 ev, EV_SOCK_ARG(ev->ev_fd));
3466 INCR_EVENT_COUNT(base, ev->ev_flags);
3468 ev->ev_flags |= EVLIST_TIMEOUT;
3470 if (is_common_timeout(&ev->ev_timeout, base)) {
3471 struct common_timeout_list *ctl =
3472 get_common_timeout_list(base, &ev->ev_timeout);
3473 insert_common_timeout_inorder(ctl, ev);
3475 min_heap_push_(&base->timeheap, ev);
3480 event_queue_make_later_events_active(struct event_base *base)
3482 struct event_callback *evcb;
3483 EVENT_BASE_ASSERT_LOCKED(base);
3485 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3486 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3487 evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3488 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3489 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3490 base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3494 /* Functions for debugging */
3497 event_get_version(void)
3499 return (EVENT__VERSION);
3503 event_get_version_number(void)
3505 return (EVENT__NUMERIC_VERSION);
3509 * No thread-safe interface needed - the information should be the same for all threads.
3514 event_get_method(void)
3516 return (current_base->evsel->name);
3519 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3520 static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3521 static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3522 static void (*mm_free_fn_)(void *p) = NULL;
3525 event_mm_malloc_(size_t sz)
3531 return mm_malloc_fn_(sz);
3537 event_mm_calloc_(size_t count, size_t size)
3539 if (count == 0 || size == 0)
3542 if (mm_malloc_fn_) {
3543 size_t sz = count * size;
3545 if (count > EV_SIZE_MAX / size)
3547 p = mm_malloc_fn_(sz);
3549 return memset(p, 0, sz);
3551 void *p = calloc(count, size);
3553 /* Windows calloc doesn't reliably set ENOMEM */
3566 event_mm_strdup_(const char *str)
3573 if (mm_malloc_fn_) {
3574 size_t ln = strlen(str);
3576 if (ln == EV_SIZE_MAX)
3578 p = mm_malloc_fn_(ln+1);
3580 return memcpy(p, str, ln+1);
3583 return _strdup(str);
3594 event_mm_realloc_(void *ptr, size_t sz)
3597 return mm_realloc_fn_(ptr, sz);
3599 return realloc(ptr, sz);
3603 event_mm_free_(void *ptr)
3612 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3613 void *(*realloc_fn)(void *ptr, size_t sz),
3614 void (*free_fn)(void *ptr))
3616 mm_malloc_fn_ = malloc_fn;
3617 mm_realloc_fn_ = realloc_fn;
3618 mm_free_fn_ = free_fn;
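/* Editor's sketch (my_malloc/my_realloc/my_free are hypothetical): the
 * replacements must be installed before libevent allocates anything, and
 * must follow malloc/realloc/free semantics since they replace all three: */
#if 0
event_set_mem_functions(my_malloc, my_realloc, my_free);
#endif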
3622 #ifdef EVENT__HAVE_EVENTFD
3624 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3628 struct event_base *base = arg;
3630 r = read(fd, (void*) &msg, sizeof(msg));
3631 if (r<0 && errno != EAGAIN) {
3632 event_sock_warn(fd, "Error reading from eventfd");
3634 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3635 base->is_notify_pending = 0;
3636 EVBASE_RELEASE_LOCK(base, th_base_lock);
3641 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3643 unsigned char buf[1024];
3644 struct event_base *base = arg;
3646 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3649 while (read(fd, (char*)buf, sizeof(buf)) > 0)
3653 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3654 base->is_notify_pending = 0;
3655 EVBASE_RELEASE_LOCK(base, th_base_lock);
3659 evthread_make_base_notifiable(struct event_base *base)
3665 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3666 r = evthread_make_base_notifiable_nolock_(base);
3667 EVBASE_RELEASE_LOCK(base, th_base_lock);
3672 evthread_make_base_notifiable_nolock_(struct event_base *base)
3674 void (*cb)(evutil_socket_t, short, void *);
3675 int (*notify)(struct event_base *);
3677 if (base->th_notify_fn != NULL) {
3678 /* The base is already notifiable: we're doing fine. */
3682 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3683 if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3684 base->th_notify_fn = event_kq_notify_base_;
3685 /* No need to add an event here; the backend can wake
3686 * itself up just fine. */
3691 #ifdef EVENT__HAVE_EVENTFD
3692 base->th_notify_fd[0] = evutil_eventfd_(0,
3693 EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3694 if (base->th_notify_fd[0] >= 0) {
3695 base->th_notify_fd[1] = -1;
3696 notify = evthread_notify_base_eventfd;
3697 cb = evthread_notify_drain_eventfd;
3700 if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3701 notify = evthread_notify_base_default;
3702 cb = evthread_notify_drain_default;
3707 base->th_notify_fn = notify;
3709 /* prepare an event that we can use for wakeup */
3710 event_assign(&base->th_notify, base, base->th_notify_fd[0],
3711 EV_READ|EV_PERSIST, cb, base);
3713 /* we need to mark this as an internal event */
3714 base->th_notify.ev_flags |= EVLIST_INTERNAL;
3715 event_priority_set(&base->th_notify, 0);
3717 return event_add_nolock_(&base->th_notify, NULL, 0);
3721 event_base_foreach_event_nolock_(struct event_base *base,
3722 event_base_foreach_event_cb fn, void *arg)
3728 /* Start out with all the EVLIST_INSERTED events. */
3729 if ((r = evmap_foreach_event_(base, fn, arg)))
3732 /* Okay, now we deal with those events that have timeouts and are in the min-heap. */
3734 for (u = 0; u < base->timeheap.n; ++u) {
3735 ev = base->timeheap.p[u];
3736 if (ev->ev_flags & EVLIST_INSERTED) {
3737 /* we already processed this one */
3740 if ((r = fn(base, ev, arg)))
3744 /* Now for the events in one of the timeout queues.
3746 for (i = 0; i < base->n_common_timeouts; ++i) {
3747 struct common_timeout_list *ctl =
3748 base->common_timeout_queues[i];
3749 TAILQ_FOREACH(ev, &ctl->events,
3750 ev_timeout_pos.ev_next_with_common_timeout) {
3751 if (ev->ev_flags & EVLIST_INSERTED) {
3752 /* we already processed this one */
3755 if ((r = fn(base, ev, arg)))
3760 /* Finally, we deal with all the active events that we haven't touched yet. */
3762 for (i = 0; i < base->nactivequeues; ++i) {
3763 struct event_callback *evcb;
3764 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3765 if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3766 /* This isn't an event (evlist_init clear), or
3767 * we already processed it (inserted or timeout set). */
3771 ev = event_callback_to_event(evcb);
3772 if ((r = fn(base, ev, arg)))
3780 /* Helper for event_base_dump_events: called on each event in the event base;
3781 * dumps only the inserted events. */
3783 dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3786 const char *gloss = (e->ev_events & EV_SIGNAL) ?
3789 if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3792 fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
3793 (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3794 (e->ev_events&EV_READ)?" Read":"",
3795 (e->ev_events&EV_WRITE)?" Write":"",
3796 (e->ev_events&EV_CLOSED)?" EOF":"",
3797 (e->ev_events&EV_SIGNAL)?" Signal":"",
3798 (e->ev_events&EV_PERSIST)?" Persist":"",
3799 (e->ev_events&EV_ET)?" ET":"",
3800 (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3801 if (e->ev_flags & EVLIST_TIMEOUT) {
3803 tv.tv_sec = e->ev_timeout.tv_sec;
3804 tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3805 evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3806 fprintf(output, " Timeout=%ld.%06d",
3807 (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3809 fputc('\n', output);
3814 /* Helper for event_base_dump_events: called on each event in the event base;
3815 * dumps only the active events. */
3817 dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3820 const char *gloss = (e->ev_events & EV_SIGNAL) ?
3823 if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3826 fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3827 (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3828 (e->ev_res&EV_READ)?" Read":"",
3829 (e->ev_res&EV_WRITE)?" Write":"",
3830 (e->ev_res&EV_CLOSED)?" EOF":"",
3831 (e->ev_res&EV_SIGNAL)?" Signal":"",
3832 (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3833 (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3834 (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3840 event_base_foreach_event(struct event_base *base,
3841 event_base_foreach_event_cb fn, void *arg)
3844 if ((!fn) || (!base)) {
3847 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3848 r = event_base_foreach_event_nolock_(base, fn, arg);
3849 EVBASE_RELEASE_LOCK(base, th_base_lock);
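/* Editor's sketch of a caller-side walk; returning nonzero from the
 * callback stops the iteration early, as the loops above show: */
#if 0
static int
count_events_cb(const struct event_base *b, const struct event *e, void *arg)
{
	++*(int *)arg;
	return 0;	/* keep iterating */
}

int n = 0;
event_base_foreach_event(base, count_events_cb, &n);
#endif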
3855 event_base_dump_events(struct event_base *base, FILE *output)
3857 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3858 fprintf(output, "Inserted events:\n");
3859 event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3861 fprintf(output, "Active events:\n");
3862 event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3863 EVBASE_RELEASE_LOCK(base, th_base_lock);
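/* Editor's sketch: a typical debugging call, writing both sections to
 * stderr: */
#if 0
event_base_dump_events(base, stderr);
#endif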
3867 event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3869 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3871 /* Activate any non-timer events */
3872 if (!(events & EV_TIMEOUT)) {
3873 evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3875 /* If we want to activate timer events, loop and activate each event with
3876 * the same fd in both the timeheap and common timeouts list */
3881 for (u = 0; u < base->timeheap.n; ++u) {
3882 ev = base->timeheap.p[u];
3883 if (ev->ev_fd == fd) {
3884 event_active_nolock_(ev, EV_TIMEOUT, 1);
3888 for (i = 0; i < base->n_common_timeouts; ++i) {
3889 struct common_timeout_list *ctl = base->common_timeout_queues[i];
3890 TAILQ_FOREACH(ev, &ctl->events,
3891 ev_timeout_pos.ev_next_with_common_timeout) {
3892 if (ev->ev_fd == fd) {
3893 event_active_nolock_(ev, EV_TIMEOUT, 1);
3899 EVBASE_RELEASE_LOCK(base, th_base_lock);
3903 event_base_active_by_signal(struct event_base *base, int sig)
3905 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3906 evmap_signal_active_(base, sig, 1);
3907 EVBASE_RELEASE_LOCK(base, th_base_lock);
3912 event_base_add_virtual_(struct event_base *base)
3914 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3915 base->virtual_event_count++;
3916 MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3917 EVBASE_RELEASE_LOCK(base, th_base_lock);
3921 event_base_del_virtual_(struct event_base *base)
3923 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3924 EVUTIL_ASSERT(base->virtual_event_count > 0);
3925 base->virtual_event_count--;
3926 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3927 evthread_notify_base(base);
3928 EVBASE_RELEASE_LOCK(base, th_base_lock);
3932 event_free_debug_globals_locks(void)
3934 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3935 #ifndef EVENT__DISABLE_DEBUG_MODE
3936 if (event_debug_map_lock_ != NULL) {
3937 EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3938 event_debug_map_lock_ = NULL;
3939 evthreadimpl_disable_lock_debugging_();
3941 #endif /* EVENT__DISABLE_DEBUG_MODE */
3942 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3947 event_free_debug_globals(void)
3949 event_free_debug_globals_locks();
3953 event_free_evsig_globals(void)
3955 evsig_free_globals_();
3959 event_free_evutil_globals(void)
3961 evutil_free_globals_();
3965 event_free_globals(void)
3967 event_free_debug_globals();
3968 event_free_evsig_globals();
3969 event_free_evutil_globals();
3973 libevent_global_shutdown(void)
3975 event_disable_debug_mode();
3976 event_free_globals();
3979 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3981 event_global_setup_locks_(const int enable_locks)
3983 #ifndef EVENT__DISABLE_DEBUG_MODE
3984 EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3986 if (evsig_global_setup_locks_(enable_locks) < 0)
3988 if (evutil_global_setup_locks_(enable_locks) < 0)
3990 if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3997 event_base_assert_ok_(struct event_base *base)
3999 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
4000 event_base_assert_ok_nolock_(base);
4001 EVBASE_RELEASE_LOCK(base, th_base_lock);
4005 event_base_assert_ok_nolock_(struct event_base *base)
4011 /* First do checks on the per-fd and per-signal lists */
4012 evmap_check_integrity_(base);
4014 /* Check the heap property */
4015 for (u = 1; u < base->timeheap.n; ++u) {
4016 size_t parent = (u - 1) / 2;
4017 struct event *ev, *p_ev;
4018 ev = base->timeheap.p[u];
4019 p_ev = base->timeheap.p[parent];
4020 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
4021 EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
4022 EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == u);
4025 /* Check that the common timeouts are fine */
4026 for (i = 0; i < base->n_common_timeouts; ++i) {
4027 struct common_timeout_list *ctl = base->common_timeout_queues[i];
4028 struct event *last=NULL, *ev;
4030 EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
4032 TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
4034 EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
4035 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
4036 EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
4037 EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
4042 /* Check the active queues. */
4044 for (i = 0; i < base->nactivequeues; ++i) {
4045 struct event_callback *evcb;
4046 EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
4047 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
4048 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
4049 EVUTIL_ASSERT(evcb->evcb_pri == i);
4055 struct event_callback *evcb;
4056 TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
4057 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
4061 EVUTIL_ASSERT(count == base->event_count_active);