merging pre_close_connection hook, prep_lingering_close and ap_update_child() additio...
diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
index 03fef9a2fc4112b56dd39a97a1b73ac0b08e2bf3..e76dee3441bedfe86b1aa1dddd8fbe294ab21349 100644
--- a/server/mpm/event/event.c
+++ b/server/mpm/event/event.c
  *
  * After a client completes the first request, the client can keep the
  * connection open to send more requests with the same socket.  This can save
- * signifigant overhead in creating TCP connections.  However, the major
+ * significant overhead in creating TCP connections.  However, the major
  * disadvantage is that Apache traditionally keeps an entire child
  * process/thread waiting for data from the client.  To solve this problem,
- * this MPM has a dedicated thread for handling both the Listenting sockets,
+ * this MPM has a dedicated thread for handling both the Listening sockets,
  * and all sockets that are in a Keep Alive status.
  *
  * The MPM assumes the underlying apr_pollset implementation is somewhat
@@ -30,7 +30,7 @@
  * enables the MPM to avoid extra high level locking or having to wake up the
  * listener thread when a keep-alive socket needs to be sent to it.
  *
- * This MPM not preform well on older platforms that do not have very good
+ * This MPM does not perform well on older platforms that do not have very good
  * threading, like Linux with a 2.4 kernel, but this does not matter, since we
  * require EPoll or KQueue.
  *
@@ -59,6 +59,8 @@
 #include "apr_want.h"
 #include "apr_version.h"
 
+#include <stdlib.h>
+
 #if APR_HAVE_UNISTD_H
 #include <unistd.h>
 #endif
@@ -83,8 +85,8 @@
 #include "http_config.h"        /* for read_config */
 #include "http_core.h"          /* for get_remote_host */
 #include "http_connection.h"
+#include "http_protocol.h"
 #include "ap_mpm.h"
-#include "pod.h"
 #include "mpm_common.h"
 #include "ap_listen.h"
 #include "scoreboard.h"
 #include "mpm_default.h"
 #include "http_vhost.h"
 #include "unixd.h"
+#include "apr_skiplist.h"
 
 #include <signal.h>
 #include <limits.h>             /* for INT_MAX */
 
 
-#if HAVE_SERF
-#include "mod_serf.h"
-#include "serf.h"
-#endif
-
 /* Limit on the total --- clients will be locked out if more servers than
  * this are needed.  It is intended solely to keep the server from crashing
  * when things get out of hand.
  * Actual definitions of config globals
  */
 
+#ifndef DEFAULT_WORKER_FACTOR
+#define DEFAULT_WORKER_FACTOR 2
+#endif
+#define WORKER_FACTOR_SCALE   16  /* scale factor to allow fractional values */
+static unsigned int worker_factor = DEFAULT_WORKER_FACTOR * WORKER_FACTOR_SCALE;
+
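
A standalone sketch of the fixed-point arithmetic above, with hypothetical values for `threads_per_child` and the idler count; the real admission check using this expression appears later in `listener_thread()`:

```c
#include <stdio.h>

#define WORKER_FACTOR_SCALE 16

int main(void)
{
    /* e.g. a configured factor of 1.5 is stored as 1.5 * 16 = 24 */
    unsigned int worker_factor = (unsigned int)(1.5 * WORKER_FACTOR_SCALE);
    int threads_per_child = 25, idlers = 10;   /* hypothetical values */

    /* integer-only evaluation at runtime: 25 + 10 * 24 / 16 = 40 */
    int headroom = threads_per_child
                 + idlers * worker_factor / WORKER_FACTOR_SCALE;
    printf("accept while non-lingering conns <= %d\n", headroom);
    return 0;
}
```
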
 static int threads_per_child = 0;   /* Worker threads per child */
 static int ap_daemons_to_start = 0;
 static int min_spare_threads = 0;
@@ -165,64 +169,122 @@ static int ap_daemons_limit = 0;
 static int max_workers = 0;
 static int server_limit = 0;
 static int thread_limit = 0;
+static int had_healthy_child = 0;
 static int dying = 0;
 static int workers_may_exit = 0;
 static int start_thread_may_exit = 0;
 static int listener_may_exit = 0;
-static int requests_this_child;
 static int num_listensocks = 0;
-static apr_uint32_t connection_count = 0;
+static apr_int32_t conns_this_child;        /* MaxConnectionsPerChild, only
+                                               accessed in listener thread */
+static apr_uint32_t connection_count = 0;   /* Number of open connections */
+static apr_uint32_t lingering_count = 0;    /* Number of connections in lingering close */
+static apr_uint32_t suspended_count = 0;    /* Number of suspended connections */
+static apr_uint32_t clogged_count = 0;      /* Number of threads processing ssl conns */
 static int resource_shortage = 0;
 static fd_queue_t *worker_queue;
 static fd_queue_info_t *worker_queue_info;
 static int mpm_state = AP_MPMQ_STARTING;
 
 static apr_thread_mutex_t *timeout_mutex;
-APR_RING_HEAD(timeout_head_t, conn_state_t);
+
+module AP_MODULE_DECLARE_DATA mpm_event_module;
+
+/* forward declare */
+struct event_srv_cfg_s;
+typedef struct event_srv_cfg_s event_srv_cfg;
+
+struct event_conn_state_t {
+    /** APR_RING of expiration timeouts */
+    APR_RING_ENTRY(event_conn_state_t) timeout_list;
+    /** the time when the entry was queued */
+    apr_time_t queue_timestamp;
+    /** connection record this struct refers to */
+    conn_rec *c;
+    /** request record (if any) this struct refers to */
+    request_rec *r;
+    /** server config this struct refers to */
+    event_srv_cfg *sc;
+    /** is the current conn_rec suspended?  (disassociated from
+     * a particular MPM thread; for suspend_/resume_connection
+     * hooks)
+     */
+    int suspended;
+    /** memory pool to allocate from */
+    apr_pool_t *p;
+    /** bucket allocator */
+    apr_bucket_alloc_t *bucket_alloc;
+    /** poll file descriptor information */
+    apr_pollfd_t pfd;
+    /** public parts of the connection state */
+    conn_state_t pub;
+};
+APR_RING_HEAD(timeout_head_t, event_conn_state_t);
+
 struct timeout_queue {
     struct timeout_head_t head;
-    int count;
-    const char *tag;
+    int count, *total;
+    apr_interval_time_t timeout;
+    struct timeout_queue *next;
 };
-static struct timeout_queue write_completion_q, keepalive_q, linger_q,
-                            short_linger_q;
+/*
+ * Several timeout queues that use different timeouts, so that we can always
+ * simply append to the end.
+ *   write_completion_q uses vhost's TimeOut
+ *   keepalive_q        uses vhost's KeepAliveTimeOut
+ *   linger_q           uses MAX_SECS_TO_LINGER
+ *   short_linger_q     uses SECONDS_TO_LINGER
+ */
+static struct timeout_queue *write_completion_q,
+                            *keepalive_q,
+                            *linger_q,
+                            *short_linger_q;
+
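
Why a single timeout per queue allows appending at the tail: all entries of a queue expire in insertion order, so expiry scans only ever touch the head. A simplified, self-contained sketch of that invariant, with stand-in types rather than the real MPM structures:

```c
#include <stddef.h>

typedef long long fake_time_t;                    /* stand-in for apr_time_t */

struct entry { fake_time_t queue_timestamp; struct entry *next; };
struct queue { struct entry *head, *tail; fake_time_t timeout; };

/* Pop one expired entry from the head, or NULL. Later entries were queued
 * later and share the same timeout, so none of them can expire earlier:
 * stopping at the first unexpired head entry is enough (O(1) per entry). */
static struct entry *pop_expired(struct queue *q, fake_time_t now)
{
    struct entry *e = q->head;
    if (e && e->queue_timestamp + q->timeout < now) {
        q->head = e->next;
        if (!q->head)
            q->tail = NULL;
        return e;
    }
    return NULL;
}

int main(void)
{
    struct entry e = { 100, NULL };
    struct queue q = { &e, &e, 50 };
    return pop_expired(&q, 200) == &e ? 0 : 1;   /* 100 + 50 < 200: expired */
}
```
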
 static apr_pollfd_t *listener_pollfd;
 
 /*
  * Macros for accessing struct timeout_queue.
  * For TO_QUEUE_APPEND and TO_QUEUE_REMOVE, timeout_mutex must be held.
  */
-#define TO_QUEUE_APPEND(q, el)                                            \
-    do {                                                                  \
-        APR_RING_INSERT_TAIL(&(q).head, el, conn_state_t, timeout_list);  \
-        (q).count++;                                                      \
+#define TO_QUEUE_APPEND(q, el)                                                \
+    do {                                                                      \
+        APR_RING_INSERT_TAIL(&(q)->head, el, event_conn_state_t,              \
+                             timeout_list);                                   \
+        ++*(q)->total;                                                        \
+        ++(q)->count;                                                         \
     } while (0)
 
-#define TO_QUEUE_REMOVE(q, el)             \
-    do {                                   \
-        APR_RING_REMOVE(el, timeout_list); \
-        (q).count--;                       \
+#define TO_QUEUE_REMOVE(q, el)                                                \
+    do {                                                                      \
+        APR_RING_REMOVE(el, timeout_list);                                    \
+        --*(q)->total;                                                        \
+        --(q)->count;                                                         \
     } while (0)
 
-#define TO_QUEUE_INIT(q)                                            \
-    do {                                                            \
-            APR_RING_INIT(&(q).head, conn_state_t, timeout_list);   \
-            (q).tag = #q;                                           \
+#define TO_QUEUE_INIT(q, p, t, v)                                             \
+    do {                                                                      \
+        struct timeout_queue *b = (v);                                        \
+        (q) = apr_palloc((p), sizeof *(q));                                   \
+        APR_RING_INIT(&(q)->head, event_conn_state_t, timeout_list);          \
+        (q)->total = (b) ? (b)->total : apr_pcalloc((p), sizeof *(q)->total); \
+        (q)->count = 0;                                                       \
+        (q)->timeout = (t);                                                   \
+        (q)->next = NULL;                                                     \
     } while (0)
 
 #define TO_QUEUE_ELEM_INIT(el) APR_RING_ELEM_INIT(el, timeout_list)
 
+/*
+ * The pollset for sockets that are in any of the timeout queues. Currently
+ * we use the timeout_mutex to make sure that connections are added/removed
+ * atomically to/from both event_pollset and a timeout queue. Otherwise
+ * some confusion can happen under high load if timeout queues and pollset
+ * get out of sync.
+ * XXX: It should be possible to make the lock unnecessary in many or even all
+ * XXX: cases.
+ */
 static apr_pollset_t *event_pollset;
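
The add-side of that rule, as used by this patch in start_lingering_close_common() and process_socket(), always follows the same shape (condensed excerpt for illustration, not standalone code):

```c
apr_thread_mutex_lock(timeout_mutex);
TO_QUEUE_APPEND(q, cs);                          /* queue membership ...      */
rv = apr_pollset_add(event_pollset, &cs->pfd);   /* ... and pollset membership
                                                  * change together, under the
                                                  * same mutex */
apr_thread_mutex_unlock(timeout_mutex);
```
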
 
-#if HAVE_SERF
-typedef struct {
-    apr_pollset_t *pollset;
-    apr_pool_t *pool;
-} s_baton_t;
-
-static serf_context_t *g_serf;
-#endif
-
 /* The structure used to pass unique initialization info to each thread */
 typedef struct
 {
@@ -246,9 +308,6 @@ typedef enum
 {
     PT_CSD,
     PT_ACCEPT
-#if HAVE_SERF
-    , PT_SERF
-#endif
 } poll_type_e;
 
 typedef struct
@@ -279,21 +338,37 @@ typedef struct event_retained_data {
     /*
      * idle_spawn_rate is the number of children that will be spawned on the
      * next maintenance cycle if there aren't enough idle servers.  It is
-     * doubled up to MAX_SPAWN_RATE, and reset only when a cycle goes by
-     * without the need to spawn.
+     * maintained per listeners bucket, doubled up to MAX_SPAWN_RATE, and
+     * reset only when a cycle goes by without the need to spawn.
      */
-    int idle_spawn_rate;
+    int *idle_spawn_rate;
 #ifndef MAX_SPAWN_RATE
 #define MAX_SPAWN_RATE        (32)
 #endif
     int hold_off_on_exponential_spawning;
+    /*
+     * Current number of listeners buckets and maximum reached across
+     * restarts (to size retained data according to dynamic num_buckets,
+     * eg. idle_spawn_rate).
+     */
+    int num_buckets, max_buckets;
 } event_retained_data;
 static event_retained_data *retained;
+typedef struct event_child_bucket {
+    ap_pod_t *pod;
+    ap_listen_rec *listeners;
+} event_child_bucket;
+static event_child_bucket *all_buckets, /* All listeners buckets */
+                          *my_bucket;   /* Current child bucket */
+
+struct event_srv_cfg_s {
+    struct timeout_queue *wc_q,
+                         *ka_q;
+};
 
 #define ID_FROM_CHILD_THREAD(c, t)    ((c * thread_limit) + t)
 
-static ap_event_pod_t *pod;
-
 /* The event MPM respects a couple of runtime flags that can aid
  * in debugging. Setting the -DNO_DETACH flag will prevent the root process
  * from detaching from its controlling terminal. Additionally, setting
@@ -346,10 +421,14 @@ static void disable_listensocks(int process_slot)
 static void enable_listensocks(int process_slot)
 {
     int i;
-    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
+    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00457)
                  "Accepting new connections again: "
-                 "%u active conns, %u idle workers",
+                 "%u active conns (%u lingering/%u clogged/%u suspended), "
+                 "%u idle workers",
                  apr_atomic_read32(&connection_count),
+                 apr_atomic_read32(&lingering_count),
+                 apr_atomic_read32(&clogged_count),
+                 apr_atomic_read32(&suspended_count),
                  ap_queue_info_get_idlers(worker_queue_info));
     for (i = 0; i < num_listensocks; i++)
         apr_pollset_add(event_pollset, &listener_pollfd[i]);
@@ -444,9 +523,6 @@ static int event_query(int query_code, int *result, apr_status_t *rv)
     case AP_MPMQ_IS_ASYNC:
         *result = 1;
         break;
-    case AP_MPMQ_HAS_SERF:
-        *result = 1;
-        break;
     case AP_MPMQ_HARD_LIMIT_DAEMONS:
         *result = server_limit;
         break;
@@ -511,7 +587,7 @@ static void event_note_child_started(int slot, pid_t pid)
 
 static void event_note_child_lost_slot(int slot, pid_t newpid)
 {
-    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
+    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00458)
                  "pid %" APR_PID_T_FMT " taking over scoreboard slot from "
                  "%" APR_PID_T_FMT "%s",
                  newpid,
@@ -565,9 +641,21 @@ static int child_fatal;
 /* volatile because they're updated from a signal handler */
 static int volatile shutdown_pending;
 static int volatile restart_pending;
-ap_generation_t volatile ap_my_generation = 0;
 
-static apr_status_t decrement_connection_count(void *dummy) {
+static apr_status_t decrement_connection_count(void *cs_)
+{
+    event_conn_state_t *cs = cs_;
+    switch (cs->pub.state) {
+        case CONN_STATE_LINGER_NORMAL:
+        case CONN_STATE_LINGER_SHORT:
+            apr_atomic_dec32(&lingering_count);
+            break;
+        case CONN_STATE_SUSPENDED:
+            apr_atomic_dec32(&suspended_count);
+            break;
+        default:
+            break;
+    }
     apr_atomic_dec32(&connection_count);
     return APR_SUCCESS;
 }
@@ -578,7 +666,7 @@ static apr_status_t decrement_connection_count(void *dummy) {
  * Previously this was initiated in sig_term() and restart() signal handlers,
  * but we want to be able to start a shutdown/restart from other sources --
  * e.g. on Win32, from the service manager. Now the service manager can
- * call ap_start_shutdown() or ap_start_restart() as appropiate.  Note that
+ * call ap_start_shutdown() or ap_start_restart() as appropriate.  Note that
  * these functions can also be called by the child processes, since global
  * variables are no longer used to pass on the required action to the parent.
  *
@@ -643,22 +731,22 @@ static void set_signals(void)
 
     sa.sa_handler = sig_term;
     if (sigaction(SIGTERM, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00459)
                      "sigaction(SIGTERM)");
 #ifdef AP_SIG_GRACEFUL_STOP
     if (sigaction(AP_SIG_GRACEFUL_STOP, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00460)
                      "sigaction(" AP_SIG_GRACEFUL_STOP_STRING ")");
 #endif
 #ifdef SIGINT
     if (sigaction(SIGINT, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00461)
                      "sigaction(SIGINT)");
 #endif
 #ifdef SIGXCPU
     sa.sa_handler = SIG_DFL;
     if (sigaction(SIGXCPU, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00462)
                      "sigaction(SIGXCPU)");
 #endif
 #ifdef SIGXFSZ
@@ -667,13 +755,13 @@ static void set_signals(void)
      * rather than terminate the process. */
     sa.sa_handler = SIG_IGN;
     if (sigaction(SIGXFSZ, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00463)
                      "sigaction(SIGXFSZ)");
 #endif
 #ifdef SIGPIPE
     sa.sa_handler = SIG_IGN;
     if (sigaction(SIGPIPE, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00464)
                      "sigaction(SIGPIPE)");
 #endif
 
@@ -683,10 +771,10 @@ static void set_signals(void)
     sigaddset(&sa.sa_mask, AP_SIG_GRACEFUL);
     sa.sa_handler = restart;
     if (sigaction(SIGHUP, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00465)
                      "sigaction(SIGHUP)");
     if (sigaction(AP_SIG_GRACEFUL, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00466)
                      "sigaction(" AP_SIG_GRACEFUL_STRING ")");
 #else
     if (!one_process) {
@@ -715,52 +803,126 @@ static void set_signals(void)
 #endif
 }
 
-static int start_lingering_close(conn_state_t *cs)
+static void notify_suspend(event_conn_state_t *cs)
 {
-    apr_status_t rv;
-    if (ap_start_lingering_close(cs->c)) {
-        apr_pool_clear(cs->p);
-        ap_push_pool(worker_queue_info, cs->p);
-        return 0;
-    }
-    else {
-        apr_socket_t *csd = ap_get_conn_socket(cs->c);
-        struct timeout_queue *q;
+    ap_run_suspend_connection(cs->c, cs->r);
+    cs->suspended = 1;
+    cs->c->sbh = NULL;
+}
+
+static void notify_resume(event_conn_state_t *cs, ap_sb_handle_t *sbh)
+{
+    cs->c->sbh = sbh;
+    cs->suspended = 0;
+    ap_run_resume_connection(cs->c, cs->r);
+}
 
+static int start_lingering_close_common(event_conn_state_t *cs, int in_worker)
+{
+    apr_status_t rv;
+    struct timeout_queue *q;
+    apr_socket_t *csd = cs->pfd.desc.s;
+#ifdef AP_DEBUG
+    {
         rv = apr_socket_timeout_set(csd, 0);
         AP_DEBUG_ASSERT(rv == APR_SUCCESS);
-        /*
-         * If some module requested a shortened waiting period, only wait for
-         * 2s (SECONDS_TO_LINGER). This is useful for mitigating certain
-         * DoS attacks.
-         */
-        if (apr_table_get(cs->c->notes, "short-lingering-close")) {
-            cs->expiration_time =
-                apr_time_now() + apr_time_from_sec(SECONDS_TO_LINGER);
-            q = &short_linger_q;
-            cs->state = CONN_STATE_LINGER_SHORT;
-        }
-        else {
-            cs->expiration_time =
-                apr_time_now() + apr_time_from_sec(MAX_SECS_TO_LINGER);
-            q = &linger_q;
-            cs->state = CONN_STATE_LINGER_NORMAL;
-        }
+    }
+#else
+    apr_socket_timeout_set(csd, 0);
+#endif
+    cs->queue_timestamp = apr_time_now();
+    /*
+     * If some module requested a shortened waiting period, only wait for
+     * 2s (SECONDS_TO_LINGER). This is useful for mitigating certain
+     * DoS attacks.
+     */
+    if (apr_table_get(cs->c->notes, "short-lingering-close")) {
+        q = short_linger_q;
+        cs->pub.state = CONN_STATE_LINGER_SHORT;
+    }
+    else {
+        q = linger_q;
+        cs->pub.state = CONN_STATE_LINGER_NORMAL;
+    }
+    apr_atomic_inc32(&lingering_count);
+    if (in_worker) {
+        notify_suspend(cs);
+    }
+    else {
+        cs->c->sbh = NULL;
+    }
+    apr_thread_mutex_lock(timeout_mutex);
+    TO_QUEUE_APPEND(q, cs);
+    cs->pfd.reqevents = (
+            cs->pub.sense == CONN_SENSE_WANT_WRITE ? APR_POLLOUT :
+                    APR_POLLIN) | APR_POLLHUP | APR_POLLERR;
+    cs->pub.sense = CONN_SENSE_DEFAULT;
+    rv = apr_pollset_add(event_pollset, &cs->pfd);
+    apr_thread_mutex_unlock(timeout_mutex);
+    if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) {
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03092)
+                     "start_lingering_close: apr_pollset_add failure");
         apr_thread_mutex_lock(timeout_mutex);
-        TO_QUEUE_APPEND(*q, cs);
+        TO_QUEUE_REMOVE(q, cs);
         apr_thread_mutex_unlock(timeout_mutex);
-        cs->pfd.reqevents = APR_POLLIN | APR_POLLHUP | APR_POLLERR;
-        rv = apr_pollset_add(event_pollset, &cs->pfd);
-        if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) {
-            ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
-                         "start_lingering_close: apr_pollset_add failure");
-            AP_DEBUG_ASSERT(0);
-        }
+        apr_socket_close(cs->pfd.desc.s);
+        ap_push_pool(worker_queue_info, cs->p);
+        return 0;
     }
     return 1;
 }
 
-static int stop_lingering_close(conn_state_t *cs)
+/*
+ * Close our side of the connection, flushing data to the client first.
+ * Pre-condition: cs is not in any timeout queue and not in the pollset,
+ *                timeout_mutex is not locked
+ * return: 0 if connection is fully closed,
+ *         1 if connection is lingering
+ * May only be called by worker thread.
+ */
+static int start_lingering_close_blocking(event_conn_state_t *cs)
+{
+    if (ap_start_lingering_close(cs->c)) {
+        notify_suspend(cs);
+        ap_push_pool(worker_queue_info, cs->p);
+        return 0;
+    }
+    return start_lingering_close_common(cs, 1);
+}
+
+/*
+ * Close our side of the connection, NOT flushing data to the client.
+ * This should only be called if there has been an error or if we know
+ * that our send buffers are empty.
+ * Pre-condition: cs is not in any timeout queue and not in the pollset,
+ *                timeout_mutex is not locked
+ * return: 0 if connection is fully closed,
+ *         1 if connection is lingering
+ * may be called by listener thread
+ */
+static int start_lingering_close_nonblocking(event_conn_state_t *cs)
+{
+    conn_rec *c = cs->c;
+    apr_socket_t *csd = cs->pfd.desc.s;
+
+    if (ap_prep_lingering_close(c)
+        || c->aborted
+        || ap_shutdown_conn(c, 0) != APR_SUCCESS || c->aborted
+        || apr_socket_shutdown(csd, APR_SHUTDOWN_WRITE) != APR_SUCCESS) {
+        apr_socket_close(csd);
+        ap_push_pool(worker_queue_info, cs->p);
+        return 0;
+    }
+    return start_lingering_close_common(cs, 0);
+}
+
+/*
+ * forcibly close a lingering connection after the lingering period has
+ * expired
+ * Pre-condition: cs is not in any timeout queue and not in the pollset
+ * return: irrelevant (need same prototype as start_lingering_close)
+ */
+static int stop_lingering_close(event_conn_state_t *cs)
 {
     apr_status_t rv;
     apr_socket_t *csd = ap_get_conn_socket(cs->c);
@@ -768,60 +930,128 @@ static int stop_lingering_close(conn_state_t *cs)
                  "socket reached timeout in lingering-close state");
     rv = apr_socket_close(csd);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, "error closing socket");
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(00468) "error closing socket");
         AP_DEBUG_ASSERT(0);
     }
-    apr_pool_clear(cs->p);
     ap_push_pool(worker_queue_info, cs->p);
     return 0;
 }
 
+/*
+ * This runs before any non-MPM cleanup code on the connection;
+ * if the connection is currently suspended as far as modules
+ * know, provide notification of resumption.
+ */
+static apr_status_t ptrans_pre_cleanup(void *dummy)
+{
+    event_conn_state_t *cs = dummy;
 
+    if (cs->suspended) {
+        notify_resume(cs, NULL);
+    }
+    return APR_SUCCESS;
+}
 
-/*****************************************************************
- * Child process main loop.
+/*
+ * event_pre_read_request() and event_request_cleanup() track the
+ * current r for a given connection.
+ */
+static apr_status_t event_request_cleanup(void *dummy)
+{
+    conn_rec *c = dummy;
+    event_conn_state_t *cs = ap_get_module_config(c->conn_config,
+                                                  &mpm_event_module);
+
+    cs->r = NULL;
+    return APR_SUCCESS;
+}
+
+static void event_pre_read_request(request_rec *r, conn_rec *c)
+{
+    event_conn_state_t *cs = ap_get_module_config(c->conn_config,
+                                                  &mpm_event_module);
+
+    cs->r = r;
+    cs->sc = ap_get_module_config(ap_server_conf->module_config,
+                                  &mpm_event_module);
+    apr_pool_cleanup_register(r->pool, c, event_request_cleanup,
+                              apr_pool_cleanup_null);
+}
+
+/*
+ * event_post_read_request() tracks the current server config for a
+ * given request.
  */
+static int event_post_read_request(request_rec *r)
+{
+    conn_rec *c = r->connection;
+    event_conn_state_t *cs = ap_get_module_config(c->conn_config,
+                                                  &mpm_event_module);
 
-static int process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock,
-                          conn_state_t * cs, int my_child_num,
+    /* To preserve legacy behaviour (consistent with other MPMs), use
+     * the keepalive timeout from the base server (first on this IP:port)
+     * when none is explicitly configured on this server.
+     */
+    if (r->server->keep_alive_timeout_set) {
+        cs->sc = ap_get_module_config(r->server->module_config,
+                                      &mpm_event_module);
+    }
+    else {
+        cs->sc = ap_get_module_config(c->base_server->module_config,
+                                      &mpm_event_module);
+    }
+    return OK;
+}
+
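
A self-contained sketch of the fallback rule above, using stand-in types and hypothetical timeout values rather than the real server_rec:

```c
#include <stdio.h>

/* stand-in for the two server_rec fields consulted above */
struct srv { int keep_alive_timeout_set; int keep_alive_timeout; };

/* a vhost without an explicit KeepAliveTimeout inherits the base server's
 * (first server on this IP:port), not the compiled-in default */
static int effective_ka_timeout(const struct srv *vhost, const struct srv *base)
{
    return vhost->keep_alive_timeout_set ? vhost->keep_alive_timeout
                                         : base->keep_alive_timeout;
}

int main(void)
{
    struct srv base  = { 1, 5 };       /* KeepAliveTimeout 5 */
    struct srv vhost = { 0, 0 };       /* none set explicitly */
    printf("%d\n", effective_ka_timeout(&vhost, &base));   /* prints 5 */
    return 0;
}
```
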
+/*
+ * process one connection in the worker
+ */
+static void process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock,
+                          event_conn_state_t * cs, int my_child_num,
                           int my_thread_num)
 {
     conn_rec *c;
-    listener_poll_type *pt;
     long conn_id = ID_FROM_CHILD_THREAD(my_child_num, my_thread_num);
     int rc;
     ap_sb_handle_t *sbh;
 
+    /* XXX: This will cause unbounded mem usage for long lasting connections */
     ap_create_sb_handle(&sbh, p, my_child_num, my_thread_num);
 
     if (cs == NULL) {           /* This is a new connection */
-
-        cs = apr_pcalloc(p, sizeof(conn_state_t));
-
-        pt = apr_pcalloc(p, sizeof(*pt));
-
+        listener_poll_type *pt = apr_pcalloc(p, sizeof(*pt));
+        cs = apr_pcalloc(p, sizeof(event_conn_state_t));
         cs->bucket_alloc = apr_bucket_alloc_create(p);
         c = ap_run_create_connection(p, ap_server_conf, sock,
                                      conn_id, sbh, cs->bucket_alloc);
+        if (!c) {
+            ap_push_pool(worker_queue_info, p);
+            return;
+        }
         apr_atomic_inc32(&connection_count);
-        apr_pool_cleanup_register(c->pool, NULL, decrement_connection_count, apr_pool_cleanup_null);
+        apr_pool_cleanup_register(c->pool, cs, decrement_connection_count,
+                                  apr_pool_cleanup_null);
+        ap_set_module_config(c->conn_config, &mpm_event_module, cs);
         c->current_thread = thd;
         cs->c = c;
-        c->cs = cs;
+        c->cs = &(cs->pub);
         cs->p = p;
+        cs->sc = ap_get_module_config(ap_server_conf->module_config,
+                                      &mpm_event_module);
         cs->pfd.desc_type = APR_POLL_SOCKET;
         cs->pfd.reqevents = APR_POLLIN;
         cs->pfd.desc.s = sock;
         pt->type = PT_CSD;
         pt->baton = cs;
         cs->pfd.client_data = pt;
+        apr_pool_pre_cleanup_register(p, cs, ptrans_pre_cleanup);
         TO_QUEUE_ELEM_INIT(cs);
 
         ap_update_vhost_given_ip(c);
 
         rc = ap_run_pre_connection(c, sock);
         if (rc != OK && rc != DONE) {
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c,
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(00469)
                           "process_socket: connection aborted");
             c->aborted = 1;
         }
@@ -839,29 +1069,35 @@ static int process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock
          * When the accept filter is active, sockets are kept in the
          * kernel until a HTTP request is received.
          */
-        cs->state = CONN_STATE_READ_REQUEST_LINE;
+        cs->pub.state = CONN_STATE_READ_REQUEST_LINE;
 
+        cs->pub.sense = CONN_SENSE_DEFAULT;
     }
     else {
         c = cs->c;
-        c->sbh = sbh;
-        pt = cs->pfd.client_data;
+        notify_resume(cs, sbh);
         c->current_thread = thd;
+        /* Subsequent request on a conn, and thread number is part of ID */
+        c->id = conn_id;
     }
 
     if (c->clogging_input_filters && !c->aborted) {
-        /* Since we have an input filter which 'cloggs' the input stream,
-         * like mod_ssl, lets just do the normal read from input filters,
-         * like the Worker MPM does.
+        /* Since we have an input filter which 'clogs' the input stream,
+         * like mod_ssl used to, lets just do the normal read from input
+         * filters, like the Worker MPM does. Filters that need to write
+         * where they would otherwise read, or read where they would
+         * otherwise write, should set the sense appropriately.
          */
+        apr_atomic_inc32(&clogged_count);
         ap_run_process_connection(c);
-        if (cs->state != CONN_STATE_SUSPENDED) {
-            cs->state = CONN_STATE_LINGER;
+        if (cs->pub.state != CONN_STATE_SUSPENDED) {
+            cs->pub.state = CONN_STATE_LINGER;
         }
+        apr_atomic_dec32(&clogged_count);
     }
 
 read_request:
-    if (cs->state == CONN_STATE_READ_REQUEST_LINE) {
+    if (cs->pub.state == CONN_STATE_READ_REQUEST_LINE) {
         if (!c->aborted) {
             ap_run_process_connection(c);
 
@@ -871,11 +1107,11 @@ read_request:
              */
         }
         else {
-            cs->state = CONN_STATE_LINGER;
+            cs->pub.state = CONN_STATE_LINGER;
         }
     }
 
-    if (cs->state == CONN_STATE_WRITE_COMPLETION) {
+    if (cs->pub.state == CONN_STATE_WRITE_COMPLETION) {
         ap_filter_t *output_filter = c->output_filters;
         apr_status_t rv;
         ap_update_child_status_from_conn(sbh, SERVER_BUSY_WRITE, c);
@@ -884,44 +1120,45 @@ read_request:
         }
         rv = output_filter->frec->filter_func.out_func(output_filter, NULL);
         if (rv != APR_SUCCESS) {
-            ap_log_cerror(APLOG_MARK, APLOG_WARNING, rv, c,
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(00470)
                           "network write failure in core output filter");
-            cs->state = CONN_STATE_LINGER;
+            cs->pub.state = CONN_STATE_LINGER;
         }
         else if (c->data_in_output_filters) {
             /* Still in WRITE_COMPLETION_STATE:
              * Set a write timeout for this connection, and let the
              * event thread poll for writeability.
              */
-            cs->expiration_time = ap_server_conf->timeout + apr_time_now();
+            cs->queue_timestamp = apr_time_now();
+            notify_suspend(cs);
             apr_thread_mutex_lock(timeout_mutex);
-            TO_QUEUE_APPEND(write_completion_q, cs);
-            apr_thread_mutex_unlock(timeout_mutex);
-            cs->pfd.reqevents = APR_POLLOUT | APR_POLLHUP | APR_POLLERR;
+            TO_QUEUE_APPEND(cs->sc->wc_q, cs);
+            cs->pfd.reqevents = (
+                    cs->pub.sense == CONN_SENSE_WANT_READ ? APR_POLLIN :
+                            APR_POLLOUT) | APR_POLLHUP | APR_POLLERR;
+            cs->pub.sense = CONN_SENSE_DEFAULT;
             rc = apr_pollset_add(event_pollset, &cs->pfd);
-            return 1;
+            apr_thread_mutex_unlock(timeout_mutex);
+            return;
         }
         else if (c->keepalive != AP_CONN_KEEPALIVE || c->aborted ||
             listener_may_exit) {
-            c->cs->state = CONN_STATE_LINGER;
+            cs->pub.state = CONN_STATE_LINGER;
         }
         else if (c->data_in_input_filters) {
-            cs->state = CONN_STATE_READ_REQUEST_LINE;
+            cs->pub.state = CONN_STATE_READ_REQUEST_LINE;
             goto read_request;
         }
         else {
-            cs->state = CONN_STATE_CHECK_REQUEST_LINE_READABLE;
+            cs->pub.state = CONN_STATE_CHECK_REQUEST_LINE_READABLE;
         }
     }
 
-    if (cs->state == CONN_STATE_LINGER) {
-        if (!start_lingering_close(cs))
-            return 0;
+    if (cs->pub.state == CONN_STATE_LINGER) {
+        start_lingering_close_blocking(cs);
     }
-    else if (cs->state == CONN_STATE_CHECK_REQUEST_LINE_READABLE) {
-        apr_status_t rc;
-
-        /* It greatly simplifies the logic to use a single timeout value here
+    else if (cs->pub.state == CONN_STATE_CHECK_REQUEST_LINE_READABLE) {
+        /* It greatly simplifies the logic to use a single timeout value per q
          * because the new element can just be added to the end of the list and
          * it will stay sorted in expiration time sequence.  If brand new
          * sockets are sent to the event thread for a readability check, this
@@ -929,43 +1166,50 @@ read_request:
          * timeout today.  With a normal client, the socket will be readable in
          * a few milliseconds anyway.
          */
-        cs->expiration_time = ap_server_conf->keep_alive_timeout +
-                              apr_time_now();
+        cs->queue_timestamp = apr_time_now();
+        notify_suspend(cs);
         apr_thread_mutex_lock(timeout_mutex);
-        TO_QUEUE_APPEND(keepalive_q, cs);
-        apr_thread_mutex_unlock(timeout_mutex);
+        TO_QUEUE_APPEND(cs->sc->ka_q, cs);
 
         /* Add work to pollset. */
         cs->pfd.reqevents = APR_POLLIN;
         rc = apr_pollset_add(event_pollset, &cs->pfd);
+        apr_thread_mutex_unlock(timeout_mutex);
 
         if (rc != APR_SUCCESS) {
-            ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
+            ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf, APLOGNO(03093)
                          "process_socket: apr_pollset_add failure");
             AP_DEBUG_ASSERT(rc == APR_SUCCESS);
         }
     }
-    return 1;
+    else if (cs->pub.state == CONN_STATE_SUSPENDED) {
+        apr_atomic_inc32(&suspended_count);
+        notify_suspend(cs);
+    }
 }
 
-/* requests_this_child has gone to zero or below.  See if the admin coded
+/* conns_this_child has gone to zero or below.  See if the admin coded
    "MaxConnectionsPerChild 0", and keep going in that case.  Doing it this way
    simplifies the hot path in worker_thread */
 static void check_infinite_requests(void)
 {
     if (ap_max_requests_per_child) {
+        ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
+                     "Stopping process due to MaxConnectionsPerChild");
         signal_threads(ST_GRACEFUL);
     }
     else {
-        requests_this_child = INT_MAX;  /* keep going */
+        /* keep going */
+        conns_this_child = APR_INT32_MAX;
     }
 }
 
-static void close_listeners(int process_slot, int *closed) {
+static void close_listeners(int process_slot, int *closed)
+{
     if (!*closed) {
         int i;
         disable_listensocks(process_slot);
-        ap_close_listeners();
+        ap_close_listeners_ex(my_bucket->listeners);
         *closed = 1;
         dying = 1;
         ap_scoreboard_image->parent[process_slot].quiescing = 1;
@@ -999,47 +1243,14 @@ static void dummy_signal_handler(int sig)
 }
 
 
-#if HAVE_SERF
-static apr_status_t s_socket_add(void *user_baton,
-                                 apr_pollfd_t *pfd,
-                                 void *serf_baton)
-{
-    s_baton_t *s = (s_baton_t*)user_baton;
-    /* XXXXX: recycle listener_poll_types */
-    listener_poll_type *pt = malloc(sizeof(*pt));
-    pt->type = PT_SERF;
-    pt->baton = serf_baton;
-    pfd->client_data = pt;
-    return apr_pollset_add(s->pollset, pfd);
-}
-
-static apr_status_t s_socket_remove(void *user_baton,
-                                    apr_pollfd_t *pfd,
-                                    void *serf_baton)
-{
-    s_baton_t *s = (s_baton_t*)user_baton;
-    listener_poll_type *pt = pfd->client_data;
-    free(pt);
-    return apr_pollset_remove(s->pollset, pfd);
-}
-#endif
-
 static apr_status_t init_pollset(apr_pool_t *p)
 {
-#if HAVE_SERF
-    s_baton_t *baton = NULL;
-#endif
     ap_listen_rec *lr;
     listener_poll_type *pt;
     int i = 0;
 
-    TO_QUEUE_INIT(write_completion_q);
-    TO_QUEUE_INIT(keepalive_q);
-    TO_QUEUE_INIT(linger_q);
-    TO_QUEUE_INIT(short_linger_q);
-
     listener_pollfd = apr_palloc(p, sizeof(apr_pollfd_t) * num_listensocks);
-    for (lr = ap_listeners; lr != NULL; lr = lr->next, i++) {
+    for (lr = my_bucket->listeners; lr != NULL; lr = lr->next, i++) {
         apr_pollfd_t *pfd;
         AP_DEBUG_ASSERT(i < num_listensocks);
         pfd = &listener_pollfd[i];
@@ -1059,21 +1270,6 @@ static apr_status_t init_pollset(apr_pool_t *p)
         lr->accept_func = ap_unixd_accept;
     }
 
-#if HAVE_SERF
-    baton = apr_pcalloc(p, sizeof(*baton));
-    baton->pollset = event_pollset;
-    /* TODO: subpools, threads, reuse, etc.  -- currently use malloc() inside :( */
-    baton->pool = p;
-
-    g_serf = serf_context_create_ex(baton,
-                                    s_socket_add,
-                                    s_socket_remove, p);
-
-    ap_register_provider(p, "mpm_serf",
-                         "instance", "0", g_serf);
-
-#endif
-
     return APR_SUCCESS;
 }
 
@@ -1082,28 +1278,17 @@ static apr_status_t push_timer2worker(timer_event_t* te)
     return ap_queue_push_timer(worker_queue, te);
 }
 
+/*
+ * Pre-condition: pfd->cs is neither in pollset nor timeout queue
+ * this function may only be called by the listener
+ */
 static apr_status_t push2worker(const apr_pollfd_t * pfd,
                                 apr_pollset_t * pollset)
 {
     listener_poll_type *pt = (listener_poll_type *) pfd->client_data;
-    conn_state_t *cs = (conn_state_t *) pt->baton;
+    event_conn_state_t *cs = (event_conn_state_t *) pt->baton;
     apr_status_t rc;
 
-    rc = apr_pollset_remove(pollset, pfd);
-
-    /*
-     * Some of the pollset backends, like KQueue or Epoll
-     * automagically remove the FD if the socket is closed,
-     * therefore, we can accept _SUCCESS or _NOTFOUND,
-     * and we still want to keep going
-     */
-    if (rc != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rc)) {
-        ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
-                     "pollset remove failed");
-        start_lingering_close(cs);
-        return rc;
-    }
-
     rc = ap_queue_push(worker_queue, cs->pfd.desc.s, cs, cs->p);
     if (rc != APR_SUCCESS) {
         /* trash the connection; we couldn't queue the connected
@@ -1112,8 +1297,7 @@ static apr_status_t push2worker(const apr_pollfd_t * pfd,
         apr_bucket_alloc_destroy(cs->bucket_alloc);
         apr_socket_close(cs->pfd.desc.s);
         ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                     ap_server_conf, "push2worker: ap_queue_push failed");
-        apr_pool_clear(cs->p);
+                     ap_server_conf, APLOGNO(00471) "push2worker: ap_queue_push failed");
         ap_push_pool(worker_queue_info, cs->p);
     }
 
@@ -1145,48 +1329,62 @@ static void get_worker(int *have_idle_worker_p, int blocking, int *all_busy)
     else
         rc = ap_queue_info_try_get_idler(worker_queue_info);
 
-    if (rc == APR_SUCCESS) {
+    if (rc == APR_SUCCESS || APR_STATUS_IS_EOF(rc)) {
         *have_idle_worker_p = 1;
     }
     else if (!blocking && rc == APR_EAGAIN) {
         *all_busy = 1;
     }
-    else if (!APR_STATUS_IS_EOF(rc)) {
-        ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
+    else {
+        ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf, APLOGNO(00472)
                      "ap_queue_info_wait_for_idler failed.  "
                      "Attempting to shutdown process gracefully");
         signal_threads(ST_GRACEFUL);
     }
 }
 
-/* XXXXXX: Convert to skiplist or other better data structure
- * (yes, this is VERY VERY VERY VERY BAD)
- */
-
 /* Structures to reuse */
 static APR_RING_HEAD(timer_free_ring_t, timer_event_t) timer_free_ring;
-/* Active timers */
-static APR_RING_HEAD(timer_ring_t, timer_event_t) timer_ring;
 
-static apr_thread_mutex_t *g_timer_ring_mtx;
+static apr_skiplist *timer_skiplist;
+
+/* The following compare function is used by apr_skiplist_insert() to keep the
+ * elements (timers) sorted and provide O(log n) complexity (this is also true
+ * for apr_skiplist_{find,remove}(), but those are not used in MPM event where
+ * inserted timers are not searched nor removed, but with apr_skiplist_pop()
+ * which does not use any compare function).  It is meant to return 0 when a == b,
+ * <0 when a < b, and >0 when a > b.  However apr_skiplist_insert() will not
+ * add duplicates (i.e. a == b), and apr_skiplist_add() is only available in
+ * APR 1.6, yet multiple timers could possibly be created in the same micro-
+ * second (duplicates with regard to apr_time_t); therefore we implement the
+ * compare function to return +1 instead of 0 when compared timers are equal,
+ * thus duplicates are still added after each other (in order of insertion).
+ */
+static int timer_comp(void *a, void *b)
+{
+    apr_time_t t1 = (apr_time_t) ((timer_event_t *)a)->when;
+    apr_time_t t2 = (apr_time_t) ((timer_event_t *)b)->when;
+    AP_DEBUG_ASSERT(t1);
+    AP_DEBUG_ASSERT(t2);
+    return ((t1 < t2) ? -1 : 1);
+}
+
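
A standalone illustration of the tie-breaking rule, with stand-in types (the real timer_event_t is defined in the MPM headers): two timers created in the same microsecond still compare as unequal, so both insert, in order.

```c
#include <assert.h>

typedef long long fake_time_t;               /* stand-in for apr_time_t */
typedef struct { fake_time_t when; } fake_timer_t;

/* same rule as timer_comp() above: never return 0, so "equal" timers are
 * treated as a > b and inserted after existing ones instead of rejected */
static int fake_timer_comp(void *a, void *b)
{
    fake_time_t t1 = ((fake_timer_t *)a)->when;
    fake_time_t t2 = ((fake_timer_t *)b)->when;
    return (t1 < t2) ? -1 : 1;
}

int main(void)
{
    fake_timer_t a = { 1000 }, b = { 1000 };
    assert(fake_timer_comp(&a, &b) == 1);    /* duplicate 'when' still inserts */
    return 0;
}
```
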
+static apr_thread_mutex_t *g_timer_skiplist_mtx;
 
 static apr_status_t event_register_timed_callback(apr_time_t t,
                                                   ap_mpm_callback_fn_t *cbfn,
                                                   void *baton)
 {
-    int inserted = 0;
-    timer_event_t *ep;
     timer_event_t *te;
     /* oh yeah, and make locking smarter/fine grained. */
-    apr_thread_mutex_lock(g_timer_ring_mtx);
+    apr_thread_mutex_lock(g_timer_skiplist_mtx);
 
     if (!APR_RING_EMPTY(&timer_free_ring, timer_event_t, link)) {
         te = APR_RING_FIRST(&timer_free_ring);
         APR_RING_REMOVE(te, link);
     }
     else {
-        /* XXXXX: lol, pool allocation without a context from any thread.Yeah. Right. MPMs Suck. */
-        te = malloc(sizeof(timer_event_t));
+        te = apr_skiplist_alloc(timer_skiplist, sizeof(timer_event_t));
         APR_RING_ELEM_INIT(te, link);
     }
 
@@ -1195,36 +1393,29 @@ static apr_status_t event_register_timed_callback(apr_time_t t,
     /* XXXXX: optimize */
     te->when = t + apr_time_now();
 
-    /* Okay, insert sorted by when.. */
-    for (ep = APR_RING_FIRST(&timer_ring);
-         ep != APR_RING_SENTINEL(&timer_ring,
-                                 timer_event_t, link);
-         ep = APR_RING_NEXT(ep, link))
-    {
-        if (ep->when > te->when) {
-            inserted = 1;
-            APR_RING_INSERT_BEFORE(ep, te, link);
-            break;
-        }
-    }
-
-    if (!inserted) {
-        APR_RING_INSERT_TAIL(&timer_ring, te, timer_event_t, link);
-    }
+    /* Okay, add sorted by when.. */
+    apr_skiplist_insert(timer_skiplist, te);
 
-    apr_thread_mutex_unlock(g_timer_ring_mtx);
+    apr_thread_mutex_unlock(g_timer_skiplist_mtx);
 
     return APR_SUCCESS;
 }
 
-static void process_lingering_close(conn_state_t *cs, const apr_pollfd_t *pfd)
+
+/*
+ * Close socket and clean up if remote closed its end while we were in
+ * lingering close.
+ * Only to be called in the listener thread;
+ * Pre-condition: cs is in one of the linger queues and in the pollset
+ */
+static void process_lingering_close(event_conn_state_t *cs, const apr_pollfd_t *pfd)
 {
     apr_socket_t *csd = ap_get_conn_socket(cs->c);
     char dummybuf[2048];
     apr_size_t nbytes;
     apr_status_t rv;
     struct timeout_queue *q;
-    q = (cs->state == CONN_STATE_LINGER_SHORT) ?  &short_linger_q : &linger_q;
+    q = (cs->pub.state == CONN_STATE_LINGER_SHORT) ? short_linger_q : linger_q;
 
     /* socket is already in non-blocking state */
     do {
@@ -1232,22 +1423,21 @@ static void process_lingering_close(conn_state_t *cs, const apr_pollfd_t *pfd)
         rv = apr_socket_recv(csd, dummybuf, &nbytes);
     } while (rv == APR_SUCCESS);
 
-    if (!APR_STATUS_IS_EOF(rv)) {
+    if (APR_STATUS_IS_EAGAIN(rv)) {
         return;
     }
 
+    apr_thread_mutex_lock(timeout_mutex);
     rv = apr_pollset_remove(event_pollset, pfd);
     AP_DEBUG_ASSERT(rv == APR_SUCCESS);
 
     rv = apr_socket_close(csd);
     AP_DEBUG_ASSERT(rv == APR_SUCCESS);
 
-    apr_thread_mutex_lock(timeout_mutex);
-    TO_QUEUE_REMOVE(*q, cs);
+    TO_QUEUE_REMOVE(q, cs);
     apr_thread_mutex_unlock(timeout_mutex);
     TO_QUEUE_ELEM_INIT(cs);
 
-    apr_pool_clear(cs->p);
     ap_push_pool(worker_queue_info, cs->p);
 }
 
@@ -1257,36 +1447,68 @@ static void process_lingering_close(conn_state_t *cs, const apr_pollfd_t *pfd)
  */
 static void process_timeout_queue(struct timeout_queue *q,
                                   apr_time_t timeout_time,
-                                  int (*func)(conn_state_t *))
+                                  int (*func)(event_conn_state_t *))
 {
-    int count = 0;
-    conn_state_t *first, *cs, *last;
-    if (!q->count) {
+    int total = 0, count;
+    event_conn_state_t *first, *cs, *last;
+    struct timeout_head_t trash;
+    struct timeout_queue *qp;
+    apr_status_t rv;
+
+    if (!*q->total) {
         return;
     }
-    AP_DEBUG_ASSERT(!APR_RING_EMPTY(&q->head, conn_state_t, timeout_list));
 
-    cs = first = APR_RING_FIRST(&q->head);
-    while (cs != APR_RING_SENTINEL(&q->head, conn_state_t, timeout_list)
-           && cs->expiration_time < timeout_time) {
-        last = cs;
-        cs = APR_RING_NEXT(cs, timeout_list);
-        count++;
+    APR_RING_INIT(&trash, event_conn_state_t, timeout_list);
+    for (qp = q; qp; qp = qp->next) {
+        count = 0;
+        cs = first = last = APR_RING_FIRST(&qp->head);
+        while (cs != APR_RING_SENTINEL(&qp->head, event_conn_state_t,
+                                       timeout_list)
+               /* Trash the entry if:
+                * - no timeout_time was given (asked for all), or
+                * - it expired (according to the queue timeout), or
+                * - the system clock skewed in the past: no entry should be
+                *   registered above the given timeout_time (~now) + the queue
+                *   timeout, we won't keep any here (eg. for centuries).
+                * Stop otherwise, no following entry will match thanks to the
+                * single timeout per queue (entries are added to the end!).
+                * This allows maintenance in O(1).
+                */
+               && (!timeout_time
+                   || cs->queue_timestamp + qp->timeout < timeout_time
+                   || cs->queue_timestamp > timeout_time + qp->timeout)) {
+            last = cs;
+            rv = apr_pollset_remove(event_pollset, &cs->pfd);
+            if (rv != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rv)) {
+                ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, cs->c, APLOGNO(00473)
+                              "apr_pollset_remove failed");
+            }
+            cs = APR_RING_NEXT(cs, timeout_list);
+            count++;
+        }
+        if (!count)
+            continue;
+
+        APR_RING_UNSPLICE(first, last, timeout_list);
+        APR_RING_SPLICE_TAIL(&trash, first, last, event_conn_state_t,
+                             timeout_list);
+        qp->count -= count;
+        total += count;
     }
-    if (!count)
+    if (!total)
         return;
 
-    APR_RING_UNSPLICE(first, last, timeout_list);
-    AP_DEBUG_ASSERT(q->count >= count);
-    q->count -= count;
+    AP_DEBUG_ASSERT(*q->total >= total);
+    *q->total -= total;
     apr_thread_mutex_unlock(timeout_mutex);
-    while (count) {
+    first = APR_RING_FIRST(&trash);
+    do {
         cs = APR_RING_NEXT(first, timeout_list);
         TO_QUEUE_ELEM_INIT(first);
         func(first);
         first = cs;
-        count--;
-    }
+    } while (--total);
     apr_thread_mutex_lock(timeout_mutex);
 }
 
@@ -1302,7 +1524,6 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
     apr_pool_t *ptrans;         /* Pool for per-transaction stuff */
     ap_listen_rec *lr;
     int have_idle_worker = 0;
-    conn_state_t *cs;
     const apr_pollfd_t *out_pfd;
     apr_int32_t num = 0;
     apr_interval_time_t timeout_interval;
@@ -1321,9 +1542,6 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
 #define TIMEOUT_FUDGE_FACTOR 100000
 #define EVENT_FUDGE_FACTOR 10000
 
-/* XXX: this should be a config options */
-#define WORKER_OVERCOMMIT 2
-
     rc = init_pollset(tpool);
     if (rc != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
@@ -1348,9 +1566,8 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                 break;
         }
 
-        if (requests_this_child <= 0) {
+        if (conns_this_child <= 0)
             check_infinite_requests();
-        }
 
         now = apr_time_now();
         if (APLOGtrace6(ap_server_conf)) {
@@ -1359,18 +1576,21 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                 last_log = now;
                 apr_thread_mutex_lock(timeout_mutex);
                 ap_log_error(APLOG_MARK, APLOG_TRACE6, 0, ap_server_conf,
-                             "connections: %d (write-completion: %d "
-                             "keep-alive: %d lingering: %d)",
-                             connection_count, write_completion_q.count,
-                             keepalive_q.count,
-                             linger_q.count + short_linger_q.count);
+                             "connections: %u (clogged: %u write-completion: %d "
+                             "keep-alive: %d lingering: %d suspended: %u)",
+                             apr_atomic_read32(&connection_count),
+                             apr_atomic_read32(&clogged_count),
+                             *write_completion_q->total,
+                             *keepalive_q->total,
+                             apr_atomic_read32(&lingering_count),
+                             apr_atomic_read32(&suspended_count));
                 apr_thread_mutex_unlock(timeout_mutex);
             }
         }
 
-        apr_thread_mutex_lock(g_timer_ring_mtx);
-        if (!APR_RING_EMPTY(&timer_ring, timer_event_t, link)) {
-            te = APR_RING_FIRST(&timer_ring);
+        apr_thread_mutex_lock(g_timer_skiplist_mtx);
+        te = apr_skiplist_peek(timer_skiplist);
+        if (te) {
             if (te->when > now) {
                 timeout_interval = te->when - now;
             }
@@ -1381,14 +1601,8 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
         else {
             timeout_interval = apr_time_from_msec(100);
         }
-        apr_thread_mutex_unlock(g_timer_ring_mtx);
+        apr_thread_mutex_unlock(g_timer_skiplist_mtx);
 
-#if HAVE_SERF
-        rc = serf_context_prerun(g_serf);
-        if (rc != APR_SUCCESS) {
-            /* TOOD: what should do here? ugh. */
-        }
-#endif
         rc = apr_pollset_poll(event_pollset, timeout_interval, &num, &out_pfd);
         if (rc != APR_SUCCESS) {
             if (APR_STATUS_IS_EINTR(rc)) {
@@ -1410,33 +1624,32 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
         }
 
         now = apr_time_now();
-        apr_thread_mutex_lock(g_timer_ring_mtx);
-        for (ep = APR_RING_FIRST(&timer_ring);
-             ep != APR_RING_SENTINEL(&timer_ring,
-                                     timer_event_t, link);
-             ep = APR_RING_FIRST(&timer_ring))
-        {
+        apr_thread_mutex_lock(g_timer_skiplist_mtx);
+        ep = apr_skiplist_peek(timer_skiplist);
+        while (ep) {
             if (ep->when < now + EVENT_FUDGE_FACTOR) {
-                APR_RING_REMOVE(ep, link);
+                apr_skiplist_pop(timer_skiplist, NULL);
                 push_timer2worker(ep);
             }
             else {
                 break;
             }
+            ep = apr_skiplist_peek(timer_skiplist);
         }
-        apr_thread_mutex_unlock(g_timer_ring_mtx);
+        apr_thread_mutex_unlock(g_timer_skiplist_mtx);
 
         while (num) {
             pt = (listener_poll_type *) out_pfd->client_data;
             if (pt->type == PT_CSD) {
                 /* one of the sockets is readable */
-                struct timeout_queue *remove_from_q = &write_completion_q;
+                event_conn_state_t *cs = (event_conn_state_t *) pt->baton;
+                struct timeout_queue *remove_from_q = cs->sc->wc_q;
                 int blocking = 1;
-                cs = (conn_state_t *) pt->baton;
-                switch (cs->state) {
+
+                switch (cs->pub.state) {
                 case CONN_STATE_CHECK_REQUEST_LINE_READABLE:
-                    cs->state = CONN_STATE_READ_REQUEST_LINE;
-                    remove_from_q = &keepalive_q;
+                    cs->pub.state = CONN_STATE_READ_REQUEST_LINE;
+                    remove_from_q = cs->sc->ka_q;
                     /* don't wait for a worker for a keepalive request */
                     blocking = 0;
                     /* FALL THROUGH */
@@ -1444,21 +1657,37 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                     get_worker(&have_idle_worker, blocking,
                                &workers_were_busy);
                     apr_thread_mutex_lock(timeout_mutex);
-                    TO_QUEUE_REMOVE(*remove_from_q, cs);
+                    TO_QUEUE_REMOVE(remove_from_q, cs);
+                    rc = apr_pollset_remove(event_pollset, &cs->pfd);
                     apr_thread_mutex_unlock(timeout_mutex);
+
+                    /*
+                     * Some of the pollset backends, like KQueue or Epoll
+                     * automagically remove the FD if the socket is closed,
+                     * therefore, we can accept _SUCCESS or _NOTFOUND,
+                     * and we still want to keep going
+                     */
+                    if (rc != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rc)) {
+                        ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
+                                     APLOGNO(03094) "pollset remove failed");
+                        start_lingering_close_nonblocking(cs);
+                        break;
+                    }
+
                     TO_QUEUE_ELEM_INIT(cs);
                     /* If we didn't get a worker immediately for a keep-alive
                      * request, we close the connection, so that the client can
                      * re-connect to a different process.
                      */
                     if (!have_idle_worker) {
-                        start_lingering_close(cs);
+                        start_lingering_close_nonblocking(cs);
                         break;
                     }
                     rc = push2worker(out_pfd, event_pollset);
                     if (rc != APR_SUCCESS) {
                         ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                                     ap_server_conf, "push2worker failed");
+                                     ap_server_conf, APLOGNO(03095)
+                                     "push2worker failed");
                     }
                     else {
                         have_idle_worker = 0;
@@ -1470,10 +1699,10 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                     break;
                 default:
                     ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                                 ap_server_conf,
+                                 ap_server_conf, APLOGNO(03096)
                                  "event_loop: unexpected state %d",
-                                 cs->state);
-                    AP_DEBUG_ASSERT(0);
+                                 cs->pub.state);
+                    ap_assert(0);
                 }
             }
             else if (pt->type == PT_ACCEPT) {
@@ -1483,11 +1712,14 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                         disable_listensocks(process_slot);
                     listeners_disabled = 1;
                     ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
-                                 "All workers busy, not accepting new conns"
+                                 "All workers busy, not accepting new conns "
                                  "in this process");
                 }
-                else if (apr_atomic_read32(&connection_count) > threads_per_child
-                         + ap_queue_info_get_idlers(worker_queue_info) * WORKER_OVERCOMMIT)
+                else if (  (int)apr_atomic_read32(&connection_count)
+                           - (int)apr_atomic_read32(&lingering_count)
+                         > threads_per_child
+                           + ap_queue_info_get_idlers(worker_queue_info) *
+                             worker_factor / WORKER_FACTOR_SCALE)
                 {
                     if (!listeners_disabled)
                         disable_listensocks(process_slot);
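/* Worked example (illustrative numbers, not from the patch): with
 * threads_per_child = 25, 10 idle workers, and the default
 * worker_factor = 2 * WORKER_FACTOR_SCALE = 32, the listener disables
 * accept once
 *
 *   connection_count - lingering_count > 25 + 10 * 32 / 16 = 45
 *
 * i.e. at most 45 non-lingering connections are admitted.  Unlike the
 * old fixed WORKER_OVERCOMMIT factor, connections in lingering close no
 * longer count against the budget, and the factor is tunable in
 * sixteenths via worker_factor.
 */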
@@ -1519,7 +1751,7 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                         apr_allocator_owner_set(allocator, ptrans);
                         if (ptrans == NULL) {
                             ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                                         ap_server_conf,
+                                         ap_server_conf, APLOGNO(03097)
                                          "Failed to create transaction pool");
                             signal_threads(ST_GRACEFUL);
                             return NULL;
@@ -1542,6 +1774,7 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                     }
 
                     if (csd != NULL) {
+                        conns_this_child--;
                         rc = ap_queue_push(worker_queue, csd, NULL, ptrans);
                         if (rc != APR_SUCCESS) {
                             /* trash the connection; we couldn't queue the connected
@@ -1549,9 +1782,8 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                              */
                             apr_socket_close(csd);
                             ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                                         ap_server_conf,
+                                         ap_server_conf, APLOGNO(03098)
                                          "ap_queue_push failed");
-                            apr_pool_clear(ptrans);
                             ap_push_pool(worker_queue_info, ptrans);
                         }
                         else {
@@ -1559,18 +1791,10 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                         }
                     }
                     else {
-                        apr_pool_clear(ptrans);
                         ap_push_pool(worker_queue_info, ptrans);
                     }
                 }
             }               /* if:else on pt->type */
-#if HAVE_SERF
-            else if (pt->type == PT_SERF) {
-                /* send socket to serf. */
-                /* XXXX: this doesn't require get_worker() */
-                serf_event_trigger(g_serf, pt->baton, out_pfd);
-            }
-#endif
             out_pfd++;
             num--;
         }                   /* while for processing poll */
@@ -1579,8 +1803,11 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
          * r->request_time for new requests
          */
         now = apr_time_now();
-        /* we only do this once per 0.1s (TIMEOUT_FUDGE_FACTOR) */
-        if (now > timeout_time) {
+        /* We only do this once per 0.1s (TIMEOUT_FUDGE_FACTOR), or on clock
+         * skew: if the system time is set back in the meantime, timeout_time
+         * will exceed now + TIMEOUT_FUDGE_FACTOR, which can't happen otherwise.
+         */
+        if (now > timeout_time || now + TIMEOUT_FUDGE_FACTOR < timeout_time) {
             struct process_score *ps;
             timeout_time = now + TIMEOUT_FUDGE_FACTOR;
 
@@ -1591,39 +1818,40 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
             /* If all workers are busy, we kill older keep-alive connections so that they
              * may connect to another process.
              */
-            if (workers_were_busy && keepalive_q.count) {
+            if (workers_were_busy && *keepalive_q->total) {
                 ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
                              "All workers are busy, will close %d keep-alive "
                              "connections",
-                             keepalive_q.count);
-                process_timeout_queue(&keepalive_q,
-                                      timeout_time + ap_server_conf->keep_alive_timeout,
-                                      start_lingering_close);
+                             *keepalive_q->total);
+                process_timeout_queue(keepalive_q, 0,
+                                      start_lingering_close_nonblocking);
             }
             else {
-                process_timeout_queue(&keepalive_q, timeout_time,
-                                      start_lingering_close);
+                process_timeout_queue(keepalive_q, timeout_time,
+                                      start_lingering_close_nonblocking);
             }
             /* Step 2: write completion timeouts */
-            process_timeout_queue(&write_completion_q, timeout_time, start_lingering_close);
+            process_timeout_queue(write_completion_q, timeout_time,
+                                  start_lingering_close_nonblocking);
             /* Step 3: (normal) lingering close completion timeouts */
-            process_timeout_queue(&linger_q, timeout_time, stop_lingering_close);
+            process_timeout_queue(linger_q, timeout_time, stop_lingering_close);
             /* Step 4: (short) lingering close completion timeouts */
-            process_timeout_queue(&short_linger_q, timeout_time, stop_lingering_close);
+            process_timeout_queue(short_linger_q, timeout_time, stop_lingering_close);
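/* A sketch of the chained queue layout implied by this hunk (field and
 * type names inferred from the uses above, e.g. *keepalive_q->total and
 * the ->next chaining in event_post_config(); assumptions, not verbatim):
 * queues sharing a timeout class are linked via ->next, and ->total
 * points at a counter common to the whole chain, which is what the
 * scoreboard and the "workers busy" path read.
 */
struct timeout_queue_sketch {
    struct timeout_head_t head;          /* ring of event_conn_state_t */
    apr_interval_time_t timeout;         /* expiry delay for this queue */
    apr_uint32_t count;                  /* entries in this queue alone */
    apr_uint32_t *total;                 /* shared sum over the chain */
    struct timeout_queue_sketch *next;   /* next queue (other timeout) */
};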
 
             ps = ap_get_scoreboard_process(process_slot);
-            ps->write_completion = write_completion_q.count;
-            ps->lingering_close = linger_q.count + short_linger_q.count;
-            ps->keep_alive = keepalive_q.count;
+            ps->write_completion = *write_completion_q->total;
+            ps->keep_alive = *keepalive_q->total;
             apr_thread_mutex_unlock(timeout_mutex);
 
             ps->connections = apr_atomic_read32(&connection_count);
-            /* XXX: should count CONN_STATE_SUSPENDED and set ps->suspended */
-        }
-        if (listeners_disabled && !workers_were_busy &&
-            (int)apr_atomic_read32(&connection_count) <
-            ((int)ap_queue_info_get_idlers(worker_queue_info) - 1) * WORKER_OVERCOMMIT +
-            threads_per_child)
+            ps->suspended = apr_atomic_read32(&suspended_count);
+            ps->lingering_close = apr_atomic_read32(&lingering_count);
+        }
+        if (listeners_disabled && !workers_were_busy
+            && (int)apr_atomic_read32(&connection_count)
+               - (int)apr_atomic_read32(&lingering_count)
+               < ((int)ap_queue_info_get_idlers(worker_queue_info) - 1)
+                 * worker_factor / WORKER_FACTOR_SCALE + threads_per_child)
         {
             listeners_disabled = 0;
             enable_listensocks(process_slot);
@@ -1654,7 +1882,7 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
     int process_slot = ti->pid;
     int thread_slot = ti->tid;
     apr_socket_t *csd = NULL;
-    conn_state_t *cs;
+    event_conn_state_t *cs;
     apr_pool_t *ptrans;         /* Pool for per-transaction stuff */
     apr_status_t rv;
     int is_idle = 0;
@@ -1715,7 +1943,7 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
             /* We got some other error. */
             else if (!workers_may_exit) {
                 ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
-                             "ap_queue_pop failed");
+                             APLOGNO(03099) "ap_queue_pop failed");
             }
             continue;
         }
@@ -1723,18 +1951,15 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
             te->cbfunc(te->baton);
 
             {
-                apr_thread_mutex_lock(g_timer_ring_mtx);
+                apr_thread_mutex_lock(g_timer_skiplist_mtx);
                 APR_RING_INSERT_TAIL(&timer_free_ring, te, timer_event_t, link);
-                apr_thread_mutex_unlock(g_timer_ring_mtx);
+                apr_thread_mutex_unlock(g_timer_skiplist_mtx);
             }
         }
         else {
             is_idle = 0;
             worker_sockets[thread_slot] = csd;
-            rv = process_socket(thd, ptrans, csd, cs, process_slot, thread_slot);
-            if (!rv) {
-                requests_this_child--;
-            }
+            process_socket(thd, ptrans, csd, cs, process_slot, thread_slot);
             worker_sockets[thread_slot] = NULL;
         }
     }
@@ -1767,14 +1992,14 @@ static void create_listener_thread(thread_starter * ts)
     proc_info *my_info;
     apr_status_t rv;
 
-    my_info = (proc_info *) malloc(sizeof(proc_info));
+    my_info = (proc_info *) ap_malloc(sizeof(proc_info));
     my_info->pid = my_child_num;
     my_info->tid = -1;          /* listener thread doesn't have a thread slot */
     my_info->sd = 0;
     rv = apr_thread_create(&ts->listener, thread_attr, listener_thread,
                            my_info, pchild);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(00474)
                      "apr_thread_create: unable to create listener thread");
         /* let the parent decide how bad this really is */
         clean_child_exit(APEXIT_CHILDSICK);
@@ -1802,13 +2027,14 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
     int loops;
     int prev_threads_created;
     int max_recycled_pools = -1;
+    int good_methods[] = {APR_POLLSET_KQUEUE, APR_POLLSET_PORT, APR_POLLSET_EPOLL};
 
     /* We must create the fd queues before we start up the listener
      * and worker threads. */
     worker_queue = apr_pcalloc(pchild, sizeof(*worker_queue));
     rv = ap_queue_init(worker_queue, threads_per_child, pchild);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(03100)
                      "ap_queue_init() failed");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
@@ -1823,7 +2049,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
     rv = ap_queue_info_create(&worker_queue_info, pchild,
                               threads_per_child, max_recycled_pools);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(03101)
                      "ap_queue_info_create() failed");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
@@ -1834,24 +2060,40 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
     rv = apr_thread_mutex_create(&timeout_mutex, APR_THREAD_MUTEX_DEFAULT,
                                  pchild);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03102)
                      "creation of the timeout mutex failed.");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
     /* Create the main pollset */
-    rv = apr_pollset_create(&event_pollset,
-                            threads_per_child, /* XXX don't we need more, to handle
+    for (i = 0; i < sizeof(good_methods) / sizeof(void*); i++) {
+        rv = apr_pollset_create_ex(&event_pollset,
+                            threads_per_child*2, /* XXX don't we need more, to handle
                                                 * connections in K-A or lingering
                                                 * close?
                                                 */
-                            pchild, APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
+                            pchild, APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY | APR_POLLSET_NODEFAULT,
+                            good_methods[i]);
+        if (rv == APR_SUCCESS) {
+            break;
+        }
+    }
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
+        rv = apr_pollset_create(&event_pollset,
+                               threads_per_child*2, /* XXX don't we need more, to handle
+                                                     * connections in K-A or lingering
+                                                     * close?
+                                                     */
+                               pchild, APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
+    }
+    if (rv != APR_SUCCESS) {
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03103)
                      "apr_pollset_create with Thread Safety failed.");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
+    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02471)
+                 "start_threads: Using %s", apr_pollset_method_name(event_pollset));
     worker_sockets = apr_pcalloc(pchild, threads_per_child
                                  * sizeof(apr_socket_t *));
 
@@ -1866,12 +2108,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
                 continue;
             }
 
-            my_info = (proc_info *) malloc(sizeof(proc_info));
-            if (my_info == NULL) {
-                ap_log_error(APLOG_MARK, APLOG_ALERT, errno, ap_server_conf,
-                             "malloc: out of memory");
-                clean_child_exit(APEXIT_CHILDFATAL);
-            }
+            my_info = (proc_info *) ap_malloc(sizeof(proc_info));
             my_info->pid = my_child_num;
             my_info->tid = i;
             my_info->sd = 0;
@@ -1886,6 +2123,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
                                    worker_thread, my_info, pchild);
             if (rv != APR_SUCCESS) {
                 ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+                             APLOGNO(03104)
                              "apr_thread_create: unable to create worker thread");
                 /* let the parent decide how bad this really is */
                 clean_child_exit(APEXIT_CHILDSICK);
@@ -1956,13 +2194,13 @@ static void join_workers(apr_thread_t * listener, apr_thread_t ** threads)
             ++iter;
         }
         if (iter >= 10) {
-            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
+            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00475)
                          "the listener thread didn't stop accepting");
         }
         else {
             rv = apr_thread_join(&thread_rv, listener);
             if (rv != APR_SUCCESS) {
-                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
+                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00476)
                              "apr_thread_join: unable to join listener thread");
             }
         }
@@ -1972,7 +2210,7 @@ static void join_workers(apr_thread_t * listener, apr_thread_t ** threads)
         if (threads[i]) {       /* if we ever created this thread */
             rv = apr_thread_join(&thread_rv, threads[i]);
             if (rv != APR_SUCCESS) {
-                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
+                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00477)
                              "apr_thread_join: unable to join worker "
                              "thread %d", i);
             }
@@ -1990,18 +2228,19 @@ static void join_start_thread(apr_thread_t * start_thread_id)
                                  */
     rv = apr_thread_join(&thread_rv, start_thread_id);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00478)
                      "apr_thread_join: unable to join the start " "thread");
     }
 }
 
-static void child_main(int child_num_arg)
+static void child_main(int child_num_arg, int child_bucket)
 {
     apr_thread_t **threads;
     apr_status_t rv;
     thread_starter *ts;
     apr_threadattr_t *thread_attr;
     apr_thread_t *start_thread_id;
+    int i;
 
     mpm_state = AP_MPMQ_STARTING;       /* for benefit of any hooks that run as this
                                          * child initializes
@@ -2010,6 +2249,14 @@ static void child_main(int child_num_arg)
     ap_fatal_signal_child_setup(ap_server_conf);
     apr_pool_create(&pchild, pconf);
 
+    /* close unused listeners and pods */
+    for (i = 0; i < retained->num_buckets; i++) {
+        if (i != child_bucket) {
+            ap_close_listeners_ex(all_buckets[i].listeners);
+            ap_mpm_podx_close(all_buckets[i].pod);
+        }
+    }
+
     /*stuff to do before we switch id's, so we have permissions. */
     ap_reopen_scoreboard(pchild, NULL, 0);
 
@@ -2017,9 +2264,10 @@ static void child_main(int child_num_arg)
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
-    apr_thread_mutex_create(&g_timer_ring_mtx, APR_THREAD_MUTEX_DEFAULT, pchild);
+    apr_thread_mutex_create(&g_timer_skiplist_mtx, APR_THREAD_MUTEX_DEFAULT, pchild);
     APR_RING_INIT(&timer_free_ring, timer_event_t, link);
-    APR_RING_INIT(&timer_ring, timer_event_t, link);
+    apr_skiplist_init(&timer_skiplist, pchild);
+    apr_skiplist_set_compare(timer_skiplist, timer_comp, timer_comp);
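/* A minimal sketch (assumption; the real timer_comp is defined earlier in
 * the file, and a "when" expiry field is assumed here) of a comparator
 * suitable for apr_skiplist_set_compare(): timer events are ordered by
 * absolute expiry, so the listener can peek the soonest timer cheaply.
 */
static int timer_comp_sketch(void *a, void *b)
{
    apr_time_t t1 = ((timer_event_t *) a)->when;
    apr_time_t t2 = ((timer_event_t *) b)->when;
    return (t1 < t2) ? -1 : 1;  /* never 0: equal expiries keep FIFO order */
}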
     ap_run_child_init(pchild, ap_server_conf);
 
     /* done with init critical section */
@@ -2030,17 +2278,17 @@ static void child_main(int child_num_arg)
      */
     rv = apr_setup_signal_thread();
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf, APLOGNO(00479)
                      "Couldn't initialize signal thread");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
     if (ap_max_requests_per_child) {
-        requests_this_child = ap_max_requests_per_child;
+        conns_this_child = ap_max_requests_per_child;
     }
     else {
         /* coding a value of zero means infinity */
-        requests_this_child = INT_MAX;
+        conns_this_child = APR_INT32_MAX;
     }
 
     /* Setup worker threads */
@@ -2048,23 +2296,21 @@ static void child_main(int child_num_arg)
     /* clear the storage; we may not create all our threads immediately,
      * and we want a 0 entry to indicate a thread which was not created
      */
-    threads = (apr_thread_t **) calloc(1,
-                                       sizeof(apr_thread_t *) *
-                                       threads_per_child);
-    if (threads == NULL) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, errno, ap_server_conf,
-                     "malloc: out of memory");
-        clean_child_exit(APEXIT_CHILDFATAL);
-    }
-
-    ts = (thread_starter *) apr_palloc(pchild, sizeof(*ts));
+    threads = ap_calloc(threads_per_child, sizeof(apr_thread_t *));
+    ts = apr_palloc(pchild, sizeof(*ts));
 
     apr_threadattr_create(&thread_attr, pchild);
     /* 0 means PTHREAD_CREATE_JOINABLE */
     apr_threadattr_detach_set(thread_attr, 0);
 
     if (ap_thread_stacksize != 0) {
-        apr_threadattr_stacksize_set(thread_attr, ap_thread_stacksize);
+        rv = apr_threadattr_stacksize_set(thread_attr, ap_thread_stacksize);
+        if (rv != APR_SUCCESS && rv != APR_ENOTIMPL) {
+            ap_log_error(APLOG_MARK, APLOG_WARNING, rv, ap_server_conf, APLOGNO(02436)
+                         "WARNING: ThreadStackSize of %" APR_SIZE_T_FMT " is "
+                         "inappropriate, using default", 
+                         ap_thread_stacksize);
+        }
     }
 
     ts->threads = threads;
@@ -2075,7 +2321,7 @@ static void child_main(int child_num_arg)
     rv = apr_thread_create(&start_thread_id, thread_attr, start_threads,
                            ts, pchild);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(00480)
                      "apr_thread_create: unable to create worker thread");
         /* let the parent decide how bad this really is */
         clean_child_exit(APEXIT_CHILDSICK);
@@ -2120,25 +2366,25 @@ static void child_main(int child_num_arg)
         apr_signal(SIGTERM, dummy_signal_handler);
         /* Watch for any messages from the parent over the POD */
         while (1) {
-            rv = ap_event_pod_check(pod);
-            if (rv == AP_NORESTART) {
+            rv = ap_mpm_podx_check(my_bucket->pod);
+            if (rv == AP_MPM_PODX_NORESTART) {
                 /* see if termination was triggered while we slept */
                 switch (terminate_mode) {
                 case ST_GRACEFUL:
-                    rv = AP_GRACEFUL;
+                    rv = AP_MPM_PODX_GRACEFUL;
                     break;
                 case ST_UNGRACEFUL:
-                    rv = AP_RESTART;
+                    rv = AP_MPM_PODX_RESTART;
                     break;
                 }
             }
-            if (rv == AP_GRACEFUL || rv == AP_RESTART) {
+            if (rv == AP_MPM_PODX_GRACEFUL || rv == AP_MPM_PODX_RESTART) {
                 /* make sure the start thread has finished;
                  * signal_threads() and join_workers depend on that
                  */
                 join_start_thread(start_thread_id);
                 signal_threads(rv ==
-                               AP_GRACEFUL ? ST_GRACEFUL : ST_UNGRACEFUL);
+                               AP_MPM_PODX_GRACEFUL ? ST_GRACEFUL : ST_UNGRACEFUL);
                 break;
             }
         }
@@ -2158,7 +2404,7 @@ static void child_main(int child_num_arg)
     clean_child_exit(resource_shortage ? APEXIT_CHILDSICK : 0);
 }
 
-static int make_child(server_rec * s, int slot)
+static int make_child(server_rec * s, int slot, int bucket)
 {
     int pid;
 
@@ -2167,14 +2413,18 @@ static int make_child(server_rec * s, int slot)
     }
 
     if (one_process) {
+        my_bucket = &all_buckets[0];
+
         set_signals();
         event_note_child_started(slot, getpid());
-        child_main(slot);
+        child_main(slot, 0);
         /* NOTREACHED */
+        ap_assert(0);
+        return -1;
     }
 
     if ((pid = fork()) == -1) {
-        ap_log_error(APLOG_MARK, APLOG_ERR, errno, s,
+        ap_log_error(APLOG_MARK, APLOG_ERR, errno, s, APLOGNO(00481)
                      "fork: Unable to fork new process");
 
         /* fork didn't succeed.  There's no need to touch the scoreboard;
@@ -2193,6 +2443,8 @@ static int make_child(server_rec * s, int slot)
     }
 
     if (!pid) {
+        my_bucket = &all_buckets[bucket];
+
 #ifdef HAVE_BINDPROCESSOR
         /* By default, AIX binds to a single processor.  This bit unbinds
          * children which will then bind to another CPU.
@@ -2201,16 +2453,18 @@ static int make_child(server_rec * s, int slot)
                                    PROCESSOR_CLASS_ANY);
         if (status != OK)
             ap_log_error(APLOG_MARK, APLOG_DEBUG, errno,
-                         ap_server_conf,
+                         ap_server_conf, APLOGNO(00482)
                          "processor unbind failed");
 #endif
         RAISE_SIGSTOP(MAKE_CHILD);
 
         apr_signal(SIGTERM, just_die);
-        child_main(slot);
+        child_main(slot, bucket);
         /* NOTREACHED */
+        ap_assert(0);
+        return -1;
     }
-    /* else */
+
     if (ap_scoreboard_image->parent[slot].pid != 0) {
         /* This new child process is squatting on the scoreboard
          * entry owned by an exiting child process, which cannot
@@ -2220,6 +2474,7 @@ static int make_child(server_rec * s, int slot)
     }
     ap_scoreboard_image->parent[slot].quiescing = 0;
     ap_scoreboard_image->parent[slot].not_accepting = 0;
+    ap_scoreboard_image->parent[slot].bucket = bucket;
     event_note_child_started(slot, pid);
     return 0;
 }
@@ -2233,14 +2488,14 @@ static void startup_children(int number_to_start)
         if (ap_scoreboard_image->parent[i].pid != 0) {
             continue;
         }
-        if (make_child(ap_server_conf, i) < 0) {
+        if (make_child(ap_server_conf, i, i % retained->num_buckets) < 0) {
             break;
         }
         --number_to_start;
     }
 }
 
-static void perform_idle_server_maintenance(void)
+static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
 {
     int i, j;
     int idle_thread_count;
@@ -2267,9 +2522,10 @@ static void perform_idle_server_maintenance(void)
         int any_dying_threads = 0;
         int any_dead_threads = 0;
         int all_dead_threads = 1;
+        int child_threads_active = 0;
 
         if (i >= retained->max_daemons_limit
-            && totally_free_length == retained->idle_spawn_rate)
+            && totally_free_length == retained->idle_spawn_rate[child_bucket])
             /* short cut if all active processes have been examined and
              * enough empty scoreboard slots have been found
              */
@@ -2296,18 +2552,20 @@ static void perform_idle_server_maintenance(void)
             if (ps->pid != 0) { /* XXX just set all_dead_threads in outer
                                    for loop if no pid?  not much else matters */
                 if (status <= SERVER_READY && !ps->quiescing && !ps->not_accepting
-                    && ps->generation == retained->my_generation)
+                    && ps->generation == retained->my_generation
+                    && ps->bucket == child_bucket)
                 {
                     ++idle_thread_count;
                 }
                 if (status >= SERVER_READY && status < SERVER_GRACEFUL) {
-                    ++active_thread_count;
+                    ++child_threads_active;
                 }
             }
         }
+        active_thread_count += child_threads_active;
         if (any_dead_threads
-            && totally_free_length < retained->idle_spawn_rate
-            && free_length < MAX_SPAWN_RATE
+            && totally_free_length < retained->idle_spawn_rate[child_bucket]
+            && free_length < MAX_SPAWN_RATE / num_buckets
             && (!ps->pid      /* no process in the slot */
                   || ps->quiescing)) {  /* or at least one is going away */
             if (all_dead_threads) {
@@ -2328,6 +2586,9 @@ static void perform_idle_server_maintenance(void)
             }
             ++free_length;
         }
+        else if (child_threads_active == threads_per_child) {
+            had_healthy_child = 1;
+        }
         /* XXX if (!ps->quiescing)     is probably more reliable  GLA */
         if (!any_dying_threads) {
             last_non_dead = i;
@@ -2336,21 +2597,23 @@ static void perform_idle_server_maintenance(void)
     }
 
     if (retained->sick_child_detected) {
-        if (active_thread_count > 0) {
-            /* some child processes appear to be working.  don't kill the
-             * whole server.
+        if (had_healthy_child) {
+            /* Assume this is a transient error, even though it may not be.  Leave
+             * the server up in case it is able to serve some requests or the
+             * problem will be resolved.
              */
             retained->sick_child_detected = 0;
         }
         else {
-            /* looks like a basket case give up.
+            /* looks like a basket case, as no child ever fully initialized; give up.
              */
             shutdown_pending = 1;
             child_fatal = 1;
             ap_log_error(APLOG_MARK, APLOG_ALERT, 0,
-                         ap_server_conf,
-                         "No active workers found..."
-                         " Apache is exiting!");
+                         ap_server_conf, APLOGNO(02324)
+                         "A resource shortage or other unrecoverable failure "
+                         "was encountered before any child process initialized "
+                         "successfully... httpd is exiting!");
             /* the child already logged the failure details */
             return;
         }
@@ -2358,19 +2621,20 @@ static void perform_idle_server_maintenance(void)
 
     retained->max_daemons_limit = last_non_dead + 1;
 
-    if (idle_thread_count > max_spare_threads) {
+    if (idle_thread_count > max_spare_threads / num_buckets) {
         /* Kill off one child */
-        ap_event_pod_signal(pod, TRUE);
-        retained->idle_spawn_rate = 1;
+        ap_mpm_podx_signal(all_buckets[child_bucket].pod,
+                           AP_MPM_PODX_GRACEFUL);
+        retained->idle_spawn_rate[child_bucket] = 1;
     }
-    else if (idle_thread_count < min_spare_threads) {
+    else if (idle_thread_count < min_spare_threads / num_buckets) {
         /* terminate the free list */
         if (free_length == 0) { /* scoreboard is full, can't fork */
 
             if (active_thread_count >= ap_daemons_limit * threads_per_child) {
                 if (!retained->maxclients_reported) {
                     /* only report this condition once */
-                    ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf,
+                    ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00484)
                                  "server reached MaxRequestWorkers setting, "
                                  "consider raising the MaxRequestWorkers "
                                  "setting");
@@ -2378,17 +2642,17 @@ static void perform_idle_server_maintenance(void)
                 }
             }
             else {
-                ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf,
+                ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00485)
                              "scoreboard is full, not at MaxRequestWorkers");
             }
-            retained->idle_spawn_rate = 1;
+            retained->idle_spawn_rate[child_bucket] = 1;
         }
         else {
-            if (free_length > retained->idle_spawn_rate) {
-                free_length = retained->idle_spawn_rate;
+            if (free_length > retained->idle_spawn_rate[child_bucket]) {
+                free_length = retained->idle_spawn_rate[child_bucket];
             }
-            if (retained->idle_spawn_rate >= 8) {
-                ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf,
+            if (retained->idle_spawn_rate[child_bucket] >= 8) {
+                ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf, APLOGNO(00486)
                              "server seems busy, (you may need "
                              "to increase StartServers, ThreadsPerChild "
                              "or Min/MaxSpareThreads), "
@@ -2397,7 +2661,7 @@ static void perform_idle_server_maintenance(void)
                              idle_thread_count, total_non_dead);
             }
             for (i = 0; i < free_length; ++i) {
-                make_child(ap_server_conf, free_slots[i]);
+                make_child(ap_server_conf, free_slots[i], child_bucket);
             }
             /* the next time around we want to spawn twice as many if this
              * wasn't good enough, but not if we've just done a graceful
@@ -2405,17 +2669,18 @@ static void perform_idle_server_maintenance(void)
             if (retained->hold_off_on_exponential_spawning) {
                 --retained->hold_off_on_exponential_spawning;
             }
-            else if (retained->idle_spawn_rate < MAX_SPAWN_RATE) {
-                retained->idle_spawn_rate *= 2;
+            else if (retained->idle_spawn_rate[child_bucket]
+                     < MAX_SPAWN_RATE / num_buckets) {
+                retained->idle_spawn_rate[child_bucket] *= 2;
             }
         }
     }
     else {
-        retained->idle_spawn_rate = 1;
+        retained->idle_spawn_rate[child_bucket] = 1;
     }
 }
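/* Worked example (illustrative numbers): with num_buckets = 2,
 * MaxSpareThreads 250 and MinSpareThreads 50, each bucket independently
 * reaps a child once its idle count exceeds 250 / 2 = 125 and spawns
 * below 50 / 2 = 25, with the spawn rate capped at MAX_SPAWN_RATE / 2,
 * so the buckets together approximate the old single-pod behaviour.
 */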
 
-static void server_main_loop(int remaining_children_to_start)
+static void server_main_loop(int remaining_children_to_start, int num_buckets)
 {
     ap_generation_t old_gen;
     int child_slot;
@@ -2429,10 +2694,27 @@ static void server_main_loop(int remaining_children_to_start)
 
         if (pid.pid != -1) {
             processed_status = ap_process_child_status(&pid, exitwhy, status);
+            child_slot = ap_find_child_by_pid(&pid);
             if (processed_status == APEXIT_CHILDFATAL) {
-                shutdown_pending = 1;
-                child_fatal = 1;
-                return;
+                /* fix race condition found in PR 39311
+                 * A child created at the same time as a graceful restart
+                 * happens can find the lock missing and report a fatal error.
+                 * It is not fatal for the last generation to be in this state.
+                 */
+                if (child_slot < 0
+                    || ap_get_scoreboard_process(child_slot)->generation
+                       == retained->my_generation) {
+                    shutdown_pending = 1;
+                    child_fatal = 1;
+                    return;
+                }
+                else {
+                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf, APLOGNO(00487)
+                                 "Ignoring fatal error in child of previous "
+                                 "generation (pid %ld).",
+                                 (long)pid.pid);
+                    retained->sick_child_detected = 1;
+                }
             }
             else if (processed_status == APEXIT_CHILDSICK) {
                 /* tell perform_idle_server_maintenance to check into this
@@ -2441,25 +2723,27 @@ static void server_main_loop(int remaining_children_to_start)
                 retained->sick_child_detected = 1;
             }
             /* non-fatal death... note that it's gone in the scoreboard. */
-            child_slot = ap_find_child_by_pid(&pid);
             if (child_slot >= 0) {
+                process_score *ps;
+
                 for (i = 0; i < threads_per_child; i++)
                     ap_update_child_status_from_indexes(child_slot, i,
                                                         SERVER_DEAD,
                                                         (request_rec *) NULL);
 
                 event_note_child_killed(child_slot, 0, 0);
-                ap_scoreboard_image->parent[child_slot].quiescing = 0;
+                ps = &ap_scoreboard_image->parent[child_slot];
+                ps->quiescing = 0;
                 if (processed_status == APEXIT_CHILDSICK) {
                     /* resource shortage, minimize the fork rate */
-                    retained->idle_spawn_rate = 1;
+                    retained->idle_spawn_rate[ps->bucket] = 1;
                 }
                 else if (remaining_children_to_start
                          && child_slot < ap_daemons_limit) {
                     /* we're still doing a 1-for-1 replacement of dead
                      * children with new children
                      */
-                    make_child(ap_server_conf, child_slot);
+                    make_child(ap_server_conf, child_slot, ps->bucket);
                     --remaining_children_to_start;
                 }
             }
@@ -2467,6 +2751,13 @@ static void server_main_loop(int remaining_children_to_start)
 
                 event_note_child_killed(-1, /* already out of the scoreboard */
                                         pid.pid, old_gen);
+                if (processed_status == APEXIT_CHILDSICK
+                    && old_gen == retained->my_generation) {
+                    /* resource shortage, minimize the fork rate */
+                    for (i = 0; i < num_buckets; i++) {
+                        retained->idle_spawn_rate[i] = 1;
+                    }
+                }
 #if APR_HAS_OTHER_CHILD
             }
             else if (apr_proc_other_child_alert(&pid, APR_OC_REASON_DEATH,
@@ -2479,7 +2770,7 @@ static void server_main_loop(int remaining_children_to_start)
                  * scoreboard.  Somehow we don't know about this child.
                  */
                 ap_log_error(APLOG_MARK, APLOG_WARNING, 0,
-                             ap_server_conf,
+                             ap_server_conf, APLOGNO(00488)
                              "long lost child came home! (pid %ld)",
                              (long) pid.pid);
             }
@@ -2504,13 +2795,17 @@ static void server_main_loop(int remaining_children_to_start)
             continue;
         }
 
-        perform_idle_server_maintenance();
+        for (i = 0; i < num_buckets; i++) {
+            perform_idle_server_maintenance(i, num_buckets);
+        }
     }
 }
 
 static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
 {
+    int num_buckets = retained->num_buckets;
     int remaining_children_to_start;
+    int i;
 
     ap_log_pid(pconf, ap_pid_fname);
 
@@ -2525,10 +2820,20 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
         ap_scoreboard_image->global->running_generation = retained->my_generation;
     }
 
+    restart_pending = shutdown_pending = 0;
     set_signals();
-    /* Don't thrash... */
-    if (max_spare_threads < min_spare_threads + threads_per_child)
-        max_spare_threads = min_spare_threads + threads_per_child;
+
+    /* Don't thrash since num_buckets depends on the
+     * system and the number of online CPU cores...
+     */
+    if (ap_daemons_limit < num_buckets)
+        ap_daemons_limit = num_buckets;
+    if (ap_daemons_to_start < num_buckets)
+        ap_daemons_to_start = num_buckets;
+    if (min_spare_threads < threads_per_child * num_buckets)
+        min_spare_threads = threads_per_child * num_buckets;
+    if (max_spare_threads < min_spare_threads + threads_per_child * num_buckets)
+        max_spare_threads = min_spare_threads + threads_per_child * num_buckets;
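/* Illustration (example numbers): with num_buckets = 4 and
 * ThreadsPerChild 25, MinSpareThreads is raised to at least 4 * 25 = 100
 * and MaxSpareThreads to at least MinSpareThreads + 100, so every bucket,
 * which only sees a 1/num_buckets share of the spare-thread budget in
 * perform_idle_server_maintenance(), can keep at least one whole child.
 */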
 
     /* If we're doing a graceful_restart then we're going to see a lot
      * of children exiting immediately when we get into the main loop
@@ -2553,24 +2858,27 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
         retained->hold_off_on_exponential_spawning = 10;
     }
 
-    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
+    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00489)
                  "%s configured -- resuming normal operations",
                  ap_get_server_description());
-    ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf,
+    ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf, APLOGNO(00490)
                  "Server built: %s", ap_get_server_built());
     ap_log_command_line(plog, s);
+    ap_log_mpm_common(s);
 
-    restart_pending = shutdown_pending = 0;
     mpm_state = AP_MPMQ_RUNNING;
 
-    server_main_loop(remaining_children_to_start);
+    server_main_loop(remaining_children_to_start, num_buckets);
     mpm_state = AP_MPMQ_STOPPING;
 
     if (shutdown_pending && !retained->is_graceful) {
         /* Time to shut down:
          * Kill child processes, tell them to call child_exit, etc...
          */
-        ap_event_pod_killpg(pod, ap_daemons_limit, FALSE);
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+                               AP_MPM_PODX_RESTART);
+        }
         ap_reclaim_child_processes(1, /* Start with SIGTERM */
                                    event_note_child_killed);
 
@@ -2578,7 +2886,7 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
             /* cleanup pid file on normal shutdown */
             ap_remove_pid(pconf, ap_pid_fname);
             ap_log_error(APLOG_MARK, APLOG_NOTICE, 0,
-                         ap_server_conf, "caught SIGTERM, shutting down");
+                         ap_server_conf, APLOGNO(00491) "caught SIGTERM, shutting down");
         }
         return DONE;
     } else if (shutdown_pending) {
@@ -2591,13 +2899,16 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
 
         /* Close our listeners, and then ask our children to do same */
         ap_close_listeners();
-        ap_event_pod_killpg(pod, ap_daemons_limit, TRUE);
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+                               AP_MPM_PODX_GRACEFUL);
+        }
         ap_relieve_child_processes(event_note_child_killed);
 
         if (!child_fatal) {
             /* cleanup pid file on normal shutdown */
             ap_remove_pid(pconf, ap_pid_fname);
-            ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
+            ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00492)
                          "caught " AP_SIG_GRACEFUL_STOP_STRING
                          ", shutting down gracefully");
         }
@@ -2631,7 +2942,10 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
          * way, try and make sure that all of our processes are
          * really dead.
          */
-        ap_event_pod_killpg(pod, ap_daemons_limit, FALSE);
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+                               AP_MPM_PODX_RESTART);
+        }
         ap_reclaim_child_processes(1, event_note_child_killed);
 
         return DONE;
@@ -2653,12 +2967,14 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
     ap_scoreboard_image->global->running_generation = retained->my_generation;
 
     if (retained->is_graceful) {
-        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00493)
                      AP_SIG_GRACEFUL_STRING
                      " received.  Doing graceful restart");
         /* wake up the children...time to die.  But we'll have more soon */
-        ap_event_pod_killpg(pod, ap_daemons_limit, TRUE);
-
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+                               AP_MPM_PODX_GRACEFUL);
+        }
 
         /* This is mostly for debugging... so that we know what is still
         * gracefully dealing with existing requests.
@@ -2670,11 +2986,14 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
          * and a SIGHUP, we may as well use the same signal, because some user
          * pthreads are stealing signals from us left and right.
          */
-        ap_event_pod_killpg(pod, ap_daemons_limit, FALSE);
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+                               AP_MPM_PODX_RESTART);
+        }
 
         ap_reclaim_child_processes(1,  /* Start with SIGTERM */
                                    event_note_child_killed);
-        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00494)
                      "SIGHUP received.  Attempting to restart");
     }
 
@@ -2689,7 +3008,10 @@ static int event_open_logs(apr_pool_t * p, apr_pool_t * plog,
 {
     int startup = 0;
     int level_flags = 0;
+    int num_buckets = 0;
+    ap_listen_rec **listen_buckets;
     apr_status_t rv;
+    int i;
 
     pconf = p;
 
@@ -2706,14 +3028,64 @@ static int event_open_logs(apr_pool_t * p, apr_pool_t * plog,
         return DONE;
     }
 
-    if (!one_process) {
-        if ((rv = ap_event_pod_open(pconf, &pod))) {
+    if (one_process) {
+        num_buckets = 1;
+    }
+    else if (retained->is_graceful) {
+        /* Preserve the number of buckets on graceful restarts. */
+        num_buckets = retained->num_buckets;
+    }
+    if ((rv = ap_duplicate_listeners(pconf, ap_server_conf,
+                                     &listen_buckets, &num_buckets))) {
+        ap_log_error(APLOG_MARK, APLOG_CRIT | level_flags, rv,
+                     (startup ? NULL : s),
+                     "could not duplicate listeners");
+        return DONE;
+    }
+
+    all_buckets = apr_pcalloc(pconf, num_buckets * sizeof(*all_buckets));
+    for (i = 0; i < num_buckets; i++) {
+        if (!one_process && /* no POD in one_process mode */
+                (rv = ap_mpm_podx_open(pconf, &all_buckets[i].pod))) {
             ap_log_error(APLOG_MARK, APLOG_CRIT | level_flags, rv,
                          (startup ? NULL : s),
                          "could not open pipe-of-death");
             return DONE;
         }
+        all_buckets[i].listeners = listen_buckets[i];
+    }
+
+    if (retained->max_buckets < num_buckets) {
+        int new_max, *new_ptr;
+        new_max = retained->max_buckets * 2;
+        if (new_max < num_buckets) {
+            new_max = num_buckets;
+        }
+        new_ptr = (int *)apr_palloc(ap_pglobal, new_max * sizeof(int));
+        memcpy(new_ptr, retained->idle_spawn_rate,
+               retained->num_buckets * sizeof(int));
+        retained->idle_spawn_rate = new_ptr;
+        retained->max_buckets = new_max;
     }
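/* Illustration: capacity grows geometrically (max_buckets doubles, or
 * jumps straight to num_buckets when doubling is not enough), so raising
 * the bucket count across graceful restarts, e.g. 1 -> 2 -> 3 -> 5, only
 * reallocates the rate array when capacity 1, 2, 4, 8 is exceeded.
 */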
+    if (retained->num_buckets < num_buckets) {
+        int rate_max = 1;
+        /* If new buckets are added, set their idle spawn rate to
+         * the highest so far, so that they get filled as quickly
+         * as the existing ones.
+         */
+        for (i = 0; i < retained->num_buckets; i++) {
+            if (rate_max < retained->idle_spawn_rate[i]) {
+                rate_max = retained->idle_spawn_rate[i];
+            }
+        }
+        for (/* up to date i */; i < num_buckets; i++) {
+            retained->idle_spawn_rate[i] = rate_max;
+        }
+    }
+    retained->num_buckets = num_buckets;
+
+    /* for skiplist */
+    srand((unsigned int)apr_time_now());
     return OK;
 }
 
@@ -2743,14 +3115,24 @@ static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog,
     if (!retained) {
         retained = ap_retained_data_create(userdata_key, sizeof(*retained));
         retained->max_daemons_limit = -1;
-        retained->idle_spawn_rate = 1;
     }
     ++retained->module_loads;
     if (retained->module_loads == 2) {
+        /* test for correct operation of fdqueue */
+        static apr_uint32_t foo1, foo2;
+
+        apr_atomic_set32(&foo1, 100);
+        foo2 = apr_atomic_add32(&foo1, -10);
+        if (foo2 != 100 || foo1 != 90) {
+            ap_log_error(APLOG_MARK, APLOG_CRIT, 0, NULL, APLOGNO(02405)
+                         "atomics not working as expected - add32 of negative number");
+            return HTTP_INTERNAL_SERVER_ERROR;
+        }
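/* Note (illustrative): apr_atomic_add32() takes an unsigned delta and
 * returns the previous value, so adding -10 relies on two's-complement
 * wrap-around; a conforming implementation leaves foo2 == 100 and
 * foo1 == 90.  Platforms whose atomics mishandle such deltas are caught
 * here at load time, before they can corrupt the fdqueue counters.
 */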
+
         rv = apr_pollset_create(&event_pollset, 1, plog,
                                 APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
         if (rv != APR_SUCCESS) {
-            ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL,
+            ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL, APLOGNO(00495)
                          "Couldn't create a Thread Safe Pollset. "
                          "Is it supported on your platform?"
                          "Also check system or user limits!");
@@ -2759,10 +3141,12 @@ static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog,
         apr_pollset_destroy(event_pollset);
 
         if (!one_process && !foreground) {
+            /* before we detach, setup crash handlers to log to errorlog */
+            ap_fatal_signal_setup(ap_server_conf, pconf);
             rv = apr_proc_detach(no_detach ? APR_PROC_DETACH_FOREGROUND
                                  : APR_PROC_DETACH_DAEMONIZE);
             if (rv != APR_SUCCESS) {
-                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL,
+                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL, APLOGNO(00496)
                              "apr_proc_detach failed");
                 return HTTP_INTERNAL_SERVER_ERROR;
             }
@@ -2780,11 +3164,75 @@ static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog,
     ap_daemons_limit = server_limit;
     threads_per_child = DEFAULT_THREADS_PER_CHILD;
     max_workers = ap_daemons_limit * threads_per_child;
+    had_healthy_child = 0;
     ap_extended_status = 0;
 
     return OK;
 }
 
+static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
+                             apr_pool_t *ptemp, server_rec *s)
+{
+    struct {
+        struct timeout_queue *tail, *q;
+        apr_hash_t *hash;
+    } wc, ka;
+
+    /* Not needed in pre_config stage */
+    if (ap_state_query(AP_SQ_MAIN_STATE) == AP_SQ_MS_CREATE_PRE_CONFIG) {
+        return OK;
+    }
+
+    wc.tail = ka.tail = NULL;
+    wc.hash = apr_hash_make(ptemp);
+    ka.hash = apr_hash_make(ptemp);
+
+    TO_QUEUE_INIT(linger_q, pconf,
+                  apr_time_from_sec(MAX_SECS_TO_LINGER), NULL);
+    TO_QUEUE_INIT(short_linger_q, pconf,
+                  apr_time_from_sec(SECONDS_TO_LINGER), NULL);
+
+    for (; s; s = s->next) {
+        event_srv_cfg *sc = apr_pcalloc(pconf, sizeof *sc);
+
+        ap_set_module_config(s->module_config, &mpm_event_module, sc);
+        if (!wc.tail) {
+            /* The main server uses the global queues */
+            TO_QUEUE_INIT(wc.q, pconf, s->timeout, NULL);
+            apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
+            wc.tail = write_completion_q = wc.q;
+
+            TO_QUEUE_INIT(ka.q, pconf, s->keep_alive_timeout, NULL);
+            apr_hash_set(ka.hash, &s->keep_alive_timeout,
+                         sizeof s->keep_alive_timeout, ka.q);
+            ka.tail = keepalive_q = ka.q;
+        }
+        else {
+            /* The vhosts use any existing queue with the same timeout,
+             * or their own queue(s) if there isn't */
+            wc.q = apr_hash_get(wc.hash, &s->timeout, sizeof s->timeout);
+            if (!wc.q) {
+                TO_QUEUE_INIT(wc.q, pconf, s->timeout, wc.tail);
+                apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
+                wc.tail = wc.tail->next = wc.q;
+            }
+
+            ka.q = apr_hash_get(ka.hash, &s->keep_alive_timeout,
+                                sizeof s->keep_alive_timeout);
+            if (!ka.q) {
+                TO_QUEUE_INIT(ka.q, pconf, s->keep_alive_timeout, ka.tail);
+                apr_hash_set(ka.hash, &s->keep_alive_timeout,
+                             sizeof s->keep_alive_timeout, ka.q);
+                ka.tail = ka.tail->next = ka.q;
+            }
+        }
+        sc->wc_q = wc.q;
+        sc->ka_q = ka.q;
+    }
+
+    return OK;
+}
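/* Usage illustration (hypothetical configuration): two vhosts with
 * Timeout 60 hit the same wc.hash entry and share one write-completion
 * queue; a third vhost with Timeout 30 misses, gets its own queue, and
 * is appended via ->next, so the listener later walks one short chain
 * per distinct timeout instead of one queue per vhost.
 */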
+
 static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
                               apr_pool_t *ptemp, server_rec *s)
 {
@@ -2797,14 +3245,14 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
 
     if (server_limit > MAX_SERVER_LIMIT) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00497)
                          "WARNING: ServerLimit of %d exceeds compile-time "
                          "limit of", server_limit);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03105)
                          " %d servers, decreasing to %d.",
                          MAX_SERVER_LIMIT, MAX_SERVER_LIMIT);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00498)
                          "ServerLimit of %d exceeds compile-time limit "
                          "of %d, decreasing to match",
                          server_limit, MAX_SERVER_LIMIT);
@@ -2813,11 +3261,11 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (server_limit < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00499)
                          "WARNING: ServerLimit of %d not allowed, "
                          "increasing to 1.", server_limit);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00500)
                          "ServerLimit of %d not allowed, increasing to 1",
                          server_limit);
         }
@@ -2832,7 +3280,7 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (server_limit != retained->first_server_limit) {
         /* don't need a startup console version here */
-        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00501)
                      "changing ServerLimit to %d from original value of %d "
                      "not allowed during restart",
                      server_limit, retained->first_server_limit);
@@ -2841,14 +3289,14 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
 
     if (thread_limit > MAX_THREAD_LIMIT) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00502)
                          "WARNING: ThreadLimit of %d exceeds compile-time "
                          "limit of", thread_limit);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03106)
                          " %d threads, decreasing to %d.",
                          MAX_THREAD_LIMIT, MAX_THREAD_LIMIT);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00503)
                          "ThreadLimit of %d exceeds compile-time limit "
                          "of %d, decreasing to match",
                          thread_limit, MAX_THREAD_LIMIT);
@@ -2857,11 +3305,11 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (thread_limit < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00504)
                          "WARNING: ThreadLimit of %d not allowed, "
                          "increasing to 1.", thread_limit);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00505)
                          "ThreadLimit of %d not allowed, increasing to 1",
                          thread_limit);
         }
@@ -2876,7 +3324,7 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (thread_limit != retained->first_thread_limit) {
         /* don't need a startup console version here */
-        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00506)
                      "changing ThreadLimit to %d from original value of %d "
                      "not allowed during restart",
                      thread_limit, retained->first_thread_limit);
@@ -2885,17 +3333,17 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
 
     if (threads_per_child > thread_limit) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00507)
                          "WARNING: ThreadsPerChild of %d exceeds ThreadLimit "
                          "of", threads_per_child);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03107)
                          " %d threads, decreasing to %d.",
                          thread_limit, thread_limit);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03108)
                          " To increase, please see the ThreadLimit "
                          "directive.");
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00508)
                          "ThreadsPerChild of %d exceeds ThreadLimit "
                          "of %d, decreasing to match",
                          threads_per_child, thread_limit);
@@ -2904,11 +3352,11 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (threads_per_child < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00509)
                          "WARNING: ThreadsPerChild of %d not allowed, "
                          "increasing to 1.", threads_per_child);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00510)
                          "ThreadsPerChild of %d not allowed, increasing to 1",
                          threads_per_child);
         }
@@ -2917,17 +3365,17 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
 
     if (max_workers < threads_per_child) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00511)
                          "WARNING: MaxRequestWorkers of %d is less than "
                          "ThreadsPerChild of", max_workers);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03109)
                          " %d, increasing to %d.  MaxRequestWorkers must be at "
                          "least as large",
                          threads_per_child, threads_per_child);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03110)
                          " as the number of threads in a single server.");
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00512)
                          "MaxRequestWorkers of %d is less than ThreadsPerChild "
                          "of %d, increasing to match",
                          max_workers, threads_per_child);
@@ -2941,18 +3389,18 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
         int tmp_max_workers = ap_daemons_limit * threads_per_child;
 
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00513)
                          "WARNING: MaxRequestWorkers of %d is not an integer "
                          "multiple of", max_workers);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03111)
                          " ThreadsPerChild of %d, decreasing to nearest "
                          "multiple %d,", threads_per_child,
                          tmp_max_workers);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03112)
                          " for a maximum of %d servers.",
                          ap_daemons_limit);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00514)
                          "MaxRequestWorkers of %d is not an integer multiple "
                          "of ThreadsPerChild of %d, decreasing to nearest "
                          "multiple %d", max_workers, threads_per_child,
@@ -2963,17 +3411,17 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
 
     if (ap_daemons_limit > server_limit) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00515)
                          "WARNING: MaxRequestWorkers of %d would require %d "
                          "servers and ", max_workers, ap_daemons_limit);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03113)
                          " would exceed ServerLimit of %d, decreasing to %d.",
                          server_limit, server_limit * threads_per_child);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03114)
                          " To increase, please see the ServerLimit "
                          "directive.");
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00516)
                          "MaxRequestWorkers of %d would require %d servers and "
                          "exceed ServerLimit of %d, decreasing to %d",
                          max_workers, ap_daemons_limit, server_limit,
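
(Not part of the patch: when ServerLimit binds, both values are clamped in the
elided context lines.  A sketch with assumed values ServerLimit 16,
ThreadsPerChild 25, and MaxRequestWorkers 500, which would need 20 servers:)

    /* Illustrative of the unchanged clamp, not new code. */
    ap_daemons_limit = server_limit;                 /* 16 */
    max_workers = server_limit * threads_per_child;  /* 16 * 25 = 400 */
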
@@ -2983,13 +3431,13 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
 
     /* ap_daemons_to_start > ap_daemons_limit checked in ap_mpm_run() */
-    if (ap_daemons_to_start < 0) {
+    if (ap_daemons_to_start < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00517)
                          "WARNING: StartServers of %d not allowed, "
                          "increasing to 1.", ap_daemons_to_start);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00518)
                          "StartServers of %d not allowed, increasing to 1",
                          ap_daemons_to_start);
         }
@@ -2998,15 +3446,15 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
 
     if (min_spare_threads < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00519)
                          "WARNING: MinSpareThreads of %d not allowed, "
                          "increasing to 1", min_spare_threads);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03115)
                          " to avoid almost certain server failure.");
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03116)
                          " Please read the documentation.");
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00520)
                          "MinSpareThreads of %d not allowed, increasing to 1",
                          min_spare_threads);
         }
@@ -3034,11 +3482,14 @@ static void event_hooks(apr_pool_t * p)
      * to retrieve it, so register as REALLY_FIRST
      */
     ap_hook_pre_config(event_pre_config, NULL, NULL, APR_HOOK_REALLY_FIRST);
+    ap_hook_post_config(event_post_config, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_check_config(event_check_config, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_mpm(event_run, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_mpm_query(event_query, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_mpm_register_timed_callback(event_register_timed_callback, NULL, NULL,
                                         APR_HOOK_MIDDLE);
+    ap_hook_pre_read_request(event_pre_read_request, NULL, NULL, APR_HOOK_MIDDLE);
+    ap_hook_post_read_request(event_post_read_request, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_mpm_get_name(event_get_name, NULL, NULL, APR_HOOK_MIDDLE);
 }
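
(Not part of the patch: the pre/post_read_request hooks the MPM now registers
use the same mechanism third-party modules do.  A minimal sketch of the
registration pattern; the module name and callback are invented for
illustration:)

    #include "httpd.h"
    #include "http_config.h"
    #include "http_protocol.h"

    static int example_post_read_request(request_rec *r)
    {
        /* Runs after the request line and headers have been read. */
        return DECLINED;  /* let later hooks run */
    }

    static void example_register_hooks(apr_pool_t *p)
    {
        ap_hook_post_read_request(example_post_read_request,
                                  NULL, NULL, APR_HOOK_MIDDLE);
    }

    AP_DECLARE_MODULE(example) = {
        STANDARD20_MODULE_STUFF,
        NULL,                   /* create per-dir config */
        NULL,                   /* merge per-dir config */
        NULL,                   /* create per-server config */
        NULL,                   /* merge per-server config */
        NULL,                   /* command table */
        example_register_hooks
    };
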
 
@@ -3085,8 +3536,8 @@ static const char *set_max_workers(cmd_parms * cmd, void *dummy,
     if (err != NULL) {
         return err;
     }
-    if (!strcasecmp(cmd->cmd->name, "MaxRequestWorkers")) {
-        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL,
+    if (!strcasecmp(cmd->cmd->name, "MaxClients")) {
+        ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, APLOGNO(00521)
                      "MaxClients is deprecated, use MaxRequestWorkers "
                      "instead.");
     }
@@ -3128,6 +3579,30 @@ static const char *set_thread_limit(cmd_parms * cmd, void *dummy,
     return NULL;
 }
 
+static const char *set_worker_factor(cmd_parms * cmd, void *dummy,
+                                     const char *arg)
+{
+    double val;
+    char *endptr;
+    const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+    if (err != NULL) {
+        return err;
+    }
+
+    val = strtod(arg, &endptr);
+    if (*endptr)
+        return "error parsing value";
+
+    if (val <= 0)
+        return "AsyncRequestWorkerFactor argument must be a positive number";
+
+    worker_factor = val * WORKER_FACTOR_SCALE;
+    if (worker_factor == 0)
+        worker_factor = 1;
+    return NULL;
+}
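
(Not part of the patch: WORKER_FACTOR_SCALE, defined as 16 in the header hunk,
lets a fractional directive argument survive integer arithmetic, so
worker_factor stores the factor times 16.  A self-contained sketch of the
documented AsyncRequestWorkerFactor semantics -- a child keeps accepting
connections up to roughly ThreadsPerChild + factor * idle workers; the function
name and sample numbers here are illustrative:)

    #include <stdio.h>

    #define WORKER_FACTOR_SCALE 16

    static unsigned int worker_factor = 2 * WORKER_FACTOR_SCALE;  /* default 2.0 */

    static unsigned int connection_allowance(int threads_per_child, int idlers)
    {
        /* Scaled-integer form of: threads + factor * idlers */
        return threads_per_child + idlers * worker_factor / WORKER_FACTOR_SCALE;
    }

    int main(void)
    {
        /* 25 threads, 10 idle, factor 2.0 -> accept up to 45 connections. */
        printf("%u\n", connection_allowance(25, 10));
        return 0;
    }

(Parsing "1.5", for instance, stores worker_factor = 24, i.e. 24/16, which
keeps the listener's comparison in pure integer math.)
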
+
+
 static const command_rec event_cmds[] = {
     LISTEN_COMMANDS,
     AP_INIT_TAKE1("StartServers", set_daemons_to_start, NULL, RSRC_CONF,
@@ -3147,6 +3622,9 @@ static const command_rec event_cmds[] = {
     AP_INIT_TAKE1("ThreadLimit", set_thread_limit, NULL, RSRC_CONF,
                   "Maximum number of worker threads per child process for this "
                   "run of Apache - Upper limit for ThreadsPerChild"),
+    AP_INIT_TAKE1("AsyncRequestWorkerFactor", set_worker_factor, NULL, RSRC_CONF,
+                  "How many additional connects will be accepted per idle "
+                  "worker thread"),
     AP_GRACEFUL_SHUTDOWN_TIMEOUT_COMMAND,
     {NULL}
 };
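
(Not part of the patch: with the table entry above, the new directive would be
set at global scope in httpd.conf, e.g.

    AsyncRequestWorkerFactor 1.5

which set_worker_factor() parses with strtod() and stores as 24 in the scaled
worker_factor.)
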