merging pre_close_connection hook, prep_lingering_close and ap_update_child() additio...
diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
index a66027a8fa9f733a6df6312fe3ef7a6d4999220e..e76dee3441bedfe86b1aa1dddd8fbe294ab21349 100644
  *
  * After a client completes the first request, the client can keep the
  * connection open to send more requests with the same socket.  This can save
- * signifigant overhead in creating TCP connections.  However, the major
+ * significant overhead in creating TCP connections.  However, the major
  * disadvantage is that Apache traditionally keeps an entire child
  * process/thread waiting for data from the client.  To solve this problem,
- * this MPM has a dedicated thread for handling both the Listenting sockets,
+ * this MPM has a dedicated thread for handling both the Listening sockets,
  * and all sockets that are in a Keep Alive status.
  *
  * The MPM assumes the underlying apr_pollset implementation is somewhat
@@ -30,7 +30,7 @@
  * enables the MPM to avoid extra high level locking or having to wake up the
  * listener thread when a keep-alive socket needs to be sent to it.
  *
- * This MPM not preform well on older platforms that do not have very good
+ * This MPM does not perform well on older platforms that do not have very good
  * threading, like Linux with a 2.4 kernel, but this does not matter, since we
  * require EPoll or KQueue.
  *
 #include "apr_poll.h"
 #include "apr_ring.h"
 #include "apr_queue.h"
+#include "apr_atomic.h"
 #define APR_WANT_STRFUNC
 #include "apr_want.h"
 #include "apr_version.h"
 
+#include <stdlib.h>
+
 #if APR_HAVE_UNISTD_H
 #include <unistd.h>
 #endif
@@ -82,8 +85,8 @@
 #include "http_config.h"        /* for read_config */
 #include "http_core.h"          /* for get_remote_host */
 #include "http_connection.h"
+#include "http_protocol.h"
 #include "ap_mpm.h"
-#include "pod.h"
 #include "mpm_common.h"
 #include "ap_listen.h"
 #include "scoreboard.h"
 #include "mpm_default.h"
 #include "http_vhost.h"
 #include "unixd.h"
+#include "apr_skiplist.h"
 
 #include <signal.h>
 #include <limits.h>             /* for INT_MAX */
 
 
-#if HAVE_SERF
-#include "mod_serf.h"
-#include "serf.h"
-#endif
-
 /* Limit on the total --- clients will be locked out if more servers than
  * this are needed.  It is intended solely to keep the server from crashing
  * when things get out of hand.
 #define apr_time_from_msec(x) (x * 1000)
 #endif
 
+#ifndef MAX_SECS_TO_LINGER
+#define MAX_SECS_TO_LINGER 30
+#endif
+#define SECONDS_TO_LINGER  2
+
 /*
  * Actual definitions of config globals
  */
 
+#ifndef DEFAULT_WORKER_FACTOR
+#define DEFAULT_WORKER_FACTOR 2
+#endif
+#define WORKER_FACTOR_SCALE   16  /* scale factor to allow fractional values */
+static unsigned int worker_factor = DEFAULT_WORKER_FACTOR * WORKER_FACTOR_SCALE;
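With `WORKER_FACTOR_SCALE` at 16, the factor is held in fixed point: the default of 2 is stored as 32, and a fractional setting such as 1.5 would be stored as 24. A minimal standalone sketch of the arithmetic (the directive name `AsyncRequestWorkerFactor` and the use of idle workers in the capacity check are assumptions based on the event MPM's documentation, not shown in this hunk):

```c
/* Fixed-point sketch: a fractional factor f is stored as f * 16 so that
 * all later bookkeeping stays in integer arithmetic. */
#include <stdio.h>

#define WORKER_FACTOR_SCALE 16

int main(void)
{
    double directive = 1.5;  /* hypothetical AsyncRequestWorkerFactor 1.5 */
    unsigned int factor = (unsigned int)(directive * WORKER_FACTOR_SCALE); /* 24 */
    unsigned int idlers = 10;

    /* multiply while scaled, then divide the scale back out */
    unsigned int extra = idlers * factor / WORKER_FACTOR_SCALE; /* 15 */
    printf("scaled factor=%u, extra connections allowed=%u\n", factor, extra);
    return 0;
}
```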
+
 static int threads_per_child = 0;   /* Worker threads per child */
 static int ap_daemons_to_start = 0;
 static int min_spare_threads = 0;
 static int max_spare_threads = 0;
 static int ap_daemons_limit = 0;
-static int max_clients = 0;
+static int max_workers = 0;
 static int server_limit = 0;
 static int thread_limit = 0;
+static int had_healthy_child = 0;
 static int dying = 0;
 static int workers_may_exit = 0;
 static int start_thread_may_exit = 0;
 static int listener_may_exit = 0;
-static int requests_this_child;
 static int num_listensocks = 0;
+static apr_int32_t conns_this_child;        /* MaxConnectionsPerChild, only access
+                                               in listener thread */
+static apr_uint32_t connection_count = 0;   /* Number of open connections */
+static apr_uint32_t lingering_count = 0;    /* Number of connections in lingering close */
+static apr_uint32_t suspended_count = 0;    /* Number of suspended connections */
+static apr_uint32_t clogged_count = 0;      /* Number of threads processing ssl conns */
 static int resource_shortage = 0;
 static fd_queue_t *worker_queue;
 static fd_queue_info_t *worker_queue_info;
 static int mpm_state = AP_MPMQ_STARTING;
-static int sick_child_detected;
-static ap_generation_t my_generation = 0;
 
 static apr_thread_mutex_t *timeout_mutex;
-APR_RING_HEAD(timeout_head_t, conn_state_t);
-static struct timeout_head_t timeout_head, keepalive_timeout_head;
 
-static apr_pollset_t *event_pollset;
+module AP_MODULE_DECLARE_DATA mpm_event_module;
 
-#if HAVE_SERF
-typedef struct {
-    apr_pollset_t *pollset;
-    apr_pool_t *pool;
-} s_baton_t;
+/* forward declare */
+struct event_srv_cfg_s;
+typedef struct event_srv_cfg_s event_srv_cfg;
 
-static serf_context_t *g_serf;
-#endif
+struct event_conn_state_t {
+    /** APR_RING of expiration timeouts */
+    APR_RING_ENTRY(event_conn_state_t) timeout_list;
+    /** the time when the entry was queued */
+    apr_time_t queue_timestamp;
+    /** connection record this struct refers to */
+    conn_rec *c;
+    /** request record (if any) this struct refers to */
+    request_rec *r;
+    /** server config this struct refers to */
+    event_srv_cfg *sc;
+    /** is the current conn_rec suspended?  (disassociated with
+     * a particular MPM thread; for suspend_/resume_connection
+     * hooks)
+     */
+    int suspended;
+    /** memory pool to allocate from */
+    apr_pool_t *p;
+    /** bucket allocator */
+    apr_bucket_alloc_t *bucket_alloc;
+    /** poll file descriptor information */
+    apr_pollfd_t pfd;
+    /** public parts of the connection state */
+    conn_state_t pub;
+};
+APR_RING_HEAD(timeout_head_t, event_conn_state_t);
+
+struct timeout_queue {
+    struct timeout_head_t head;
+    int count, *total;
+    apr_interval_time_t timeout;
+    struct timeout_queue *next;
+};
+/*
+ * Several timeout queues that use different timeouts, so that we can always
+ * simply append to the end.
+ *   write_completion_q uses vhost's TimeOut
+ *   keepalive_q        uses vhost's KeepAliveTimeOut
+ *   linger_q           uses MAX_SECS_TO_LINGER
+ *   short_linger_q     uses SECONDS_TO_LINGER
+ */
+static struct timeout_queue *write_completion_q,
+                            *keepalive_q,
+                            *linger_q,
+                            *short_linger_q;
+
+static apr_pollfd_t *listener_pollfd;
+
+/*
+ * Macros for accessing struct timeout_queue.
+ * For TO_QUEUE_APPEND and TO_QUEUE_REMOVE, timeout_mutex must be held.
+ */
+#define TO_QUEUE_APPEND(q, el)                                                \
+    do {                                                                      \
+        APR_RING_INSERT_TAIL(&(q)->head, el, event_conn_state_t,              \
+                             timeout_list);                                   \
+        ++*(q)->total;                                                        \
+        ++(q)->count;                                                         \
+    } while (0)
+
+#define TO_QUEUE_REMOVE(q, el)                                                \
+    do {                                                                      \
+        APR_RING_REMOVE(el, timeout_list);                                    \
+        --*(q)->total;                                                        \
+        --(q)->count;                                                         \
+    } while (0)
+
+#define TO_QUEUE_INIT(q, p, t, v)                                             \
+    do {                                                                      \
+        struct timeout_queue *b = (v);                                        \
+        (q) = apr_palloc((p), sizeof *(q));                                   \
+        APR_RING_INIT(&(q)->head, event_conn_state_t, timeout_list);          \
+        (q)->total = (b) ? (b)->total : apr_pcalloc((p), sizeof *(q)->total); \
+        (q)->count = 0;                                                       \
+        (q)->timeout = (t);                                                   \
+        (q)->next = NULL;                                                     \
+    } while (0)
+
+#define TO_QUEUE_ELEM_INIT(el) APR_RING_ELEM_INIT(el, timeout_list)
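Because every queue has a single fixed timeout and new entries are only ever appended, each queue stays sorted by expiry time, and maintenance can stop at the first entry that has not yet timed out. A simplified standalone model of that invariant (plain C, not the APR_RING code above):

```c
/* Simplified model of the single-timeout-per-queue invariant: entries are
 * appended with their enqueue time, so the head is always the oldest and
 * the expiry scan can stop at the first entry that is still fresh. */
#include <stdio.h>
#include <time.h>

typedef struct entry { time_t queued; struct entry *next; } entry;

static void expire(entry **head, time_t timeout, time_t now)
{
    while (*head && (*head)->queued + timeout <= now) {
        printf("expiring entry queued %ld seconds ago\n",
               (long)(now - (*head)->queued));
        *head = (*head)->next;   /* close the conn, recycle the pool, ... */
    }
    /* everything behind the first fresh entry is younger: stop here */
}

int main(void)
{
    time_t now = time(NULL);
    entry c = { now,      NULL };
    entry b = { now - 3,  &c   };
    entry a = { now - 10, &b   };  /* oldest at the head, as appended */
    entry *q = &a;
    expire(&q, 2, now);            /* expires a and b, keeps c */
    return 0;
}
```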
+
+/*
+ * The pollset for sockets that are in any of the timeout queues. Currently
+ * we use the timeout_mutex to make sure that connections are added/removed
+ * atomically to/from both event_pollset and a timeout queue. Otherwise
+ * some confusion can happen under high load if timeout queues and pollset
+ * get out of sync.
+ * XXX: It should be possible to make the lock unnecessary in many or even all
+ * XXX: cases.
+ */
+static apr_pollset_t *event_pollset;
 
 /* The structure used to pass unique initialization info to each thread */
 typedef struct
@@ -210,15 +308,11 @@ typedef enum
 {
     PT_CSD,
     PT_ACCEPT
-#if HAVE_SERF
-    , PT_SERF
-#endif
 } poll_type_e;
 
 typedef struct
 {
     poll_type_e type;
-    int bypass_push;
     void *baton;
 } listener_poll_type;
 
@@ -230,21 +324,51 @@ typedef struct event_retained_data {
     int first_server_limit;
     int first_thread_limit;
     int module_loads;
+    int sick_child_detected;
+    ap_generation_t my_generation;
+    int volatile is_graceful; /* set from signal handler */
+    int maxclients_reported;
+    /*
+     * The max child slot ever assigned, preserved across restarts.  Necessary
+     * to deal with MaxRequestWorkers changes across AP_SIG_GRACEFUL restarts.
+     * We use this value to optimize routines that have to scan the entire
+     * scoreboard.
+     */
+    int max_daemons_limit;
+    /*
+     * idle_spawn_rate is the number of children that will be spawned on the
+     * next maintenance cycle if there aren't enough idle servers.  It is
+     * maintained per listeners bucket, doubled up to MAX_SPAWN_RATE, and
+     * reset only when a cycle goes by without the need to spawn.
+     */
+    int *idle_spawn_rate;
+#ifndef MAX_SPAWN_RATE
+#define MAX_SPAWN_RATE        (32)
+#endif
+    int hold_off_on_exponential_spawning;
+    /*
+     * Current number of listeners buckets and maximum reached across
+     * restarts (to size retained data according to dynamic num_buckets,
+     * e.g. idle_spawn_rate).
+     */
+    int num_buckets, max_buckets;
 } event_retained_data;
 static event_retained_data *retained;
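The doubling policy described for `idle_spawn_rate` above can be summarized in a few lines. A hypothetical helper, not part of the patch, sketching the per-bucket ramp-up:

```c
/* Hypothetical sketch of the idle_spawn_rate policy described above:
 * double while children are still needed, cap at MAX_SPAWN_RATE, and
 * reset once a maintenance cycle passes without spawning. */
static int next_spawn_rate(int rate, int still_need_children)
{
    if (!still_need_children)
        return 1;                     /* quiet cycle: reset */
    if (rate >= MAX_SPAWN_RATE)
        return MAX_SPAWN_RATE;        /* capped */
    return rate * 2;                  /* exponential ramp-up */
}
```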
+typedef struct event_child_bucket {
+    ap_pod_t *pod;
+    ap_listen_rec *listeners;
+} event_child_bucket;
+static event_child_bucket *all_buckets, /* All listeners buckets */
+                          *my_bucket;   /* Current child bucket */
+
+struct event_srv_cfg_s {
+    struct timeout_queue *wc_q,
+                         *ka_q;
+};
 
 #define ID_FROM_CHILD_THREAD(c, t)    ((c * thread_limit) + t)
 
-/*
- * The max child slot ever assigned, preserved across restarts.  Necessary
- * to deal with MaxClients changes across AP_SIG_GRACEFUL restarts.  We
- * use this value to optimize routines that have to scan the entire
- * scoreboard.
- */
-static int max_daemons_limit = -1;
-
-static ap_event_pod_t *pod;
-
 /* The event MPM respects a couple of runtime flags that can aid
  * in debugging. Setting the -DNO_DETACH flag will prevent the root process
  * from detaching from its controlling terminal. Additionally, setting
@@ -285,6 +409,36 @@ static apr_os_thread_t *listener_os_thread;
  */
 static apr_socket_t **worker_sockets;
 
+static void disable_listensocks(int process_slot)
+{
+    int i;
+    for (i = 0; i < num_listensocks; i++) {
+        apr_pollset_remove(event_pollset, &listener_pollfd[i]);
+    }
+    ap_scoreboard_image->parent[process_slot].not_accepting = 1;
+}
+
+static void enable_listensocks(int process_slot)
+{
+    int i;
+    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00457)
+                 "Accepting new connections again: "
+                 "%u active conns (%u lingering/%u clogged/%u suspended), "
+                 "%u idle workers",
+                 apr_atomic_read32(&connection_count),
+                 apr_atomic_read32(&lingering_count),
+                 apr_atomic_read32(&clogged_count),
+                 apr_atomic_read32(&suspended_count),
+                 ap_queue_info_get_idlers(worker_queue_info));
+    for (i = 0; i < num_listensocks; i++)
+        apr_pollset_add(event_pollset, &listener_pollfd[i]);
+    /*
+     * XXX: This is not yet optimal. If many workers suddenly become available,
+     * XXX: the parent may kill some processes off too soon.
+     */
+    ap_scoreboard_image->parent[process_slot].not_accepting = 0;
+}
+
 static void close_worker_sockets(void)
 {
     int i;
@@ -309,7 +463,7 @@ static void wakeup_listener(void)
     }
 
     /* unblock the listener if it's waiting for a worker */
-    ap_queue_info_term(worker_queue_info); 
+    ap_queue_info_term(worker_queue_info);
 
     /*
      * we should just be able to "kill(ap_my_pid, LISTENER_SIGNAL)" on all
@@ -358,7 +512,7 @@ static int event_query(int query_code, int *result, apr_status_t *rv)
     *rv = APR_SUCCESS;
     switch (query_code) {
     case AP_MPMQ_MAX_DAEMON_USED:
-        *result = max_daemons_limit;
+        *result = retained->max_daemons_limit;
         break;
     case AP_MPMQ_IS_THREADED:
         *result = AP_MPMQ_STATIC;
@@ -369,9 +523,6 @@ static int event_query(int query_code, int *result, apr_status_t *rv)
     case AP_MPMQ_IS_ASYNC:
         *result = 1;
         break;
-    case AP_MPMQ_HAS_SERF:
-        *result = 1;
-        break;
     case AP_MPMQ_HARD_LIMIT_DAEMONS:
         *result = server_limit;
         break;
@@ -403,7 +554,7 @@ static int event_query(int query_code, int *result, apr_status_t *rv)
         *result = mpm_state;
         break;
     case AP_MPMQ_GENERATION:
-        *result = my_generation;
+        *result = retained->my_generation;
         break;
     default:
         *rv = APR_ENOTIMPL;
@@ -412,10 +563,47 @@ static int event_query(int query_code, int *result, apr_status_t *rv)
     return OK;
 }
 
-static apr_status_t event_note_child_killed(int childnum)
+static void event_note_child_killed(int childnum, pid_t pid, ap_generation_t gen)
 {
-    ap_scoreboard_image->parent[childnum].pid = 0;
-    return APR_SUCCESS;
+    if (childnum != -1) { /* child had a scoreboard slot? */
+        ap_run_child_status(ap_server_conf,
+                            ap_scoreboard_image->parent[childnum].pid,
+                            ap_scoreboard_image->parent[childnum].generation,
+                            childnum, MPM_CHILD_EXITED);
+        ap_scoreboard_image->parent[childnum].pid = 0;
+    }
+    else {
+        ap_run_child_status(ap_server_conf, pid, gen, -1, MPM_CHILD_EXITED);
+    }
+}
+
+static void event_note_child_started(int slot, pid_t pid)
+{
+    ap_scoreboard_image->parent[slot].pid = pid;
+    ap_run_child_status(ap_server_conf,
+                        ap_scoreboard_image->parent[slot].pid,
+                        retained->my_generation, slot, MPM_CHILD_STARTED);
+}
+
+static void event_note_child_lost_slot(int slot, pid_t newpid)
+{
+    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00458)
+                 "pid %" APR_PID_T_FMT " taking over scoreboard slot from "
+                 "%" APR_PID_T_FMT "%s",
+                 newpid,
+                 ap_scoreboard_image->parent[slot].pid,
+                 ap_scoreboard_image->parent[slot].quiescing ?
+                 " (quiescing)" : "");
+    ap_run_child_status(ap_server_conf,
+                        ap_scoreboard_image->parent[slot].pid,
+                        ap_scoreboard_image->parent[slot].generation,
+                        slot, MPM_CHILD_LOST_SLOT);
+    /* Don't forget about this exiting child process, or we
+     * won't be able to kill it if it doesn't exit by the
+     * time the server is shut down.
+     */
+    ap_register_extra_mpm_process(ap_scoreboard_image->parent[slot].pid,
+                                  ap_scoreboard_image->parent[slot].generation);
 }
 
 static const char *event_get_name(void)
@@ -431,6 +619,11 @@ static void clean_child_exit(int code)
     if (pchild) {
         apr_pool_destroy(pchild);
     }
+
+    if (one_process) {
+        event_note_child_killed(/* slot */ 0, 0, 0);
+    }
+
     exit(code);
 }
 
@@ -448,7 +641,24 @@ static int child_fatal;
 /* volatile because they're updated from a signal handler */
 static int volatile shutdown_pending;
 static int volatile restart_pending;
-static int volatile is_graceful;
+
+static apr_status_t decrement_connection_count(void *cs_)
+{
+    event_conn_state_t *cs = cs_;
+    switch (cs->pub.state) {
+        case CONN_STATE_LINGER_NORMAL:
+        case CONN_STATE_LINGER_SHORT:
+            apr_atomic_dec32(&lingering_count);
+            break;
+        case CONN_STATE_SUSPENDED:
+            apr_atomic_dec32(&suspended_count);
+            break;
+        default:
+            break;
+    }
+    apr_atomic_dec32(&connection_count);
+    return APR_SUCCESS;
+}
 
 /*
  * ap_start_shutdown() and ap_start_restart(), below, are a first stab at
@@ -456,7 +666,7 @@ static int volatile is_graceful;
  * Previously this was initiated in sig_term() and restart() signal handlers,
  * but we want to be able to start a shutdown/restart from other sources --
  * e.g. on Win32, from the service manager. Now the service manager can
- * call ap_start_shutdown() or ap_start_restart() as appropiate.  Note that
+ * call ap_start_shutdown() or ap_start_restart() as appropriate.  Note that
  * these functions can also be called by the child processes, since global
  * variables are no longer used to pass on the required action to the parent.
  *
@@ -480,7 +690,7 @@ static void ap_start_shutdown(int graceful)
         return;
     }
     shutdown_pending = 1;
-    is_graceful = graceful;
+    retained->is_graceful = graceful;
 }
 
 /* do a graceful restart if graceful == 1 */
@@ -492,7 +702,7 @@ static void ap_start_restart(int graceful)
         return;
     }
     restart_pending = 1;
-    is_graceful = graceful;
+    retained->is_graceful = graceful;
 }
 
 static void sig_term(int sig)
@@ -521,34 +731,37 @@ static void set_signals(void)
 
     sa.sa_handler = sig_term;
     if (sigaction(SIGTERM, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00459)
                      "sigaction(SIGTERM)");
 #ifdef AP_SIG_GRACEFUL_STOP
     if (sigaction(AP_SIG_GRACEFUL_STOP, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00460)
                      "sigaction(" AP_SIG_GRACEFUL_STOP_STRING ")");
 #endif
 #ifdef SIGINT
     if (sigaction(SIGINT, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00461)
                      "sigaction(SIGINT)");
 #endif
 #ifdef SIGXCPU
     sa.sa_handler = SIG_DFL;
     if (sigaction(SIGXCPU, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00462)
                      "sigaction(SIGXCPU)");
 #endif
 #ifdef SIGXFSZ
-    sa.sa_handler = SIG_DFL;
+    /* For systems following the LFS standard, ignoring SIGXFSZ allows
+     * a write() beyond the 2GB limit to fail gracefully with E2BIG
+     * rather than terminate the process. */
+    sa.sa_handler = SIG_IGN;
     if (sigaction(SIGXFSZ, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00463)
                      "sigaction(SIGXFSZ)");
 #endif
 #ifdef SIGPIPE
     sa.sa_handler = SIG_IGN;
     if (sigaction(SIGPIPE, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00464)
                      "sigaction(SIGPIPE)");
 #endif
 
@@ -558,10 +771,10 @@ static void set_signals(void)
     sigaddset(&sa.sa_mask, AP_SIG_GRACEFUL);
     sa.sa_handler = restart;
     if (sigaction(SIGHUP, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00465)
                      "sigaction(SIGHUP)");
     if (sigaction(AP_SIG_GRACEFUL, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00466)
                      "sigaction(" AP_SIG_GRACEFUL_STRING ")");
 #else
     if (!one_process) {
@@ -569,7 +782,7 @@ static void set_signals(void)
         apr_signal(SIGXCPU, SIG_DFL);
 #endif /* SIGXCPU */
 #ifdef SIGXFSZ
-        apr_signal(SIGXFSZ, SIG_DFL);
+        apr_signal(SIGXFSZ, SIG_IGN);
 #endif /* SIGXFSZ */
     }
 
@@ -590,50 +803,256 @@ static void set_signals(void)
 #endif
 }
 
-/*****************************************************************
- * Child process main loop.
+static void notify_suspend(event_conn_state_t *cs)
+{
+    ap_run_suspend_connection(cs->c, cs->r);
+    cs->suspended = 1;
+    cs->c->sbh = NULL;
+}
+
+static void notify_resume(event_conn_state_t *cs, ap_sb_handle_t *sbh)
+{
+    cs->c->sbh = sbh;
+    cs->suspended = 0;
+    ap_run_resume_connection(cs->c, cs->r);
+}
+
+static int start_lingering_close_common(event_conn_state_t *cs, int in_worker)
+{
+    apr_status_t rv;
+    struct timeout_queue *q;
+    apr_socket_t *csd = cs->pfd.desc.s;
+#ifdef AP_DEBUG
+    {
+        rv = apr_socket_timeout_set(csd, 0);
+        AP_DEBUG_ASSERT(rv == APR_SUCCESS);
+    }
+#else
+    apr_socket_timeout_set(csd, 0);
+#endif
+    cs->queue_timestamp = apr_time_now();
+    /*
+     * If some module requested a shortened waiting period, only wait for
+     * 2s (SECONDS_TO_LINGER). This is useful for mitigating certain
+     * DoS attacks.
+     */
+    if (apr_table_get(cs->c->notes, "short-lingering-close")) {
+        q = short_linger_q;
+        cs->pub.state = CONN_STATE_LINGER_SHORT;
+    }
+    else {
+        q = linger_q;
+        cs->pub.state = CONN_STATE_LINGER_NORMAL;
+    }
+    apr_atomic_inc32(&lingering_count);
+    if (in_worker) {
+        notify_suspend(cs);
+    }
+    else {
+        cs->c->sbh = NULL;
+    }
+    apr_thread_mutex_lock(timeout_mutex);
+    TO_QUEUE_APPEND(q, cs);
+    cs->pfd.reqevents = (
+            cs->pub.sense == CONN_SENSE_WANT_WRITE ? APR_POLLOUT :
+                    APR_POLLIN) | APR_POLLHUP | APR_POLLERR;
+    cs->pub.sense = CONN_SENSE_DEFAULT;
+    rv = apr_pollset_add(event_pollset, &cs->pfd);
+    apr_thread_mutex_unlock(timeout_mutex);
+    if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) {
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03092)
+                     "start_lingering_close: apr_pollset_add failure");
+        apr_thread_mutex_lock(timeout_mutex);
+        TO_QUEUE_REMOVE(q, cs);
+        apr_thread_mutex_unlock(timeout_mutex);
+        apr_socket_close(cs->pfd.desc.s);
+        ap_push_pool(worker_queue_info, cs->p);
+        return 0;
+    }
+    return 1;
+}
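The `short-lingering-close` note checked above is the interface other modules use to opt a connection into the 2-second linger. A sketch of how a module might set it (the surrounding function is hypothetical; in httpd, mod_reqtimeout sets this same note when a client stalls):

```c
/* Sketch: request the SECONDS_TO_LINGER (2s) close for a connection we
 * consider abusive, instead of the full MAX_SECS_TO_LINGER wait. */
static void mark_for_short_linger(conn_rec *c)
{
    apr_table_setn(c->notes, "short-lingering-close", "1");
}
```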
+
+/*
+ * Close our side of the connection, flushing data to the client first.
+ * Pre-condition: cs is not in any timeout queue and not in the pollset,
+ *                timeout_mutex is not locked
+ * return: 0 if connection is fully closed,
+ *         1 if connection is lingering
+ * May only be called by worker thread.
  */
+static int start_lingering_close_blocking(event_conn_state_t *cs)
+{
+    if (ap_start_lingering_close(cs->c)) {
+        notify_suspend(cs);
+        ap_push_pool(worker_queue_info, cs->p);
+        return 0;
+    }
+    return start_lingering_close_common(cs, 1);
+}
 
-static int process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock,
-                          conn_state_t * cs, int my_child_num,
+/*
+ * Close our side of the connection, NOT flushing data to the client.
+ * This should only be called if there has been an error or if we know
+ * that our send buffers are empty.
+ * Pre-condition: cs is not in any timeout queue and not in the pollset,
+ *                timeout_mutex is not locked
+ * return: 0 if connection is fully closed,
+ *         1 if connection is lingering
+ * may be called by listener thread
+ */
+static int start_lingering_close_nonblocking(event_conn_state_t *cs)
+{
+    conn_rec *c = cs->c;
+    apr_socket_t *csd = cs->pfd.desc.s;
+
+    if (ap_prep_lingering_close(c)
+        || c->aborted
+        || ap_shutdown_conn(c, 0) != APR_SUCCESS || c->aborted
+        || apr_socket_shutdown(csd, APR_SHUTDOWN_WRITE) != APR_SUCCESS) {
+        apr_socket_close(csd);
+        ap_push_pool(worker_queue_info, cs->p);
+        return 0;
+    }
+    return start_lingering_close_common(cs, 0);
+}
+
+/*
+ * forcibly close a lingering connection after the lingering period has
+ * expired
+ * Pre-condition: cs is not in any timeout queue and not in the pollset
+ * return: irrelevant (need same prototype as start_lingering_close)
+ */
+static int stop_lingering_close(event_conn_state_t *cs)
+{
+    apr_status_t rv;
+    apr_socket_t *csd = ap_get_conn_socket(cs->c);
+    ap_log_error(APLOG_MARK, APLOG_TRACE4, 0, ap_server_conf,
+                 "socket reached timeout in lingering-close state");
+    rv = apr_socket_close(csd);
+    if (rv != APR_SUCCESS) {
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(00468) "error closing socket");
+        AP_DEBUG_ASSERT(0);
+    }
+    ap_push_pool(worker_queue_info, cs->p);
+    return 0;
+}
+
+/*
+ * This runs before any non-MPM cleanup code on the connection;
+ * if the connection is currently suspended as far as modules
+ * know, provide notification of resumption.
+ */
+static apr_status_t ptrans_pre_cleanup(void *dummy)
+{
+    event_conn_state_t *cs = dummy;
+
+    if (cs->suspended) {
+        notify_resume(cs, NULL);
+    }
+    return APR_SUCCESS;
+}
+
+/*
+ * event_pre_read_request() and event_request_cleanup() track the
+ * current r for a given connection.
+ */
+static apr_status_t event_request_cleanup(void *dummy)
+{
+    conn_rec *c = dummy;
+    event_conn_state_t *cs = ap_get_module_config(c->conn_config,
+                                                  &mpm_event_module);
+
+    cs->r = NULL;
+    return APR_SUCCESS;
+}
+
+static void event_pre_read_request(request_rec *r, conn_rec *c)
+{
+    event_conn_state_t *cs = ap_get_module_config(c->conn_config,
+                                                  &mpm_event_module);
+
+    cs->r = r;
+    cs->sc = ap_get_module_config(ap_server_conf->module_config,
+                                  &mpm_event_module);
+    apr_pool_cleanup_register(r->pool, c, event_request_cleanup,
+                              apr_pool_cleanup_null);
+}
+
+/*
+ * event_post_read_request() tracks the current server config for a
+ * given request.
+ */
+static int event_post_read_request(request_rec *r)
+{
+    conn_rec *c = r->connection;
+    event_conn_state_t *cs = ap_get_module_config(c->conn_config,
+                                                  &mpm_event_module);
+
+    /* To preserve legacy behaviour (consistent with other MPMs), use
+     * the keepalive timeout from the base server (first on this IP:port)
+     * when none is explicitly configured on this server.
+     */
+    if (r->server->keep_alive_timeout_set) {
+        cs->sc = ap_get_module_config(r->server->module_config,
+                                      &mpm_event_module);
+    }
+    else {
+        cs->sc = ap_get_module_config(c->base_server->module_config,
+                                      &mpm_event_module);
+    }
+    return OK;
+}
+
+/*
+ * process one connection in the worker
+ */
+static void process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock,
+                          event_conn_state_t * cs, int my_child_num,
                           int my_thread_num)
 {
     conn_rec *c;
-    listener_poll_type *pt;
     long conn_id = ID_FROM_CHILD_THREAD(my_child_num, my_thread_num);
     int rc;
     ap_sb_handle_t *sbh;
 
+    /* XXX: This will cause unbounded mem usage for long lasting connections */
     ap_create_sb_handle(&sbh, p, my_child_num, my_thread_num);
 
     if (cs == NULL) {           /* This is a new connection */
-
-        cs = apr_pcalloc(p, sizeof(conn_state_t));
-
-        pt = apr_pcalloc(p, sizeof(*pt));
-
+        listener_poll_type *pt = apr_pcalloc(p, sizeof(*pt));
+        cs = apr_pcalloc(p, sizeof(event_conn_state_t));
         cs->bucket_alloc = apr_bucket_alloc_create(p);
         c = ap_run_create_connection(p, ap_server_conf, sock,
                                      conn_id, sbh, cs->bucket_alloc);
+        if (!c) {
+            ap_push_pool(worker_queue_info, p);
+            return;
+        }
+        apr_atomic_inc32(&connection_count);
+        apr_pool_cleanup_register(c->pool, cs, decrement_connection_count,
+                                  apr_pool_cleanup_null);
+        ap_set_module_config(c->conn_config, &mpm_event_module, cs);
         c->current_thread = thd;
         cs->c = c;
-        c->cs = cs;
+        c->cs = &(cs->pub);
         cs->p = p;
+        cs->sc = ap_get_module_config(ap_server_conf->module_config,
+                                      &mpm_event_module);
         cs->pfd.desc_type = APR_POLL_SOCKET;
         cs->pfd.reqevents = APR_POLLIN;
         cs->pfd.desc.s = sock;
         pt->type = PT_CSD;
-        pt->bypass_push = 1;
         pt->baton = cs;
         cs->pfd.client_data = pt;
-        APR_RING_ELEM_INIT(cs, timeout_list);
+        apr_pool_pre_cleanup_register(p, cs, ptrans_pre_cleanup);
+        TO_QUEUE_ELEM_INIT(cs);
 
         ap_update_vhost_given_ip(c);
 
         rc = ap_run_pre_connection(c, sock);
         if (rc != OK && rc != DONE) {
-            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
-                         "process_socket: connection aborted");
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(00469)
+                          "process_socket: connection aborted");
             c->aborted = 1;
         }
 
@@ -650,29 +1069,35 @@ static int process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock
          * When the accept filter is active, sockets are kept in the
          * kernel until a HTTP request is received.
          */
-        cs->state = CONN_STATE_READ_REQUEST_LINE;
+        cs->pub.state = CONN_STATE_READ_REQUEST_LINE;
 
+        cs->pub.sense = CONN_SENSE_DEFAULT;
     }
     else {
         c = cs->c;
-        c->sbh = sbh;
-        pt = cs->pfd.client_data;
+        notify_resume(cs, sbh);
         c->current_thread = thd;
+        /* Subsequent request on a conn, and thread number is part of ID */
+        c->id = conn_id;
     }
 
     if (c->clogging_input_filters && !c->aborted) {
-        /* Since we have an input filter which 'cloggs' the input stream,
-         * like mod_ssl, lets just do the normal read from input filters,
-         * like the Worker MPM does.
+        /* Since we have an input filter which 'clogs' the input stream,
+         * like mod_ssl used to, let's just do the normal read from input
+         * filters, like the Worker MPM does. Filters that need to write
+         * where they would otherwise read, or read where they would
+         * otherwise write, should set the sense appropriately.
          */
+        apr_atomic_inc32(&clogged_count);
         ap_run_process_connection(c);
-        if (cs->state != CONN_STATE_SUSPENDED) {
-            cs->state = CONN_STATE_LINGER;
+        if (cs->pub.state != CONN_STATE_SUSPENDED) {
+            cs->pub.state = CONN_STATE_LINGER;
         }
+        apr_atomic_dec32(&clogged_count);
     }
 
 read_request:
-    if (cs->state == CONN_STATE_READ_REQUEST_LINE) {
+    if (cs->pub.state == CONN_STATE_READ_REQUEST_LINE) {
         if (!c->aborted) {
             ap_run_process_connection(c);
 
@@ -682,60 +1107,58 @@ read_request:
              */
         }
         else {
-            cs->state = CONN_STATE_LINGER;
+            cs->pub.state = CONN_STATE_LINGER;
         }
     }
 
-    if (cs->state == CONN_STATE_WRITE_COMPLETION) {
+    if (cs->pub.state == CONN_STATE_WRITE_COMPLETION) {
         ap_filter_t *output_filter = c->output_filters;
         apr_status_t rv;
+        ap_update_child_status_from_conn(sbh, SERVER_BUSY_WRITE, c);
         while (output_filter->next != NULL) {
             output_filter = output_filter->next;
         }
         rv = output_filter->frec->filter_func.out_func(output_filter, NULL);
         if (rv != APR_SUCCESS) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, rv, ap_server_conf,
-                     "network write failure in core output filter");
-            cs->state = CONN_STATE_LINGER;
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(00470)
+                          "network write failure in core output filter");
+            cs->pub.state = CONN_STATE_LINGER;
         }
         else if (c->data_in_output_filters) {
             /* Still in WRITE_COMPLETION_STATE:
              * Set a write timeout for this connection, and let the
              * event thread poll for writeability.
              */
-            cs->expiration_time = ap_server_conf->timeout + apr_time_now();
+            cs->queue_timestamp = apr_time_now();
+            notify_suspend(cs);
             apr_thread_mutex_lock(timeout_mutex);
-            APR_RING_INSERT_TAIL(&timeout_head, cs, conn_state_t, timeout_list);
-            apr_thread_mutex_unlock(timeout_mutex);
-            pt->bypass_push = 0;
-            cs->pfd.reqevents = APR_POLLOUT | APR_POLLHUP | APR_POLLERR;
+            TO_QUEUE_APPEND(cs->sc->wc_q, cs);
+            cs->pfd.reqevents = (
+                    cs->pub.sense == CONN_SENSE_WANT_READ ? APR_POLLIN :
+                            APR_POLLOUT) | APR_POLLHUP | APR_POLLERR;
+            cs->pub.sense = CONN_SENSE_DEFAULT;
             rc = apr_pollset_add(event_pollset, &cs->pfd);
-            return 1;
+            apr_thread_mutex_unlock(timeout_mutex);
+            return;
         }
         else if (c->keepalive != AP_CONN_KEEPALIVE || c->aborted ||
             listener_may_exit) {
-            c->cs->state = CONN_STATE_LINGER;
+            cs->pub.state = CONN_STATE_LINGER;
         }
         else if (c->data_in_input_filters) {
-            cs->state = CONN_STATE_READ_REQUEST_LINE;
+            cs->pub.state = CONN_STATE_READ_REQUEST_LINE;
             goto read_request;
         }
         else {
-            cs->state = CONN_STATE_CHECK_REQUEST_LINE_READABLE;
+            cs->pub.state = CONN_STATE_CHECK_REQUEST_LINE_READABLE;
         }
     }
 
-    if (cs->state == CONN_STATE_LINGER) {
-        ap_lingering_close(c);
-        apr_pool_clear(p);
-        ap_push_pool(worker_queue_info, p);
-        return 0;
+    if (cs->pub.state == CONN_STATE_LINGER) {
+        start_lingering_close_blocking(cs);
     }
-    else if (cs->state == CONN_STATE_CHECK_REQUEST_LINE_READABLE) {
-        apr_status_t rc;
-        listener_poll_type *pt = (listener_poll_type *) cs->pfd.client_data;
-
-        /* It greatly simplifies the logic to use a single timeout value here
+    else if (cs->pub.state == CONN_STATE_CHECK_REQUEST_LINE_READABLE) {
+        /* It greatly simplifies the logic to use a single timeout value per q
          * because the new element can just be added to the end of the list and
          * it will stay sorted in expiration time sequence.  If brand new
          * sockets are sent to the event thread for a readability check, this
@@ -743,36 +1166,59 @@ read_request:
          * timeout today.  With a normal client, the socket will be readable in
          * a few milliseconds anyway.
          */
-        cs->expiration_time = ap_server_conf->keep_alive_timeout +
-                              apr_time_now();
+        cs->queue_timestamp = apr_time_now();
+        notify_suspend(cs);
         apr_thread_mutex_lock(timeout_mutex);
-        APR_RING_INSERT_TAIL(&keepalive_timeout_head, cs, conn_state_t, timeout_list);
-        apr_thread_mutex_unlock(timeout_mutex);
+        TO_QUEUE_APPEND(cs->sc->ka_q, cs);
 
-        pt->bypass_push = 0;
         /* Add work to pollset. */
         cs->pfd.reqevents = APR_POLLIN;
         rc = apr_pollset_add(event_pollset, &cs->pfd);
+        apr_thread_mutex_unlock(timeout_mutex);
 
         if (rc != APR_SUCCESS) {
-            ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
+            ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf, APLOGNO(03093)
                          "process_socket: apr_pollset_add failure");
             AP_DEBUG_ASSERT(rc == APR_SUCCESS);
         }
     }
-    return 1;
+    else if (cs->pub.state == CONN_STATE_SUSPENDED) {
+        apr_atomic_inc32(&suspended_count);
+        notify_suspend(cs);
+    }
 }
 
-/* requests_this_child has gone to zero or below.  See if the admin coded
+/* conns_this_child has gone to zero or below.  See if the admin coded
    "MaxConnectionsPerChild 0", and keep going in that case.  Doing it this way
    simplifies the hot path in worker_thread */
 static void check_infinite_requests(void)
 {
     if (ap_max_requests_per_child) {
+        ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
+                     "Stopping process due to MaxConnectionsPerChild");
         signal_threads(ST_GRACEFUL);
     }
     else {
-        requests_this_child = INT_MAX;  /* keep going */
+        /* keep going */
+        conns_this_child = APR_INT32_MAX;
+    }
+}
+
+static void close_listeners(int process_slot, int *closed)
+{
+    if (!*closed) {
+        int i;
+        disable_listensocks(process_slot);
+        ap_close_listeners_ex(my_bucket->listeners);
+        *closed = 1;
+        dying = 1;
+        ap_scoreboard_image->parent[process_slot].quiescing = 1;
+        for (i = 0; i < threads_per_child; ++i) {
+            ap_update_child_status_from_indexes(process_slot, i,
+                                                SERVER_GRACEFUL, NULL);
+        }
+        /* wake up the main thread */
+        kill(ap_my_pid, SIGTERM);
     }
 }
 
@@ -797,44 +1243,17 @@ static void dummy_signal_handler(int sig)
 }
 
 
-#if HAVE_SERF
-static apr_status_t s_socket_add(void *user_baton,
-                                 apr_pollfd_t *pfd,
-                                 void *serf_baton)
-{
-    s_baton_t *s = (s_baton_t*)user_baton;
-    /* XXXXX: recycle listener_poll_types */
-    listener_poll_type *pt = malloc(sizeof(*pt));
-    pt->type = PT_SERF;
-    pt->baton = serf_baton;
-    pfd->client_data = pt;
-    return apr_pollset_add(s->pollset, pfd);
-}
-
-static apr_status_t s_socket_remove(void *user_baton,
-                                    apr_pollfd_t *pfd,
-                                    void *serf_baton)
-{
-    s_baton_t *s = (s_baton_t*)user_baton;
-    listener_poll_type *pt = pfd->client_data;
-    free(pt);
-    return apr_pollset_remove(s->pollset, pfd);
-}
-#endif
-
 static apr_status_t init_pollset(apr_pool_t *p)
 {
-#if HAVE_SERF
-    s_baton_t *baton = NULL;
-#endif
     ap_listen_rec *lr;
     listener_poll_type *pt;
+    int i = 0;
 
-    APR_RING_INIT(&timeout_head, conn_state_t, timeout_list);
-    APR_RING_INIT(&keepalive_timeout_head, conn_state_t, timeout_list);
-
-    for (lr = ap_listeners; lr != NULL; lr = lr->next) {
-        apr_pollfd_t *pfd = apr_palloc(p, sizeof(*pfd));
+    listener_pollfd = apr_palloc(p, sizeof(apr_pollfd_t) * num_listensocks);
+    for (lr = my_bucket->listeners; lr != NULL; lr = lr->next, i++) {
+        apr_pollfd_t *pfd;
+        AP_DEBUG_ASSERT(i < num_listensocks);
+        pfd = &listener_pollfd[i];
         pt = apr_pcalloc(p, sizeof(*pt));
         pfd->desc_type = APR_POLL_SOCKET;
         pfd->desc.s = lr->sd;
@@ -851,21 +1270,6 @@ static apr_status_t init_pollset(apr_pool_t *p)
         lr->accept_func = ap_unixd_accept;
     }
 
-#if HAVE_SERF
-    baton = apr_pcalloc(p, sizeof(*baton));
-    baton->pollset = event_pollset;
-    /* TODO: subpools, threads, reuse, etc.  -- currently use malloc() inside :( */
-    baton->pool = p;
-
-    g_serf = serf_context_create_ex(baton,
-                                    s_socket_add,
-                                    s_socket_remove, p);
-
-    ap_register_provider(p, "mpm_serf",
-                         "instance", "0", g_serf);
-
-#endif
-
     return APR_SUCCESS;
 }
 
@@ -874,31 +1278,17 @@ static apr_status_t push_timer2worker(timer_event_t* te)
     return ap_queue_push_timer(worker_queue, te);
 }
 
+/*
+ * Pre-condition: pfd->cs is neither in pollset nor timeout queue
+ * this function may only be called by the listener
+ */
 static apr_status_t push2worker(const apr_pollfd_t * pfd,
                                 apr_pollset_t * pollset)
 {
     listener_poll_type *pt = (listener_poll_type *) pfd->client_data;
-    conn_state_t *cs = (conn_state_t *) pt->baton;
+    event_conn_state_t *cs = (event_conn_state_t *) pt->baton;
     apr_status_t rc;
 
-    if (pt->bypass_push) {
-        return APR_SUCCESS;
-    }
-
-    pt->bypass_push = 1;
-
-    rc = apr_pollset_remove(pollset, pfd);
-
-    /*
-     * Some of the pollset backends, like KQueue or Epoll
-     * automagically remove the FD if the socket is closed,
-     * therefore, we can accept _SUCCESS or _NOTFOUND,
-     * and we still want to keep going
-     */
-    if (rc != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rc)) {
-        cs->state = CONN_STATE_LINGER;
-    }
-
     rc = ap_queue_push(worker_queue, cs->pfd.desc.s, cs, cs->p);
     if (rc != APR_SUCCESS) {
         /* trash the connection; we couldn't queue the connected
@@ -907,8 +1297,7 @@ static apr_status_t push2worker(const apr_pollfd_t * pfd,
         apr_bucket_alloc_destroy(cs->bucket_alloc);
         apr_socket_close(cs->pfd.desc.s);
         ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                     ap_server_conf, "push2worker: ap_queue_push failed");
-        apr_pool_clear(cs->p);
+                     ap_server_conf, APLOGNO(00471) "push2worker: ap_queue_push failed");
         ap_push_pool(worker_queue_info, cs->p);
     }
 
@@ -916,67 +1305,86 @@ static apr_status_t push2worker(const apr_pollfd_t * pfd,
 }
 
 /* get_worker:
- *     reserve a worker thread, block if all are currently busy.
- *     this prevents the worker queue from overflowing and lets
- *     other processes accept new connections in the mean time.
+ *     If *have_idle_worker_p == 0, reserve a worker thread, and set
+ *     *have_idle_worker_p = 1.
+ *     If *have_idle_worker_p is already 1, will do nothing.
+ *     If blocking == 1, block if all workers are currently busy.
+ *     If no worker was available immediately, will set *all_busy to 1.
+ *     XXX: If there are no workers, we should not block immediately but
+ *     XXX: close all keep-alive connections first.
  */
-static int get_worker(int *have_idle_worker_p)
+static void get_worker(int *have_idle_worker_p, int blocking, int *all_busy)
 {
     apr_status_t rc;
 
-    if (!*have_idle_worker_p) {
-        rc = ap_queue_info_wait_for_idler(worker_queue_info);
-
-        if (rc == APR_SUCCESS) {
-            *have_idle_worker_p = 1;
-            return 1;
-        }
-        else {
-            if (!APR_STATUS_IS_EOF(rc)) {
-                ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
-                             "ap_queue_info_wait_for_idler failed.  "
-                             "Attempting to shutdown process gracefully");
-                signal_threads(ST_GRACEFUL);
-            }
-            return 0;
-        }
-    }
-    else {
+    if (*have_idle_worker_p) {
         /* already reserved a worker thread - must have hit a
          * transient error on a previous pass
          */
-        return 1;
+        return;
     }
-}
 
-/* XXXXXX: Convert to skiplist or other better data structure 
- * (yes, this is VERY VERY VERY VERY BAD)
- */
+    if (blocking)
+        rc = ap_queue_info_wait_for_idler(worker_queue_info, all_busy);
+    else
+        rc = ap_queue_info_try_get_idler(worker_queue_info);
+
+    if (rc == APR_SUCCESS || APR_STATUS_IS_EOF(rc)) {
+        *have_idle_worker_p = 1;
+    }
+    else if (!blocking && rc == APR_EAGAIN) {
+        *all_busy = 1;
+    }
+    else {
+        ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf, APLOGNO(00472)
+                     "ap_queue_info_wait_for_idler failed.  "
+                     "Attempting to shutdown process gracefully");
+        signal_threads(ST_GRACEFUL);
+    }
+}
 
 /* Structures to reuse */
 static APR_RING_HEAD(timer_free_ring_t, timer_event_t) timer_free_ring;
-/* Active timers */
-static APR_RING_HEAD(timer_ring_t, timer_event_t) timer_ring;
 
-static apr_thread_mutex_t *g_timer_ring_mtx;
+static apr_skiplist *timer_skiplist;
+
+/* The following compare function is used by apr_skiplist_insert() to keep the
+ * elements (timers) sorted and provide O(log n) complexity (this is also true
+ * for apr_skiplist_{find,remove}(), but those are not used in MPM event where
+ * inserted timers are not searched nor removed, but with apr_skiplist_pop()
+ * which does not use any compare function).  It is meant to return 0 when a == b,
+ * <0 when a < b, and >0 when a > b.  However apr_skiplist_insert() will not
+ * add duplicates (i.e. a == b), and apr_skiplist_add() is only available in
+ * APR 1.6, yet multiple timers could possibly be created in the same micro-
+ * second (duplicates with regard to apr_time_t); therefore we implement the
+ * compare function to return +1 instead of 0 when compared timers are equal,
+ * thus duplicates are still added after each other (in order of insertion).
+ */
+static int timer_comp(void *a, void *b)
+{
+    apr_time_t t1 = (apr_time_t) ((timer_event_t *)a)->when;
+    apr_time_t t2 = (apr_time_t) ((timer_event_t *)b)->when;
+    AP_DEBUG_ASSERT(t1);
+    AP_DEBUG_ASSERT(t2);
+    return ((t1 < t2) ? -1 : 1);
+}
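A standalone demonstration of why `timer_comp` returns 1 rather than 0 for equal timestamps: with a comparator that never reports equality, `apr_skiplist_insert()` keeps colliding timers instead of rejecting them, and they pop out in insertion order. A sketch, not part of the patch, assuming APR >= 1.5 for `apr_skiplist.h`:

```c
#include <stdio.h>
#include "apr_general.h"
#include "apr_pools.h"
#include "apr_time.h"
#include "apr_skiplist.h"

static int cmp(void *a, void *b)
{
    /* same shape as timer_comp: equal keys compare as "greater" */
    return (*(apr_time_t *)a < *(apr_time_t *)b) ? -1 : 1;
}

int main(void)
{
    apr_pool_t *p;
    apr_skiplist *sl;
    apr_time_t t1 = 100, t2 = 100, t3 = 50;   /* t1 and t2 collide */
    apr_time_t *e;

    apr_initialize();
    apr_pool_create(&p, NULL);
    apr_skiplist_init(&sl, p);
    apr_skiplist_set_compare(sl, cmp, cmp);
    apr_skiplist_insert(sl, &t1);
    apr_skiplist_insert(sl, &t2);             /* kept despite equal key */
    apr_skiplist_insert(sl, &t3);
    while ((e = apr_skiplist_peek(sl)) != NULL) {
        printf("%ld\n", (long)*e);            /* prints 50, 100, 100 */
        apr_skiplist_pop(sl, NULL);
    }
    apr_pool_destroy(p);
    apr_terminate();
    return 0;
}
```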
+
+static apr_thread_mutex_t *g_timer_skiplist_mtx;
 
 static apr_status_t event_register_timed_callback(apr_time_t t,
                                                   ap_mpm_callback_fn_t *cbfn,
                                                   void *baton)
 {
-    int inserted = 0;
-    timer_event_t *ep;
     timer_event_t *te;
     /* oh yeah, and make locking smarter/fine grained. */
-    apr_thread_mutex_lock(g_timer_ring_mtx);
+    apr_thread_mutex_lock(g_timer_skiplist_mtx);
 
     if (!APR_RING_EMPTY(&timer_free_ring, timer_event_t, link)) {
         te = APR_RING_FIRST(&timer_free_ring);
         APR_RING_REMOVE(te, link);
     }
     else {
-        /* XXXXX: lol, pool allocation without a context from any thread.Yeah. Right. MPMs Suck. */
-        te = malloc(sizeof(timer_event_t));
+        te = apr_skiplist_alloc(timer_skiplist, sizeof(timer_event_t));
         APR_RING_ELEM_INIT(te, link);
     }
 
@@ -985,26 +1393,123 @@ static apr_status_t event_register_timed_callback(apr_time_t t,
     /* XXXXX: optimize */
     te->when = t + apr_time_now();
 
-    /* Okay, insert sorted by when.. */
-    for (ep = APR_RING_FIRST(&timer_ring);
-         ep != APR_RING_SENTINEL(&timer_ring,
-                                 timer_event_t, link);
-         ep = APR_RING_NEXT(ep, link))
-    {
-        if (ep->when > te->when) {
-            inserted = 1;
-            APR_RING_INSERT_BEFORE(ep, te, link);
-            break;
-        }
+    /* Okay, add sorted by when.. */
+    apr_skiplist_insert(timer_skiplist, te);
+
+    apr_thread_mutex_unlock(g_timer_skiplist_mtx);
+
+    return APR_SUCCESS;
+}
+
+
+/*
+ * Close socket and clean up if remote closed its end while we were in
+ * lingering close.
+ * Only to be called in the listener thread;
+ * Pre-condition: cs is in one of the linger queues and in the pollset
+ */
+static void process_lingering_close(event_conn_state_t *cs, const apr_pollfd_t *pfd)
+{
+    apr_socket_t *csd = ap_get_conn_socket(cs->c);
+    char dummybuf[2048];
+    apr_size_t nbytes;
+    apr_status_t rv;
+    struct timeout_queue *q;
+    q = (cs->pub.state == CONN_STATE_LINGER_SHORT) ? short_linger_q : linger_q;
+
+    /* socket is already in non-blocking state */
+    do {
+        nbytes = sizeof(dummybuf);
+        rv = apr_socket_recv(csd, dummybuf, &nbytes);
+    } while (rv == APR_SUCCESS);
+
+    if (APR_STATUS_IS_EAGAIN(rv)) {
+        return;
     }
-    
-    if (!inserted) {
-        APR_RING_INSERT_TAIL(&timer_ring, te, timer_event_t, link);
+
+    apr_thread_mutex_lock(timeout_mutex);
+    rv = apr_pollset_remove(event_pollset, pfd);
+    AP_DEBUG_ASSERT(rv == APR_SUCCESS);
+
+    rv = apr_socket_close(csd);
+    AP_DEBUG_ASSERT(rv == APR_SUCCESS);
+
+    TO_QUEUE_REMOVE(q, cs);
+    apr_thread_mutex_unlock(timeout_mutex);
+    TO_QUEUE_ELEM_INIT(cs);
+
+    ap_push_pool(worker_queue_info, cs->p);
+}
+
+/* call 'func' for all elements of 'q' with timeout less than 'timeout_time'.
+ * Pre-condition: timeout_mutex must already be locked
+ * Post-condition: timeout_mutex will be locked again
+ */
+static void process_timeout_queue(struct timeout_queue *q,
+                                  apr_time_t timeout_time,
+                                  int (*func)(event_conn_state_t *))
+{
+    int total = 0, count;
+    event_conn_state_t *first, *cs, *last;
+    struct timeout_head_t trash;
+    struct timeout_queue *qp;
+    apr_status_t rv;
+
+    if (!*q->total) {
+        return;
     }
 
-    apr_thread_mutex_unlock(g_timer_ring_mtx);
+    APR_RING_INIT(&trash, event_conn_state_t, timeout_list);
+    for (qp = q; qp; qp = qp->next) {
+        count = 0;
+        cs = first = last = APR_RING_FIRST(&qp->head);
+        while (cs != APR_RING_SENTINEL(&qp->head, event_conn_state_t,
+                                       timeout_list)
+               /* Trash the entry if:
+                * - no timeout_time was given (asked for all), or
+                * - it expired (according to the queue timeout), or
+                * - the system clock skewed in the past: no entry should be
+                *   registered above the given timeout_time (~now) + the queue
+                *   timeout, we won't keep any here (e.g. for centuries).
+                * Stop otherwise, no following entry will match thanks to the
+                * single timeout per queue (entries are added to the end!).
+                * This allows maintenance in O(1).
+                */
+               && (!timeout_time
+                   || cs->queue_timestamp + qp->timeout < timeout_time
+                   || cs->queue_timestamp > timeout_time + qp->timeout)) {
+            last = cs;
+            rv = apr_pollset_remove(event_pollset, &cs->pfd);
+            if (rv != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rv)) {
+                ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, cs->c, APLOGNO(00473)
+                              "apr_pollset_remove failed");
+            }
+            cs = APR_RING_NEXT(cs, timeout_list);
+            count++;
+        }
+        if (!count)
+            continue;
 
-    return APR_SUCCESS;
+        APR_RING_UNSPLICE(first, last, timeout_list);
+        APR_RING_SPLICE_TAIL(&trash, first, last, event_conn_state_t,
+                             timeout_list);
+        qp->count -= count;
+        total += count;
+    }
+    if (!total)
+        return;
+
+    AP_DEBUG_ASSERT(*q->total >= total);
+    *q->total -= total;
+    apr_thread_mutex_unlock(timeout_mutex);
+    first = APR_RING_FIRST(&trash);
+    do {
+        cs = APR_RING_NEXT(first, timeout_list);
+        TO_QUEUE_ELEM_INIT(first);
+        func(first);
+        first = cs;
+    } while (--total);
+    apr_thread_mutex_lock(timeout_mutex);
 }
 
 static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
@@ -1019,14 +1524,14 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
     apr_pool_t *ptrans;         /* Pool for per-transaction stuff */
     ap_listen_rec *lr;
     int have_idle_worker = 0;
-    conn_state_t *cs;
     const apr_pollfd_t *out_pfd;
     apr_int32_t num = 0;
-    apr_time_t time_now = 0;
     apr_interval_time_t timeout_interval;
-    apr_time_t timeout_time;
+    apr_time_t timeout_time = 0, now, last_log;
     listener_poll_type *pt;
+    int closed = 0, listeners_disabled = 0;
 
+    last_log = apr_time_now();
     free(ti);
 
     /* the following times out events that are really close in the future
@@ -1052,177 +1557,244 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
     unblock_signal(LISTENER_SIGNAL);
     apr_signal(LISTENER_SIGNAL, dummy_signal_handler);
 
-    while (!listener_may_exit) {
-
-        if (requests_this_child <= 0) {
-            check_infinite_requests();
+    for (;;) {
+        int workers_were_busy = 0;
+        if (listener_may_exit) {
+            close_listeners(process_slot, &closed);
+            if (terminate_mode == ST_UNGRACEFUL
+                || apr_atomic_read32(&connection_count) == 0)
+                break;
         }
 
+        if (conns_this_child <= 0)
+            check_infinite_requests();
 
-        {
-            apr_time_t now = apr_time_now();
-            apr_thread_mutex_lock(g_timer_ring_mtx);
+        now = apr_time_now();
+        if (APLOGtrace6(ap_server_conf)) {
+            /* trace log status every second */
+            if (now - last_log > apr_time_from_msec(1000)) {
+                last_log = now;
+                apr_thread_mutex_lock(timeout_mutex);
+                ap_log_error(APLOG_MARK, APLOG_TRACE6, 0, ap_server_conf,
+                             "connections: %u (clogged: %u write-completion: %d "
+                             "keep-alive: %d lingering: %d suspended: %u)",
+                             apr_atomic_read32(&connection_count),
+                             apr_atomic_read32(&clogged_count),
+                             *write_completion_q->total,
+                             *keepalive_q->total,
+                             apr_atomic_read32(&lingering_count),
+                             apr_atomic_read32(&suspended_count));
+                apr_thread_mutex_unlock(timeout_mutex);
+            }
+        }
 
-            if (!APR_RING_EMPTY(&timer_ring, timer_event_t, link)) {
-                te = APR_RING_FIRST(&timer_ring);
-                if (te->when > now) {
-                    timeout_interval = te->when - now;
-                }
-                else {
-                    timeout_interval = 1;
-                }
+        apr_thread_mutex_lock(g_timer_skiplist_mtx);
+        te = apr_skiplist_peek(timer_skiplist);
+        if (te) {
+            if (te->when > now) {
+                timeout_interval = te->when - now;
             }
             else {
-                timeout_interval = apr_time_from_msec(100);
+                timeout_interval = 1;
             }
-            apr_thread_mutex_unlock(g_timer_ring_mtx);
         }
-
-#if HAVE_SERF
-        rc = serf_context_prerun(g_serf);
-        if (rc != APR_SUCCESS) {
-            /* TOOD: what should do here? ugh. */
+        else {
+            timeout_interval = apr_time_from_msec(100);
         }
-        
-#endif
-        rc = apr_pollset_poll(event_pollset, timeout_interval, &num,
-                              &out_pfd);
+        apr_thread_mutex_unlock(g_timer_skiplist_mtx);
 
+        rc = apr_pollset_poll(event_pollset, timeout_interval, &num, &out_pfd);
         if (rc != APR_SUCCESS) {
             if (APR_STATUS_IS_EINTR(rc)) {
                 continue;
             }
             if (!APR_STATUS_IS_TIMEUP(rc)) {
-                ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
+                ap_log_error(APLOG_MARK, APLOG_CRIT, rc, ap_server_conf,
                              "apr_pollset_poll failed.  Attempting to "
                              "shutdown process gracefully");
                 signal_threads(ST_GRACEFUL);
             }
         }
 
-        if (listener_may_exit)
-            break;
+        if (listener_may_exit) {
+            close_listeners(process_slot, &closed);
+            if (terminate_mode == ST_UNGRACEFUL
+                || apr_atomic_read32(&connection_count) == 0)
+                break;
+        }
 
-        {
-            apr_time_t now = apr_time_now();
-            apr_thread_mutex_lock(g_timer_ring_mtx);
-            for (ep = APR_RING_FIRST(&timer_ring);
-                 ep != APR_RING_SENTINEL(&timer_ring,
-                                         timer_event_t, link);
-                 ep = APR_RING_FIRST(&timer_ring))
-            {
-                if (ep->when < now + EVENT_FUDGE_FACTOR) {
-                    APR_RING_REMOVE(ep, link);
-                    push_timer2worker(ep);
-                }
-                else {
-                    break;
-                }
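+        /* Pop and dispatch every timer event that is due (i.e. within
+         * EVENT_FUDGE_FACTOR of now); the skiplist is ordered by expiry,
+         * so we can stop at the first event still in the future. */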
+        now = apr_time_now();
+        apr_thread_mutex_lock(g_timer_skiplist_mtx);
+        ep = apr_skiplist_peek(timer_skiplist);
+        while (ep) {
+            if (ep->when < now + EVENT_FUDGE_FACTOR) {
+                apr_skiplist_pop(timer_skiplist, NULL);
+                push_timer2worker(ep);
+            }
+            else {
+                break;
             }
-            apr_thread_mutex_unlock(g_timer_ring_mtx);
+            ep = apr_skiplist_peek(timer_skiplist);
         }
+        apr_thread_mutex_unlock(g_timer_skiplist_mtx);
 
-        while (num && get_worker(&have_idle_worker)) {
+        while (num) {
             pt = (listener_poll_type *) out_pfd->client_data;
             if (pt->type == PT_CSD) {
                 /* one of the sockets is readable */
-                cs = (conn_state_t *) pt->baton;
-                switch (cs->state) {
+                event_conn_state_t *cs = (event_conn_state_t *) pt->baton;
+                struct timeout_queue *remove_from_q = cs->sc->wc_q;
+                int blocking = 1;
+
+                switch (cs->pub.state) {
                 case CONN_STATE_CHECK_REQUEST_LINE_READABLE:
-                    cs->state = CONN_STATE_READ_REQUEST_LINE;
-                    break;
+                    cs->pub.state = CONN_STATE_READ_REQUEST_LINE;
+                    remove_from_q = cs->sc->ka_q;
+                    /* don't wait for a worker for a keepalive request */
+                    blocking = 0;
+                    /* FALL THROUGH */
                 case CONN_STATE_WRITE_COMPLETION:
+                    get_worker(&have_idle_worker, blocking,
+                               &workers_were_busy);
+                    apr_thread_mutex_lock(timeout_mutex);
+                    TO_QUEUE_REMOVE(remove_from_q, cs);
+                    rc = apr_pollset_remove(event_pollset, &cs->pfd);
+                    apr_thread_mutex_unlock(timeout_mutex);
+
+                    /*
+                     * Some of the pollset backends, such as KQueue or EPoll,
+                     * automatically remove the FD when the socket is closed;
+                     * therefore we accept either _SUCCESS or _NOTFOUND and
+                     * keep going.
+                     */
+                    if (rc != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rc)) {
+                        ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
+                                     APLOGNO(03094) "pollset remove failed");
+                        start_lingering_close_nonblocking(cs);
+                        break;
+                    }
+
+                    TO_QUEUE_ELEM_INIT(cs);
+                    /* If we didn't get a worker immediately for a keep-alive
+                     * request, we close the connection, so that the client can
+                     * re-connect to a different process.
+                     */
+                    if (!have_idle_worker) {
+                        start_lingering_close_nonblocking(cs);
+                        break;
+                    }
+                    rc = push2worker(out_pfd, event_pollset);
+                    if (rc != APR_SUCCESS) {
+                        ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
+                                     ap_server_conf, APLOGNO(03095)
+                                     "push2worker failed");
+                    }
+                    else {
+                        have_idle_worker = 0;
+                    }
+                    break;
+                case CONN_STATE_LINGER_NORMAL:
+                case CONN_STATE_LINGER_SHORT:
+                    process_lingering_close(cs, out_pfd);
                     break;
                 default:
-                    ap_log_error(APLOG_MARK, APLOG_ERR, rc,
-                                 ap_server_conf,
-                                 "event_loop: unexpected state %d",
-                                 cs->state);
-                    AP_DEBUG_ASSERT(0);
-                }
-
-                apr_thread_mutex_lock(timeout_mutex);
-                APR_RING_REMOVE(cs, timeout_list);
-                apr_thread_mutex_unlock(timeout_mutex);
-                APR_RING_ELEM_INIT(cs, timeout_list);
-
-                rc = push2worker(out_pfd, event_pollset);
-                if (rc != APR_SUCCESS) {
                     ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                                 ap_server_conf, "push2worker failed");
-                }
-                else {
-                    have_idle_worker = 0;
+                                 ap_server_conf, APLOGNO(03096)
+                                 "event_loop: unexpected state %d",
+                                 cs->pub.state);
+                    ap_assert(0);
                 }
             }
             else if (pt->type == PT_ACCEPT) {
                 /* A Listener Socket is ready for an accept() */
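+                /* Back-pressure heuristic: stop accepting when all workers
+                 * are busy, or when the number of non-lingering connections
+                 * exceeds threads_per_child plus worker_factor (stored
+                 * pre-scaled by WORKER_FACTOR_SCALE) connections per idle
+                 * worker; the listeners are re-enabled once load subsides. */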
+                if (workers_were_busy) {
+                    if (!listeners_disabled)
+                        disable_listensocks(process_slot);
+                    listeners_disabled = 1;
+                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
+                                 "All workers busy, not accepting new conns "
+                                 "in this process");
+                }
+                else if (  (int)apr_atomic_read32(&connection_count)
+                           - (int)apr_atomic_read32(&lingering_count)
+                         > threads_per_child
+                           + ap_queue_info_get_idlers(worker_queue_info) *
+                             worker_factor / WORKER_FACTOR_SCALE)
+                {
+                    if (!listeners_disabled)
+                        disable_listensocks(process_slot);
+                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
+                                 "Too many open connections (%u), "
+                                 "not accepting new conns in this process",
+                                 apr_atomic_read32(&connection_count));
+                    ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
+                                 "Idle workers: %u",
+                                 ap_queue_info_get_idlers(worker_queue_info));
+                    listeners_disabled = 1;
+                }
+                else if (listeners_disabled) {
+                    listeners_disabled = 0;
+                    enable_listensocks(process_slot);
+                }
+                if (!listeners_disabled) {
+                    lr = (ap_listen_rec *) pt->baton;
+                    ap_pop_pool(&ptrans, worker_queue_info);
 
-                lr = (ap_listen_rec *) pt->baton;
-
-                ap_pop_pool(&ptrans, worker_queue_info);
-
-                if (ptrans == NULL) {
-                    /* create a new transaction pool for each accepted socket */
-                    apr_allocator_t *allocator;
-
-                    apr_allocator_create(&allocator);
-                    apr_allocator_max_free_set(allocator,
-                                               ap_max_mem_free);
-                    apr_pool_create_ex(&ptrans, pconf, NULL, allocator);
-                    apr_allocator_owner_set(allocator, ptrans);
                     if (ptrans == NULL) {
-                        ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                                     ap_server_conf,
-                                     "Failed to create transaction pool");
-                        signal_threads(ST_GRACEFUL);
-                        return NULL;
+                        /* create a new transaction pool for each accepted socket */
+                        apr_allocator_t *allocator;
+
+                        apr_allocator_create(&allocator);
+                        apr_allocator_max_free_set(allocator,
+                                                   ap_max_mem_free);
+                        apr_pool_create_ex(&ptrans, pconf, NULL, allocator);
+                        apr_allocator_owner_set(allocator, ptrans);
+                        if (ptrans == NULL) {
+                            ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
+                                         ap_server_conf, APLOGNO(03097)
+                                         "Failed to create transaction pool");
+                            signal_threads(ST_GRACEFUL);
+                            return NULL;
+                        }
                     }
-                }
-                apr_pool_tag(ptrans, "transaction");
+                    apr_pool_tag(ptrans, "transaction");
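+                    /* ptrans is either a recycled transaction pool popped
+                     * from worker_queue_info or was created above with its
+                     * own allocator, whose free list is capped at
+                     * ap_max_mem_free. */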
 
-                rc = lr->accept_func(&csd, lr, ptrans);
+                    get_worker(&have_idle_worker, 1, &workers_were_busy);
+                    rc = lr->accept_func(&csd, lr, ptrans);
 
-                /* later we trash rv and rely on csd to indicate
-                 * success/failure
-                 */
-                AP_DEBUG_ASSERT(rc == APR_SUCCESS || !csd);
+                    /* later we trash rc and rely on csd to indicate
+                     * success/failure
+                     */
+                    AP_DEBUG_ASSERT(rc == APR_SUCCESS || !csd);
 
-                if (rc == APR_EGENERAL) {
-                    /* E[NM]FILE, ENOMEM, etc */
-                    resource_shortage = 1;
-                    signal_threads(ST_GRACEFUL);
-                }
+                    if (rc == APR_EGENERAL) {
+                        /* E[NM]FILE, ENOMEM, etc */
+                        resource_shortage = 1;
+                        signal_threads(ST_GRACEFUL);
+                    }
 
-                if (csd != NULL) {
-                    rc = ap_queue_push(worker_queue, csd, NULL, ptrans);
-                    if (rc != APR_SUCCESS) {
-                        /* trash the connection; we couldn't queue the connected
-                         * socket to a worker
-                         */
-                        apr_socket_close(csd);
-                        ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                                     ap_server_conf,
-                                     "ap_queue_push failed");
-                        apr_pool_clear(ptrans);
-                        ap_push_pool(worker_queue_info, ptrans);
+                    if (csd != NULL) {
+                        conns_this_child--;
+                        rc = ap_queue_push(worker_queue, csd, NULL, ptrans);
+                        if (rc != APR_SUCCESS) {
+                            /* trash the connection; we couldn't queue the connected
+                             * socket to a worker
+                             */
+                            apr_socket_close(csd);
+                            ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
+                                         ap_server_conf, APLOGNO(03098)
+                                         "ap_queue_push failed");
+                            ap_push_pool(worker_queue_info, ptrans);
+                        }
+                        else {
+                            have_idle_worker = 0;
+                        }
                     }
                     else {
-                        have_idle_worker = 0;
+                        ap_push_pool(worker_queue_info, ptrans);
                     }
                 }
-                else {
-                    apr_pool_clear(ptrans);
-                    ap_push_pool(worker_queue_info, ptrans);
-                }
             }               /* if:else on pt->type */
-#if HAVE_SERF
-            else if (pt->type == PT_SERF) {
-                /* send socket to serf. */
-                /* XXXX: this doesn't require get_worker(&have_idle_worker) */
-                serf_event_trigger(g_serf, pt->baton, out_pfd);
-            }
-#endif
             out_pfd++;
             num--;
         }                   /* while for processing poll */
@@ -1230,80 +1802,68 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
         /* XXX possible optimization: stash the current time for use as
          * r->request_time for new requests
          */
-        time_now = apr_time_now();
-
-        /* handle timed out sockets */
-        apr_thread_mutex_lock(timeout_mutex);
-
-        /* Step 1: keepalive timeouts */
-        cs = APR_RING_FIRST(&keepalive_timeout_head);
-        timeout_time = time_now + TIMEOUT_FUDGE_FACTOR;
-        while (!APR_RING_EMPTY(&keepalive_timeout_head, conn_state_t, timeout_list)
-               && cs->expiration_time < timeout_time) {
-
-            cs->state = CONN_STATE_LINGER;
+        now = apr_time_now();
+        /* We only do this once per 0.1s (TIMEOUT_FUDGE_FACTOR), or on clock
+         * skew: if the system time was set back in the meantime, timeout_time
+         * exceeds now + TIMEOUT_FUDGE_FACTOR, which cannot happen otherwise.
+         */
+        if (now > timeout_time || now + TIMEOUT_FUDGE_FACTOR < timeout_time) {
+            struct process_score *ps;
+            timeout_time = now + TIMEOUT_FUDGE_FACTOR;
 
-            APR_RING_REMOVE(cs, timeout_list);
-            apr_thread_mutex_unlock(timeout_mutex);
+            /* handle timed out sockets */
+            apr_thread_mutex_lock(timeout_mutex);
 
-            if (!get_worker(&have_idle_worker)) {
-                apr_thread_mutex_lock(timeout_mutex);
-                APR_RING_INSERT_HEAD(&keepalive_timeout_head, cs,
-                                     conn_state_t, timeout_list);
-                break;
+            /* Step 1: keepalive timeouts */
+            /* If all workers are busy, close older keep-alive connections so
+             * that their clients can reconnect to another process.
+             */
+            if (workers_were_busy && *keepalive_q->total) {
+                ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
+                             "All workers are busy, will close %d keep-alive "
+                             "connections",
+                             *keepalive_q->total);
+                process_timeout_queue(keepalive_q, 0,
+                                      start_lingering_close_nonblocking);
             }
-
-            rc = push2worker(&cs->pfd, event_pollset);
-
-            if (rc != APR_SUCCESS) {
-                return NULL;
-                /* XXX return NULL looks wrong - not an init failure
-                 * that bypasses all the cleanup outside the main loop
-                 * break seems more like it
-                 * need to evaluate seriousness of push2worker failures
-                 */
+            else {
+                process_timeout_queue(keepalive_q, timeout_time,
+                                      start_lingering_close_nonblocking);
             }
-            have_idle_worker = 0;
-            apr_thread_mutex_lock(timeout_mutex);
-            cs = APR_RING_FIRST(&keepalive_timeout_head);
-        }
-
-        /* Step 2: write completion timeouts */
-        cs = APR_RING_FIRST(&timeout_head);
-        while (!APR_RING_EMPTY(&timeout_head, conn_state_t, timeout_list)
-               && cs->expiration_time < timeout_time) {
-
-            cs->state = CONN_STATE_LINGER;
-            APR_RING_REMOVE(cs, timeout_list);
+            /* Step 2: write completion timeouts */
+            process_timeout_queue(write_completion_q, timeout_time,
+                                  start_lingering_close_nonblocking);
+            /* Step 3: (normal) lingering close completion timeouts */
+            process_timeout_queue(linger_q, timeout_time, stop_lingering_close);
+            /* Step 4: (short) lingering close completion timeouts */
+            process_timeout_queue(short_linger_q, timeout_time, stop_lingering_close);
+
+            ps = ap_get_scoreboard_process(process_slot);
+            ps->write_completion = *write_completion_q->total;
+            ps->keep_alive = *keepalive_q->total;
             apr_thread_mutex_unlock(timeout_mutex);
 
-            if (!get_worker(&have_idle_worker)) {
-                apr_thread_mutex_lock(timeout_mutex);
-                APR_RING_INSERT_HEAD(&timeout_head, cs,
-                                     conn_state_t, timeout_list);
-                break;
-            }
-
-            rc = push2worker(&cs->pfd, event_pollset);
-            if (rc != APR_SUCCESS) {
-                return NULL;
-            }
-            have_idle_worker = 0;
-            apr_thread_mutex_lock(timeout_mutex);
-            cs = APR_RING_FIRST(&timeout_head);
+            ps->connections = apr_atomic_read32(&connection_count);
+            ps->suspended = apr_atomic_read32(&suspended_count);
+            ps->lingering_close = apr_atomic_read32(&lingering_count);
         }
-
-        apr_thread_mutex_unlock(timeout_mutex);
-
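+        /* Re-enable the listeners once the number of non-lingering
+         * connections drops back below the worker_factor-based threshold
+         * used to disable them, with one idle worker of hysteresis. */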
+        if (listeners_disabled && !workers_were_busy
+            && (int)apr_atomic_read32(&connection_count)
+               - (int)apr_atomic_read32(&lingering_count)
+               < ((int)ap_queue_info_get_idlers(worker_queue_info) - 1)
+                 * worker_factor / WORKER_FACTOR_SCALE + threads_per_child)
+        {
+            listeners_disabled = 0;
+            enable_listensocks(process_slot);
+        }
+        /*
+         * XXX: do we need to set some timeout that re-enables the listensocks
+         * XXX: in case no other event occurs?
+         */
     }     /* listener main loop */
 
-    ap_close_listeners();
+    close_listeners(process_slot, &closed);
     ap_queue_term(worker_queue);
-    dying = 1;
-    ap_scoreboard_image->parent[process_slot].quiescing = 1;
-
-    /* wake up the main thread */
-    kill(ap_my_pid, SIGTERM);
 
     apr_thread_exit(thd, APR_SUCCESS);
     return NULL;
@@ -1322,17 +1882,17 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
     int process_slot = ti->pid;
     int thread_slot = ti->tid;
     apr_socket_t *csd = NULL;
-    conn_state_t *cs;
+    event_conn_state_t *cs;
     apr_pool_t *ptrans;         /* Pool for per-transaction stuff */
     apr_status_t rv;
     int is_idle = 0;
     timer_event_t *te = NULL;
-    
+
     free(ti);
 
     ap_scoreboard_image->servers[process_slot][thread_slot].pid = ap_my_pid;
     ap_scoreboard_image->servers[process_slot][thread_slot].tid = apr_os_thread_current();
-    ap_scoreboard_image->servers[process_slot][thread_slot].generation = my_generation;
+    ap_scoreboard_image->servers[process_slot][thread_slot].generation = retained->my_generation;
     ap_update_child_status_from_indexes(process_slot, thread_slot,
                                         SERVER_STARTING, NULL);
 
@@ -1350,14 +1910,13 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
         }
 
         ap_update_child_status_from_indexes(process_slot, thread_slot,
-                                            SERVER_READY, NULL);
+                                            dying ? SERVER_GRACEFUL : SERVER_READY, NULL);
       worker_pop:
         if (workers_may_exit) {
             break;
         }
 
         te = NULL;
-        
         rv = ap_queue_pop_something(worker_queue, &csd, &cs, &ptrans, &te);
 
         if (rv != APR_SUCCESS) {
@@ -1384,33 +1943,29 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
             /* We got some other error. */
             else if (!workers_may_exit) {
                 ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
-                             "ap_queue_pop failed");
+                             APLOGNO(03099) "ap_queue_pop failed");
             }
             continue;
         }
         if (te != NULL) {
-            
             te->cbfunc(te->baton);
 
             {
-                apr_thread_mutex_lock(g_timer_ring_mtx);
+                apr_thread_mutex_lock(g_timer_skiplist_mtx);
                 APR_RING_INSERT_TAIL(&timer_free_ring, te, timer_event_t, link);
-                apr_thread_mutex_unlock(g_timer_ring_mtx);
+                apr_thread_mutex_unlock(g_timer_skiplist_mtx);
             }
         }
         else {
             is_idle = 0;
             worker_sockets[thread_slot] = csd;
-            rv = process_socket(thd, ptrans, csd, cs, process_slot, thread_slot);
-            if (!rv) {
-                requests_this_child--;
-            }
+            process_socket(thd, ptrans, csd, cs, process_slot, thread_slot);
             worker_sockets[thread_slot] = NULL;
         }
     }
 
     ap_update_child_status_from_indexes(process_slot, thread_slot,
-                                        (dying) ? SERVER_DEAD :
+                                        dying ? SERVER_DEAD :
                                         SERVER_GRACEFUL,
                                         (request_rec *) NULL);
 
@@ -1437,14 +1992,14 @@ static void create_listener_thread(thread_starter * ts)
     proc_info *my_info;
     apr_status_t rv;
 
-    my_info = (proc_info *) malloc(sizeof(proc_info));
+    my_info = (proc_info *) ap_malloc(sizeof(proc_info));
     my_info->pid = my_child_num;
     my_info->tid = -1;          /* listener thread doesn't have a thread slot */
     my_info->sd = 0;
     rv = apr_thread_create(&ts->listener, thread_attr, listener_thread,
                            my_info, pchild);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(00474)
                      "apr_thread_create: unable to create listener thread");
         /* let the parent decide how bad this really is */
         clean_child_exit(APEXIT_CHILDSICK);
@@ -1471,21 +2026,30 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
     int listener_started = 0;
     int loops;
     int prev_threads_created;
+    int max_recycled_pools = -1;
+    int good_methods[] = {APR_POLLSET_KQUEUE, APR_POLLSET_PORT, APR_POLLSET_EPOLL};
 
     /* We must create the fd queues before we start up the listener
      * and worker threads. */
     worker_queue = apr_pcalloc(pchild, sizeof(*worker_queue));
     rv = ap_queue_init(worker_queue, threads_per_child, pchild);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(03100)
                      "ap_queue_init() failed");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
+    if (ap_max_mem_free != APR_ALLOCATOR_MAX_FREE_UNLIMITED) {
+        /* If we want to conserve memory, let's not keep an unlimited number of
+         * pools & allocators.
+         * XXX: This should probably be a separate config directive
+         */
+        max_recycled_pools = threads_per_child * 3 / 4;
+    }
     rv = ap_queue_info_create(&worker_queue_info, pchild,
-                              threads_per_child);
+                              threads_per_child, max_recycled_pools);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(03101)
                      "ap_queue_info_create() failed");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
@@ -1496,21 +2060,40 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
     rv = apr_thread_mutex_create(&timeout_mutex, APR_THREAD_MUTEX_DEFAULT,
                                  pchild);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03102)
                      "creation of the timeout mutex failed.");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
     /* Create the main pollset */
-    rv = apr_pollset_create(&event_pollset,
-                            threads_per_child,
-                            pchild, APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
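+    /* Try the preferred pollset implementations explicitly (via
+     * APR_POLLSET_NODEFAULT); if none of KQueue, Event Port or EPoll is
+     * available, fall back to APR's default method below. */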
+    for (i = 0; i < sizeof(good_methods) / sizeof(good_methods[0]); i++) {
+        rv = apr_pollset_create_ex(&event_pollset,
+                            threads_per_child*2, /* XXX don't we need more, to handle
+                                                * connections in K-A or lingering
+                                                * close?
+                                                */
+                            pchild, APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY | APR_POLLSET_NODEFAULT,
+                            good_methods[i]);
+        if (rv == APR_SUCCESS) {
+            break;
+        }
+    }
+    if (rv != APR_SUCCESS) {
+        rv = apr_pollset_create(&event_pollset,
+                               threads_per_child*2, /* XXX don't we need more, to handle
+                                                     * connections in K-A or lingering
+                                                     * close?
+                                                     */
+                               pchild, APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
+    }
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03103)
                      "apr_pollset_create with Thread Safety failed.");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
+    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02471)
+                 "start_threads: Using %s", apr_pollset_method_name(event_pollset));
     worker_sockets = apr_pcalloc(pchild, threads_per_child
                                  * sizeof(apr_socket_t *));
 
@@ -1525,12 +2108,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
                 continue;
             }
 
-            my_info = (proc_info *) malloc(sizeof(proc_info));
-            if (my_info == NULL) {
-                ap_log_error(APLOG_MARK, APLOG_ALERT, errno, ap_server_conf,
-                             "malloc: out of memory");
-                clean_child_exit(APEXIT_CHILDFATAL);
-            }
+            my_info = (proc_info *) ap_malloc(sizeof(proc_info));
             my_info->pid = my_child_num;
             my_info->tid = i;
             my_info->sd = 0;
@@ -1545,6 +2123,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
                                    worker_thread, my_info, pchild);
             if (rv != APR_SUCCESS) {
                 ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+                             APLOGNO(03104)
                              "apr_thread_create: unable to create worker thread");
                 /* let the parent decide how bad this really is */
                 clean_child_exit(APEXIT_CHILDSICK);
@@ -1608,26 +2187,20 @@ static void join_workers(apr_thread_t * listener, apr_thread_t ** threads)
          */
 
         iter = 0;
-        while (iter < 10 &&
-#ifdef HAVE_PTHREAD_KILL
-               pthread_kill(*listener_os_thread, 0)
-#else
-               kill(ap_my_pid, 0)
-#endif
-               == 0) {
-            /* listener not dead yet */
+        while (iter < 10 && !dying) {
+            /* listener has not stopped accepting yet */
             apr_sleep(apr_time_make(0, 500000));
             wakeup_listener();
             ++iter;
         }
         if (iter >= 10) {
-            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
-                         "the listener thread didn't exit");
+            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00475)
+                         "the listener thread didn't stop accepting");
         }
         else {
             rv = apr_thread_join(&thread_rv, listener);
             if (rv != APR_SUCCESS) {
-                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
+                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00476)
                              "apr_thread_join: unable to join listener thread");
             }
         }
@@ -1637,7 +2210,7 @@ static void join_workers(apr_thread_t * listener, apr_thread_t ** threads)
         if (threads[i]) {       /* if we ever created this thread */
             rv = apr_thread_join(&thread_rv, threads[i]);
             if (rv != APR_SUCCESS) {
-                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
+                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00477)
                              "apr_thread_join: unable to join worker "
                              "thread %d", i);
             }
@@ -1655,18 +2228,19 @@ static void join_start_thread(apr_thread_t * start_thread_id)
                                  */
     rv = apr_thread_join(&thread_rv, start_thread_id);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00478)
                      "apr_thread_join: unable to join the start " "thread");
     }
 }
 
-static void child_main(int child_num_arg)
+static void child_main(int child_num_arg, int child_bucket)
 {
     apr_thread_t **threads;
     apr_status_t rv;
     thread_starter *ts;
     apr_threadattr_t *thread_attr;
     apr_thread_t *start_thread_id;
+    int i;
 
     mpm_state = AP_MPMQ_STARTING;       /* for benefit of any hooks that run as this
                                          * child initializes
@@ -1675,6 +2249,14 @@ static void child_main(int child_num_arg)
     ap_fatal_signal_child_setup(ap_server_conf);
     apr_pool_create(&pchild, pconf);
 
+    /* close unused listeners and pods */
+    for (i = 0; i < retained->num_buckets; i++) {
+        if (i != child_bucket) {
+            ap_close_listeners_ex(all_buckets[i].listeners);
+            ap_mpm_podx_close(all_buckets[i].pod);
+        }
+    }
+
     /*stuff to do before we switch id's, so we have permissions. */
     ap_reopen_scoreboard(pchild, NULL, 0);
 
@@ -1682,10 +2264,10 @@ static void child_main(int child_num_arg)
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
-    apr_thread_mutex_create(&g_timer_ring_mtx, APR_THREAD_MUTEX_DEFAULT, pchild);
+    apr_thread_mutex_create(&g_timer_skiplist_mtx, APR_THREAD_MUTEX_DEFAULT, pchild);
     APR_RING_INIT(&timer_free_ring, timer_event_t, link);
-    APR_RING_INIT(&timer_ring, timer_event_t, link);
-    
+    apr_skiplist_init(&timer_skiplist, pchild);
+    apr_skiplist_set_compare(timer_skiplist, timer_comp, timer_comp);
     ap_run_child_init(pchild, ap_server_conf);
 
     /* done with init critical section */
@@ -1696,17 +2278,17 @@ static void child_main(int child_num_arg)
      */
     rv = apr_setup_signal_thread();
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf, APLOGNO(00479)
                      "Couldn't initialize signal thread");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
     if (ap_max_requests_per_child) {
-        requests_this_child = ap_max_requests_per_child;
+        conns_this_child = ap_max_requests_per_child;
     }
     else {
         /* coding a value of zero means infinity */
-        requests_this_child = INT_MAX;
+        conns_this_child = APR_INT32_MAX;
     }
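+    /* This MPM counts connections, not requests, so the
+     * ap_max_requests_per_child value actually carries the
+     * MaxConnectionsPerChild setting here. */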
 
     /* Setup worker threads */
@@ -1714,23 +2296,21 @@ static void child_main(int child_num_arg)
     /* clear the storage; we may not create all our threads immediately,
      * and we want a 0 entry to indicate a thread which was not created
      */
-    threads = (apr_thread_t **) calloc(1,
-                                       sizeof(apr_thread_t *) *
-                                       threads_per_child);
-    if (threads == NULL) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, errno, ap_server_conf,
-                     "malloc: out of memory");
-        clean_child_exit(APEXIT_CHILDFATAL);
-    }
-
-    ts = (thread_starter *) apr_palloc(pchild, sizeof(*ts));
+    threads = ap_calloc(threads_per_child, sizeof(apr_thread_t *));
+    ts = apr_palloc(pchild, sizeof(*ts));
 
     apr_threadattr_create(&thread_attr, pchild);
     /* 0 means PTHREAD_CREATE_JOINABLE */
     apr_threadattr_detach_set(thread_attr, 0);
 
     if (ap_thread_stacksize != 0) {
-        apr_threadattr_stacksize_set(thread_attr, ap_thread_stacksize);
+        rv = apr_threadattr_stacksize_set(thread_attr, ap_thread_stacksize);
+        if (rv != APR_SUCCESS && rv != APR_ENOTIMPL) {
+            ap_log_error(APLOG_MARK, APLOG_WARNING, rv, ap_server_conf, APLOGNO(02436)
+                         "WARNING: ThreadStackSize of %" APR_SIZE_T_FMT " is "
+                         "inappropriate, using default", 
+                         ap_thread_stacksize);
+        }
     }
 
     ts->threads = threads;
@@ -1741,7 +2321,7 @@ static void child_main(int child_num_arg)
     rv = apr_thread_create(&start_thread_id, thread_attr, start_threads,
                            ts, pchild);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(00480)
                      "apr_thread_create: unable to create worker thread");
         /* let the parent decide how bad this really is */
         clean_child_exit(APEXIT_CHILDSICK);
@@ -1786,25 +2366,25 @@ static void child_main(int child_num_arg)
         apr_signal(SIGTERM, dummy_signal_handler);
         /* Watch for any messages from the parent over the POD */
         while (1) {
-            rv = ap_event_pod_check(pod);
-            if (rv == AP_NORESTART) {
+            rv = ap_mpm_podx_check(my_bucket->pod);
+            if (rv == AP_MPM_PODX_NORESTART) {
                 /* see if termination was triggered while we slept */
                 switch (terminate_mode) {
                 case ST_GRACEFUL:
-                    rv = AP_GRACEFUL;
+                    rv = AP_MPM_PODX_GRACEFUL;
                     break;
                 case ST_UNGRACEFUL:
-                    rv = AP_RESTART;
+                    rv = AP_MPM_PODX_RESTART;
                     break;
                 }
             }
-            if (rv == AP_GRACEFUL || rv == AP_RESTART) {
+            if (rv == AP_MPM_PODX_GRACEFUL || rv == AP_MPM_PODX_RESTART) {
                 /* make sure the start thread has finished;
                  * signal_threads() and join_workers depend on that
                  */
                 join_start_thread(start_thread_id);
                 signal_threads(rv ==
-                               AP_GRACEFUL ? ST_GRACEFUL : ST_UNGRACEFUL);
+                               AP_MPM_PODX_GRACEFUL ? ST_GRACEFUL : ST_UNGRACEFUL);
                 break;
             }
         }
@@ -1824,23 +2404,27 @@ static void child_main(int child_num_arg)
     clean_child_exit(resource_shortage ? APEXIT_CHILDSICK : 0);
 }
 
-static int make_child(server_rec * s, int slot)
+static int make_child(server_rec * s, int slot, int bucket)
 {
     int pid;
 
-    if (slot + 1 > max_daemons_limit) {
-        max_daemons_limit = slot + 1;
+    if (slot + 1 > retained->max_daemons_limit) {
+        retained->max_daemons_limit = slot + 1;
     }
 
     if (one_process) {
+        my_bucket = &all_buckets[0];
+
         set_signals();
-        ap_scoreboard_image->parent[slot].pid = getpid();
-        child_main(slot);
+        event_note_child_started(slot, getpid());
+        child_main(slot, 0);
         /* NOTREACHED */
+        ap_assert(0);
+        return -1;
     }
 
     if ((pid = fork()) == -1) {
-        ap_log_error(APLOG_MARK, APLOG_ERR, errno, s,
+        ap_log_error(APLOG_MARK, APLOG_ERR, errno, s, APLOGNO(00481)
                      "fork: Unable to fork new process");
 
         /* fork didn't succeed.  There's no need to touch the scoreboard;
@@ -1859,6 +2443,8 @@ static int make_child(server_rec * s, int slot)
     }
 
     if (!pid) {
+        my_bucket = &all_buckets[bucket];
+
 #ifdef HAVE_BINDPROCESSOR
         /* By default, AIX binds to a single processor.  This bit unbinds
          * children which will then bind to another CPU.
@@ -1867,33 +2453,29 @@ static int make_child(server_rec * s, int slot)
                                    PROCESSOR_CLASS_ANY);
         if (status != OK)
             ap_log_error(APLOG_MARK, APLOG_DEBUG, errno,
-                         ap_server_conf,
+                         ap_server_conf, APLOGNO(00482)
                          "processor unbind failed");
 #endif
         RAISE_SIGSTOP(MAKE_CHILD);
 
         apr_signal(SIGTERM, just_die);
-        child_main(slot);
+        child_main(slot, bucket);
         /* NOTREACHED */
+        ap_assert(0);
+        return -1;
     }
-    /* else */
+
     if (ap_scoreboard_image->parent[slot].pid != 0) {
         /* This new child process is squatting on the scoreboard
          * entry owned by an exiting child process, which cannot
          * exit until all active requests complete.
-         * Don't forget about this exiting child process, or we
-         * won't be able to kill it if it doesn't exit by the
-         * time the server is shut down.
          */
-        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
-                     "taking over scoreboard slot from %" APR_PID_T_FMT "%s",
-                     ap_scoreboard_image->parent[slot].pid,
-                     ap_scoreboard_image->parent[slot].quiescing ?
-                         " (quiescing)" : "");
-        ap_register_extra_mpm_process(ap_scoreboard_image->parent[slot].pid);
+        event_note_child_lost_slot(slot, pid);
     }
     ap_scoreboard_image->parent[slot].quiescing = 0;
-    ap_scoreboard_image->parent[slot].pid = pid;
+    ap_scoreboard_image->parent[slot].not_accepting = 0;
+    ap_scoreboard_image->parent[slot].bucket = bucket;
+    event_note_child_started(slot, pid);
     return 0;
 }
 
@@ -1906,27 +2488,14 @@ static void startup_children(int number_to_start)
         if (ap_scoreboard_image->parent[i].pid != 0) {
             continue;
         }
-        if (make_child(ap_server_conf, i) < 0) {
+        if (make_child(ap_server_conf, i, i % retained->num_buckets) < 0) {
             break;
         }
         --number_to_start;
     }
 }
 
-
-/*
- * idle_spawn_rate is the number of children that will be spawned on the
- * next maintenance cycle if there aren't enough idle servers.  It is
- * doubled up to MAX_SPAWN_RATE, and reset only when a cycle goes by
- * without the need to spawn.
- */
-static int idle_spawn_rate = 1;
-#ifndef MAX_SPAWN_RATE
-#define MAX_SPAWN_RATE        (32)
-#endif
-static int hold_off_on_exponential_spawning;
-
-static void perform_idle_server_maintenance(void)
+static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
 {
     int i, j;
     int idle_thread_count;
@@ -1953,13 +2522,14 @@ static void perform_idle_server_maintenance(void)
         int any_dying_threads = 0;
         int any_dead_threads = 0;
         int all_dead_threads = 1;
+        int child_threads_active = 0;
 
-        if (i >= max_daemons_limit
-            && totally_free_length == idle_spawn_rate)
+        if (i >= retained->max_daemons_limit
+            && totally_free_length == retained->idle_spawn_rate[child_bucket])
             /* short cut if all active processes have been examined and
              * enough empty scoreboard slots have been found
              */
-        
+
             break;
         ps = &ap_scoreboard_image->parent[i];
         for (j = 0; j < threads_per_child; j++) {
@@ -1981,18 +2551,21 @@ static void perform_idle_server_maintenance(void)
              */
             if (ps->pid != 0) { /* XXX just set all_dead_threads in outer
                                    for loop if no pid?  not much else matters */
-                if (status <= SERVER_READY &&
-                        !ps->quiescing && ps->generation == my_generation) {
+                if (status <= SERVER_READY && !ps->quiescing && !ps->not_accepting
+                    && ps->generation == retained->my_generation
+                    && ps->bucket == child_bucket)
+                {
                     ++idle_thread_count;
                 }
                 if (status >= SERVER_READY && status < SERVER_GRACEFUL) {
-                    ++active_thread_count;
+                    ++child_threads_active;
                 }
             }
         }
+        active_thread_count += child_threads_active;
         if (any_dead_threads
-            && totally_free_length < idle_spawn_rate
-            && free_length < MAX_SPAWN_RATE
+            && totally_free_length < retained->idle_spawn_rate[child_bucket]
+            && free_length < MAX_SPAWN_RATE / num_buckets
             && (!ps->pid      /* no process in the slot */
                   || ps->quiescing)) {  /* or at least one is going away */
             if (all_dead_threads) {
@@ -2013,6 +2586,9 @@ static void perform_idle_server_maintenance(void)
             }
             ++free_length;
         }
+        else if (child_threads_active == threads_per_child) {
+            had_healthy_child = 1;
+        }
         /* XXX if (!ps->quiescing)     is probably more reliable  GLA */
         if (!any_dying_threads) {
             last_non_dead = i;
@@ -2020,63 +2596,63 @@ static void perform_idle_server_maintenance(void)
         }
     }
 
-    if (sick_child_detected) {
-        if (active_thread_count > 0) {
-            /* some child processes appear to be working.  don't kill the
-             * whole server.
+    if (retained->sick_child_detected) {
+        if (had_healthy_child) {
+            /* Assume this is a transient error, even though it may not be.
+             * Leave the server up in case it can still serve some requests,
+             * or in case the problem resolves itself.
              */
-            sick_child_detected = 0;
+            retained->sick_child_detected = 0;
         }
         else {
-            /* looks like a basket case give up.
+            /* looks like a basket case, as no child ever fully initialized; give up.
              */
             shutdown_pending = 1;
             child_fatal = 1;
             ap_log_error(APLOG_MARK, APLOG_ALERT, 0,
-                         ap_server_conf,
-                         "No active workers found..."
-                         " Apache is exiting!");
+                         ap_server_conf, APLOGNO(02324)
+                         "A resource shortage or other unrecoverable failure "
+                         "was encountered before any child process initialized "
+                         "successfully... httpd is exiting!");
             /* the child already logged the failure details */
             return;
         }
     }
 
-    max_daemons_limit = last_non_dead + 1;
+    retained->max_daemons_limit = last_non_dead + 1;
 
-    if (idle_thread_count > max_spare_threads) {
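+    /* Maintenance runs once per bucket, so the spare-thread thresholds
+     * are divided evenly among the num_buckets listener buckets. */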
+    if (idle_thread_count > max_spare_threads / num_buckets) {
         /* Kill off one child */
-        ap_event_pod_signal(pod, TRUE);
-        idle_spawn_rate = 1;
+        ap_mpm_podx_signal(all_buckets[child_bucket].pod,
+                           AP_MPM_PODX_GRACEFUL);
+        retained->idle_spawn_rate[child_bucket] = 1;
     }
-    else if (idle_thread_count < min_spare_threads) {
+    else if (idle_thread_count < min_spare_threads / num_buckets) {
         /* terminate the free list */
         if (free_length == 0) { /* scoreboard is full, can't fork */
 
             if (active_thread_count >= ap_daemons_limit * threads_per_child) {
-                static int reported = 0;
-                if (!reported) {
+                if (!retained->maxclients_reported) {
                     /* only report this condition once */
-                    ap_log_error(APLOG_MARK, APLOG_ERR, 0,
-                                 ap_server_conf,
-                                 "server reached MaxClients setting, consider"
-                                 " raising the MaxClients setting");
-                    reported = 1;
+                    ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00484)
+                                 "server reached MaxRequestWorkers setting, "
+                                 "consider raising the MaxRequestWorkers "
+                                 "setting");
+                    retained->maxclients_reported = 1;
                 }
             }
             else {
-                ap_log_error(APLOG_MARK, APLOG_ERR, 0,
-                             ap_server_conf,
-                             "scoreboard is full, not at MaxClients");
+                ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00485)
+                             "scoreboard is full, not at MaxRequestWorkers");
             }
-            idle_spawn_rate = 1;
+            retained->idle_spawn_rate[child_bucket] = 1;
         }
         else {
-            if (free_length > idle_spawn_rate) {
-                free_length = idle_spawn_rate;
+            if (free_length > retained->idle_spawn_rate[child_bucket]) {
+                free_length = retained->idle_spawn_rate[child_bucket];
             }
-            if (idle_spawn_rate >= 8) {
-                ap_log_error(APLOG_MARK, APLOG_INFO, 0,
-                             ap_server_conf,
+            if (retained->idle_spawn_rate[child_bucket] >= 8) {
+                ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf, APLOGNO(00486)
                              "server seems busy, (you may need "
                              "to increase StartServers, ThreadsPerChild "
                              "or Min/MaxSpareThreads), "
@@ -2085,26 +2661,28 @@ static void perform_idle_server_maintenance(void)
                              idle_thread_count, total_non_dead);
             }
             for (i = 0; i < free_length; ++i) {
-                make_child(ap_server_conf, free_slots[i]);
+                make_child(ap_server_conf, free_slots[i], child_bucket);
             }
             /* the next time around we want to spawn twice as many if this
              * wasn't good enough, but not if we've just done a graceful
              */
-            if (hold_off_on_exponential_spawning) {
-                --hold_off_on_exponential_spawning;
+            if (retained->hold_off_on_exponential_spawning) {
+                --retained->hold_off_on_exponential_spawning;
             }
-            else if (idle_spawn_rate < MAX_SPAWN_RATE) {
-                idle_spawn_rate *= 2;
+            else if (retained->idle_spawn_rate[child_bucket]
+                     < MAX_SPAWN_RATE / num_buckets) {
+                retained->idle_spawn_rate[child_bucket] *= 2;
             }
         }
     }
     else {
-        idle_spawn_rate = 1;
+        retained->idle_spawn_rate[child_bucket] = 1;
     }
 }
 
-static void server_main_loop(int remaining_children_to_start)
+static void server_main_loop(int remaining_children_to_start, int num_buckets)
 {
+    ap_generation_t old_gen;
     int child_slot;
     apr_exit_why_e exitwhy;
     int status, processed_status;
@@ -2116,42 +2694,70 @@ static void server_main_loop(int remaining_children_to_start)
 
         if (pid.pid != -1) {
             processed_status = ap_process_child_status(&pid, exitwhy, status);
+            child_slot = ap_find_child_by_pid(&pid);
             if (processed_status == APEXIT_CHILDFATAL) {
-                shutdown_pending = 1;
-                child_fatal = 1;
-                return;
+                /* Fix for the race condition found in PR 39311:
+                 * a child created at the same time as a graceful restart
+                 * can find the lock missing and report a fatal error.
+                 * This is not fatal if the child belongs to a previous
+                 * generation.
+                 */
+                if (child_slot < 0
+                    || ap_get_scoreboard_process(child_slot)->generation
+                       == retained->my_generation) {
+                    shutdown_pending = 1;
+                    child_fatal = 1;
+                    return;
+                }
+                else {
+                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf, APLOGNO(00487)
+                                 "Ignoring fatal error in child of previous "
+                                 "generation (pid %ld).",
+                                 (long)pid.pid);
+                    retained->sick_child_detected = 1;
+                }
             }
             else if (processed_status == APEXIT_CHILDSICK) {
                 /* tell perform_idle_server_maintenance to check into this
                  * on the next timer pop
                  */
-                sick_child_detected = 1;
+                retained->sick_child_detected = 1;
             }
             /* non-fatal death... note that it's gone in the scoreboard. */
-            child_slot = ap_find_child_by_pid(&pid);
             if (child_slot >= 0) {
+                process_score *ps;
+
                 for (i = 0; i < threads_per_child; i++)
                     ap_update_child_status_from_indexes(child_slot, i,
                                                         SERVER_DEAD,
                                                         (request_rec *) NULL);
 
-                ap_scoreboard_image->parent[child_slot].pid = 0;
-                ap_scoreboard_image->parent[child_slot].quiescing = 0;
+                event_note_child_killed(child_slot, 0, 0);
+                ps = &ap_scoreboard_image->parent[child_slot];
+                ps->quiescing = 0;
                 if (processed_status == APEXIT_CHILDSICK) {
                     /* resource shortage, minimize the fork rate */
-                    idle_spawn_rate = 1;
+                    retained->idle_spawn_rate[ps->bucket] = 1;
                 }
                 else if (remaining_children_to_start
                          && child_slot < ap_daemons_limit) {
                     /* we're still doing a 1-for-1 replacement of dead
                      * children with new children
                      */
-                    make_child(ap_server_conf, child_slot);
+                    make_child(ap_server_conf, child_slot, ps->bucket);
                     --remaining_children_to_start;
                 }
             }
-            else if (ap_unregister_extra_mpm_process(pid.pid) == 1) {
-                /* handled */
+            else if (ap_unregister_extra_mpm_process(pid.pid, &old_gen) == 1) {
+
+                event_note_child_killed(-1, /* already out of the scoreboard */
+                                        pid.pid, old_gen);
+                if (processed_status == APEXIT_CHILDSICK
+                    && old_gen == retained->my_generation) {
+                    /* resource shortage, minimize the fork rate */
+                    for (i = 0; i < num_buckets; i++) {
+                        retained->idle_spawn_rate[i] = 1;
+                    }
+                }
 #if APR_HAS_OTHER_CHILD
             }
             else if (apr_proc_other_child_alert(&pid, APR_OC_REASON_DEATH,
@@ -2159,12 +2765,12 @@ static void server_main_loop(int remaining_children_to_start)
                 /* handled */
 #endif
             }
-            else if (is_graceful) {
+            else if (retained->is_graceful) {
                 /* Great, we've probably just lost a slot in the
                  * scoreboard.  Somehow we don't know about this child.
                  */
                 ap_log_error(APLOG_MARK, APLOG_WARNING, 0,
-                             ap_server_conf,
+                             ap_server_conf, APLOGNO(00488)
                              "long lost child came home! (pid %ld)",
                              (long) pid.pid);
             }
@@ -2189,17 +2795,21 @@ static void server_main_loop(int remaining_children_to_start)
             continue;
         }
 
-        perform_idle_server_maintenance();
+        for (i = 0; i < num_buckets; i++) {
+            perform_idle_server_maintenance(i, num_buckets);
+        }
     }
 }
 
 static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
 {
+    int num_buckets = retained->num_buckets;
     int remaining_children_to_start;
+    int i;
 
     ap_log_pid(pconf, ap_pid_fname);
 
-    if (!is_graceful) {
+    if (!retained->is_graceful) {
         if (ap_run_pre_mpm(s->process->pool, SB_SHARED) != OK) {
             mpm_state = AP_MPMQ_STOPPING;
             return DONE;
@@ -2207,13 +2817,23 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
         /* fix the generation number in the global score; we just got a new,
          * cleared scoreboard
          */
-        ap_scoreboard_image->global->running_generation = my_generation;
+        ap_scoreboard_image->global->running_generation = retained->my_generation;
     }
 
+    restart_pending = shutdown_pending = 0;
     set_signals();
-    /* Don't thrash... */
-    if (max_spare_threads < min_spare_threads + threads_per_child)
-        max_spare_threads = min_spare_threads + threads_per_child;
+
+    /* Don't thrash: scale the spawning minimums below by num_buckets,
+     * which depends on the system and the number of online CPU cores.
+     */
+    if (ap_daemons_limit < num_buckets)
+        ap_daemons_limit = num_buckets;
+    if (ap_daemons_to_start < num_buckets)
+        ap_daemons_to_start = num_buckets;
+    if (min_spare_threads < threads_per_child * num_buckets)
+        min_spare_threads = threads_per_child * num_buckets;
+    if (max_spare_threads < min_spare_threads + threads_per_child * num_buckets)
+        max_spare_threads = min_spare_threads + threads_per_child * num_buckets;
 
     /* If we're doing a graceful_restart then we're going to see a lot
      * of children exiting immediately when we get into the main loop
@@ -2228,48 +2848,45 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
     if (remaining_children_to_start > ap_daemons_limit) {
         remaining_children_to_start = ap_daemons_limit;
     }
-    if (!is_graceful) {
+    if (!retained->is_graceful) {
         startup_children(remaining_children_to_start);
         remaining_children_to_start = 0;
     }
     else {
         /* give the system some time to recover before kicking into
          * exponential mode */
-        hold_off_on_exponential_spawning = 10;
+        retained->hold_off_on_exponential_spawning = 10;
     }
 
-    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
+    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00489)
                  "%s configured -- resuming normal operations",
                  ap_get_server_description());
-    ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf,
+    ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf, APLOGNO(00490)
                  "Server built: %s", ap_get_server_built());
     ap_log_command_line(plog, s);
+    ap_log_mpm_common(s);
 
-    restart_pending = shutdown_pending = 0;
     mpm_state = AP_MPMQ_RUNNING;
 
-    server_main_loop(remaining_children_to_start);
+    server_main_loop(remaining_children_to_start, num_buckets);
     mpm_state = AP_MPMQ_STOPPING;
 
-    if (shutdown_pending && !is_graceful) {
+    if (shutdown_pending && !retained->is_graceful) {
         /* Time to shut down:
          * Kill child processes, tell them to call child_exit, etc...
          */
-        ap_event_pod_killpg(pod, ap_daemons_limit, FALSE);
-        ap_reclaim_child_processes(1);  /* Start with SIGTERM */
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+                               AP_MPM_PODX_RESTART);
+        }
+        ap_reclaim_child_processes(1, /* Start with SIGTERM */
+                                   event_note_child_killed);
 
         if (!child_fatal) {
             /* cleanup pid file on normal shutdown */
-            const char *pidfile = NULL;
-            pidfile = ap_server_root_relative(pconf, ap_pid_fname);
-            if (pidfile != NULL && unlink(pidfile) == 0)
-                ap_log_error(APLOG_MARK, APLOG_INFO, 0,
-                             ap_server_conf,
-                             "removed PID file %s (pid=%ld)",
-                             pidfile, (long) getpid());
-
+            ap_remove_pid(pconf, ap_pid_fname);
             ap_log_error(APLOG_MARK, APLOG_NOTICE, 0,
-                         ap_server_conf, "caught SIGTERM, shutting down");
+                         ap_server_conf, APLOGNO(00491) "caught SIGTERM, shutting down");
         }
         return DONE;
     } else if (shutdown_pending) {
@@ -2282,20 +2899,16 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
 
         /* Close our listeners, and then ask our children to do same */
         ap_close_listeners();
-        ap_event_pod_killpg(pod, ap_daemons_limit, TRUE);
-        ap_relieve_child_processes();
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+                               AP_MPM_PODX_GRACEFUL);
+        }
+        ap_relieve_child_processes(event_note_child_killed);
 
         if (!child_fatal) {
             /* cleanup pid file on normal shutdown */
-            const char *pidfile = NULL;
-            pidfile = ap_server_root_relative (pconf, ap_pid_fname);
-            if ( pidfile != NULL && unlink(pidfile) == 0)
-                ap_log_error(APLOG_MARK, APLOG_INFO, 0,
-                             ap_server_conf,
-                             "removed PID file %s (pid=%ld)",
-                             pidfile, (long)getpid());
-
-            ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
+            ap_remove_pid(pconf, ap_pid_fname);
+            ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00492)
                          "caught " AP_SIG_GRACEFUL_STOP_STRING
                          ", shutting down gracefully");
         }
@@ -2312,7 +2925,7 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
             apr_sleep(apr_time_from_sec(1));
 
             /* Relieve any children which have now exited */
-            ap_relieve_child_processes();
+            ap_relieve_child_processes(event_note_child_killed);
 
             active_children = 0;
             for (index = 0; index < ap_daemons_limit; ++index) {
@@ -2329,8 +2942,11 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
          * way, try and make sure that all of our processes are
          * really dead.
          */
-        ap_event_pod_killpg(pod, ap_daemons_limit, FALSE);
-        ap_reclaim_child_processes(1);
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+                               AP_MPM_PODX_RESTART);
+        }
+        ap_reclaim_child_processes(1, event_note_child_killed);
 
         return DONE;
     }
@@ -2347,16 +2963,18 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
     /* XXX: we really need to make sure this new generation number isn't in
      * use by any of the children.
      */
-    ++my_generation;
-    ap_scoreboard_image->global->running_generation = my_generation;
+    ++retained->my_generation;
+    ap_scoreboard_image->global->running_generation = retained->my_generation;
 
-    if (is_graceful) {
-        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
+    if (retained->is_graceful) {
+        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00493)
                      AP_SIG_GRACEFUL_STRING
                      " received.  Doing graceful restart");
         /* wake up the children...time to die.  But we'll have more soon */
-        ap_event_pod_killpg(pod, ap_daemons_limit, TRUE);
-
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+                               AP_MPM_PODX_GRACEFUL);
+        }
 
         /* This is mostly for debugging... so that we know what is still
          * gracefully dealing with existing requests.
@@ -2368,10 +2986,14 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
          * and a SIGHUP, we may as well use the same signal, because some user
          * pthreads are stealing signals from us left and right.
          */
-        ap_event_pod_killpg(pod, ap_daemons_limit, FALSE);
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+                               AP_MPM_PODX_RESTART);
+        }
 
-        ap_reclaim_child_processes(1);  /* Start with SIGTERM */
-        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
+        ap_reclaim_child_processes(1,  /* Start with SIGTERM */
+                                   event_note_child_killed);
+        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00494)
                      "SIGHUP received.  Attempting to restart");
     }
 
@@ -2386,7 +3008,10 @@ static int event_open_logs(apr_pool_t * p, apr_pool_t * plog,
 {
     int startup = 0;
     int level_flags = 0;
+    int num_buckets = 0;
+    ap_listen_rec **listen_buckets;
     apr_status_t rv;
+    int i;
 
     pconf = p;
 
@@ -2403,14 +3028,64 @@ static int event_open_logs(apr_pool_t * p, apr_pool_t * plog,
         return DONE;
     }
 
-    if (!one_process) {
-        if ((rv = ap_event_pod_open(pconf, &pod))) {
+    if (one_process) {
+        num_buckets = 1;
+    }
+    else if (retained->is_graceful) {
+        /* Preserve the number of buckets on graceful restarts. */
+        num_buckets = retained->num_buckets;
+    }
+    if ((rv = ap_duplicate_listeners(pconf, ap_server_conf,
+                                     &listen_buckets, &num_buckets))) {
+        ap_log_error(APLOG_MARK, APLOG_CRIT | level_flags, rv,
+                     (startup ? NULL : s),
+                     "could not duplicate listeners");
+        return DONE;
+    }
+
+    all_buckets = apr_pcalloc(pconf, num_buckets * sizeof(*all_buckets));
+    for (i = 0; i < num_buckets; i++) {
+        if (!one_process && /* no POD in one_process mode */
+                (rv = ap_mpm_podx_open(pconf, &all_buckets[i].pod))) {
             ap_log_error(APLOG_MARK, APLOG_CRIT | level_flags, rv,
                          (startup ? NULL : s),
                          "could not open pipe-of-death");
             return DONE;
         }
+        all_buckets[i].listeners = listen_buckets[i];
     }
+
+    if (retained->max_buckets < num_buckets) {
+        int new_max, *new_ptr;
+        new_max = retained->max_buckets * 2;
+        if (new_max < num_buckets) {
+            new_max = num_buckets;
+        }
+        new_ptr = (int *)apr_palloc(ap_pglobal, new_max * sizeof(int));
+        memcpy(new_ptr, retained->idle_spawn_rate,
+               retained->num_buckets * sizeof(int));
+        retained->idle_spawn_rate = new_ptr;
+        retained->max_buckets = new_max;
+    }
+    if (retained->num_buckets < num_buckets) {
+        int rate_max = 1;
+        /* If new buckets are added, set their idle spawn rate to
+         * the highest so far, so that they get filled as quickly
+         * as the existing ones.
+         */
+        for (i = 0; i < retained->num_buckets; i++) {
+            if (rate_max < retained->idle_spawn_rate[i]) {
+                rate_max = retained->idle_spawn_rate[i];
+            }
+        }
+        for (/* i == retained->num_buckets here */; i < num_buckets; i++) {
+            retained->idle_spawn_rate[i] = rate_max;
+        }
+    }
+    retained->num_buckets = num_buckets;
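+    /* Illustrative walk-through (hypothetical numbers): growing from 2 to
+     * 5 buckets first doubles max_buckets to 4, still short of 5, so
+     * new_max becomes 5; the 2 existing spawn rates are copied over and
+     * the 3 new buckets start at the highest existing rate.
+     */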
+
+    /* seed the random number generator used by the skiplist */
+    srand((unsigned int)apr_time_now());
     return OK;
 }
 
@@ -2439,13 +3114,25 @@ static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog,
     retained = ap_retained_data_get(userdata_key);
     if (!retained) {
         retained = ap_retained_data_create(userdata_key, sizeof(*retained));
+        retained->max_daemons_limit = -1;
     }
     ++retained->module_loads;
     if (retained->module_loads == 2) {
+        /* test for correct operation of the atomics that fdqueue relies on */
+        static apr_uint32_t foo1, foo2;
+
+        apr_atomic_set32(&foo1, 100);
+        foo2 = apr_atomic_add32(&foo1, -10);
+        if (foo2 != 100 || foo1 != 90) {
+            ap_log_error(APLOG_MARK, APLOG_CRIT, 0, NULL, APLOGNO(02405)
+                         "atomics not working as expected - add32 of negative number");
+            return HTTP_INTERNAL_SERVER_ERROR;
+        }
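+        /* i.e. apr_atomic_add32() must behave as fetch-then-add: return
+         * the old value (100) and leave 90 behind, even for a "negative"
+         * (wrapped unsigned) delta.
+         */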
+
         rv = apr_pollset_create(&event_pollset, 1, plog,
                                 APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
         if (rv != APR_SUCCESS) {
-            ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL,
+            ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL, APLOGNO(00495)
                          "Couldn't create a Thread Safe Pollset. "
                          "Is it supported on your platform?"
                          "Also check system or user limits!");
@@ -2454,17 +3141,20 @@ static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog,
         apr_pollset_destroy(event_pollset);
 
         if (!one_process && !foreground) {
+            /* before we detach, setup crash handlers to log to errorlog */
+            ap_fatal_signal_setup(ap_server_conf, pconf);
             rv = apr_proc_detach(no_detach ? APR_PROC_DETACH_FOREGROUND
                                  : APR_PROC_DETACH_DAEMONIZE);
             if (rv != APR_SUCCESS) {
-                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL,
+                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL, APLOGNO(00496)
                              "apr_proc_detach failed");
                 return HTTP_INTERNAL_SERVER_ERROR;
             }
         }
-        parent_pid = ap_my_pid = getpid();
     }
 
+    parent_pid = ap_my_pid = getpid();
+
     ap_listen_pre_config();
     ap_daemons_to_start = DEFAULT_START_DAEMON;
     min_spare_threads = DEFAULT_MIN_FREE_DAEMON * DEFAULT_THREADS_PER_CHILD;
@@ -2473,13 +3163,72 @@ static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog,
     thread_limit = DEFAULT_THREAD_LIMIT;
     ap_daemons_limit = server_limit;
     threads_per_child = DEFAULT_THREADS_PER_CHILD;
-    max_clients = ap_daemons_limit * threads_per_child;
-    ap_pid_fname = DEFAULT_PIDLOG;
-    ap_max_requests_per_child = DEFAULT_MAX_REQUESTS_PER_CHILD;
+    max_workers = ap_daemons_limit * threads_per_child;
+    had_healthy_child = 0;
     ap_extended_status = 0;
-    ap_max_mem_free = APR_ALLOCATOR_MAX_FREE_UNLIMITED;
 
-    apr_cpystrn(ap_coredump_dir, ap_server_root, sizeof(ap_coredump_dir));
+    return OK;
+}
+
+static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
+                             apr_pool_t *ptemp, server_rec *s)
+{
+    struct {
+        struct timeout_queue *tail, *q;
+        apr_hash_t *hash;
+    } wc, ka;
+
+    /* Not needed in pre_config stage */
+    if (ap_state_query(AP_SQ_MAIN_STATE) == AP_SQ_MS_CREATE_PRE_CONFIG) {
+        return OK;
+    }
+
+    wc.tail = ka.tail = NULL;
+    wc.hash = apr_hash_make(ptemp);
+    ka.hash = apr_hash_make(ptemp);
+
+    TO_QUEUE_INIT(linger_q, pconf,
+                  apr_time_from_sec(MAX_SECS_TO_LINGER), NULL);
+    TO_QUEUE_INIT(short_linger_q, pconf,
+                  apr_time_from_sec(SECONDS_TO_LINGER), NULL);
+
+    for (; s; s = s->next) {
+        event_srv_cfg *sc = apr_pcalloc(pconf, sizeof *sc);
+
+        ap_set_module_config(s->module_config, &mpm_event_module, sc);
+        if (!wc.tail) {
+            /* The main server uses the global queues */
+            TO_QUEUE_INIT(wc.q, pconf, s->timeout, NULL);
+            apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
+            wc.tail = write_completion_q = wc.q;
+
+            TO_QUEUE_INIT(ka.q, pconf, s->keep_alive_timeout, NULL);
+            apr_hash_set(ka.hash, &s->keep_alive_timeout,
+                         sizeof s->keep_alive_timeout, ka.q);
+            ka.tail = keepalive_q = ka.q;
+        }
+        else {
+            /* The vhosts share any existing queue with the same timeout,
+             * or get their own queue(s) if there is none */
+            wc.q = apr_hash_get(wc.hash, &s->timeout, sizeof s->timeout);
+            if (!wc.q) {
+                TO_QUEUE_INIT(wc.q, pconf, s->timeout, wc.tail);
+                apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
+                wc.tail = wc.tail->next = wc.q;
+            }
+
+            ka.q = apr_hash_get(ka.hash, &s->keep_alive_timeout,
+                                sizeof s->keep_alive_timeout);
+            if (!ka.q) {
+                TO_QUEUE_INIT(ka.q, pconf, s->keep_alive_timeout, ka.tail);
+                apr_hash_set(ka.hash, &s->keep_alive_timeout,
+                             sizeof s->keep_alive_timeout, ka.q);
+                ka.tail = ka.tail->next = ka.q;
+            }
+        }
+        sc->wc_q = wc.q;
+        sc->ka_q = ka.q;
+    }
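+    /* Sharing example (hypothetical config): a vhost that keeps the main
+     * server's KeepAliveTimeout reuses the same ka_q, while one overriding
+     * it to a different value gets its own queue chained at the tail.
+     */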
 
     return OK;
 }
@@ -2496,14 +3245,14 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
 
     if (server_limit > MAX_SERVER_LIMIT) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00497)
                          "WARNING: ServerLimit of %d exceeds compile-time "
                          "limit of", server_limit);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03105)
                          " %d servers, decreasing to %d.",
                          MAX_SERVER_LIMIT, MAX_SERVER_LIMIT);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00498)
                          "ServerLimit of %d exceeds compile-time limit "
                          "of %d, decreasing to match",
                          server_limit, MAX_SERVER_LIMIT);
@@ -2512,11 +3261,11 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (server_limit < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00499)
                          "WARNING: ServerLimit of %d not allowed, "
                          "increasing to 1.", server_limit);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00500)
                          "ServerLimit of %d not allowed, increasing to 1",
                          server_limit);
         }
@@ -2531,7 +3280,7 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (server_limit != retained->first_server_limit) {
         /* don't need a startup console version here */
-        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00501)
                      "changing ServerLimit to %d from original value of %d "
                      "not allowed during restart",
                      server_limit, retained->first_server_limit);
@@ -2540,14 +3289,14 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
 
     if (thread_limit > MAX_THREAD_LIMIT) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00502)
                          "WARNING: ThreadLimit of %d exceeds compile-time "
                          "limit of", thread_limit);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03106)
                          " %d threads, decreasing to %d.",
                          MAX_THREAD_LIMIT, MAX_THREAD_LIMIT);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00503)
                          "ThreadLimit of %d exceeds compile-time limit "
                          "of %d, decreasing to match",
                          thread_limit, MAX_THREAD_LIMIT);
@@ -2556,11 +3305,11 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (thread_limit < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00504)
                          "WARNING: ThreadLimit of %d not allowed, "
                          "increasing to 1.", thread_limit);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00505)
                          "ThreadLimit of %d not allowed, increasing to 1",
                          thread_limit);
         }
@@ -2575,7 +3324,7 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (thread_limit != retained->first_thread_limit) {
         /* don't need a startup console version here */
-        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00506)
                      "changing ThreadLimit to %d from original value of %d "
                      "not allowed during restart",
                      thread_limit, retained->first_thread_limit);
@@ -2584,17 +3333,17 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
 
     if (threads_per_child > thread_limit) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00507)
                          "WARNING: ThreadsPerChild of %d exceeds ThreadLimit "
                          "of", threads_per_child);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03107)
                          " %d threads, decreasing to %d.",
                          thread_limit, thread_limit);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03108)
                          " To increase, please see the ThreadLimit "
                          "directive.");
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00508)
                          "ThreadsPerChild of %d exceeds ThreadLimit "
                          "of %d, decreasing to match",
                          threads_per_child, thread_limit);
@@ -2603,92 +3352,92 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (threads_per_child < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00509)
                          "WARNING: ThreadsPerChild of %d not allowed, "
                          "increasing to 1.", threads_per_child);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00510)
                          "ThreadsPerChild of %d not allowed, increasing to 1",
                          threads_per_child);
         }
         threads_per_child = 1;
     }
 
-    if (max_clients < threads_per_child) {
+    if (max_workers < threads_per_child) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         "WARNING: MaxClients of %d is less than "
-                         "ThreadsPerChild of", max_clients);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         " %d, increasing to %d.  MaxClients must be at "
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00511)
+                         "WARNING: MaxRequestWorkers of %d is less than "
+                         "ThreadsPerChild of", max_workers);
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03109)
+                         " %d, increasing to %d.  MaxRequestWorkers must be at "
                          "least as large",
                          threads_per_child, threads_per_child);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03110)
                          " as the number of threads in a single server.");
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
-                         "MaxClients of %d is less than ThreadsPerChild "
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00512)
+                         "MaxRequestWorkers of %d is less than ThreadsPerChild "
                          "of %d, increasing to match",
-                         max_clients, threads_per_child);
+                         max_workers, threads_per_child);
         }
-        max_clients = threads_per_child;
+        max_workers = threads_per_child;
     }
 
-    ap_daemons_limit = max_clients / threads_per_child;
+    ap_daemons_limit = max_workers / threads_per_child;
 
-    if (max_clients % threads_per_child) {
-        int tmp_max_clients = ap_daemons_limit * threads_per_child;
+    if (max_workers % threads_per_child) {
+        int tmp_max_workers = ap_daemons_limit * threads_per_child;
 
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         "WARNING: MaxClients of %d is not an integer "
-                         "multiple of", max_clients);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00513)
+                         "WARNING: MaxRequestWorkers of %d is not an integer "
+                         "multiple of", max_workers);
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03111)
                          " ThreadsPerChild of %d, decreasing to nearest "
                          "multiple %d,", threads_per_child,
-                         tmp_max_clients);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+                         tmp_max_workers);
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03112)
                          " for a maximum of %d servers.",
                          ap_daemons_limit);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
-                         "MaxClients of %d is not an integer multiple of "
-                         "ThreadsPerChild of %d, decreasing to nearest "
-                         "multiple %d", max_clients, threads_per_child,
-                         tmp_max_clients);
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00514)
+                         "MaxRequestWorkers of %d is not an integer multiple "
+                         "of ThreadsPerChild of %d, decreasing to nearest "
+                         "multiple %d", max_workers, threads_per_child,
+                         tmp_max_workers);
         }
-        max_clients = tmp_max_clients;
+        max_workers = tmp_max_workers;
     }
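+    /* e.g. (hypothetical values): MaxRequestWorkers 400 with
+     * ThreadsPerChild 25 yields ap_daemons_limit == 16; a non-multiple
+     * such as 410 is reduced to the nearest multiple, 400, with the
+     * warning above.
+     */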
 
     if (ap_daemons_limit > server_limit) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         "WARNING: MaxClients of %d would require %d "
-                         "servers and ", max_clients, ap_daemons_limit);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00515)
+                         "WARNING: MaxRequestWorkers of %d would require %d "
+                         "servers and ", max_workers, ap_daemons_limit);
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03113)
                          " would exceed ServerLimit of %d, decreasing to %d.",
                          server_limit, server_limit * threads_per_child);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03114)
                          " To increase, please see the ServerLimit "
                          "directive.");
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
-                         "MaxClients of %d would require %d servers and "
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00516)
+                         "MaxRequestWorkers of %d would require %d servers and "
                          "exceed ServerLimit of %d, decreasing to %d",
-                         max_clients, ap_daemons_limit, server_limit,
+                         max_workers, ap_daemons_limit, server_limit,
                          server_limit * threads_per_child);
         }
         ap_daemons_limit = server_limit;
     }
 
     /* ap_daemons_to_start > ap_daemons_limit checked in ap_mpm_run() */
-    if (ap_daemons_to_start < 0) {
+    if (ap_daemons_to_start < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00517)
                          "WARNING: StartServers of %d not allowed, "
                          "increasing to 1.", ap_daemons_to_start);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00518)
                          "StartServers of %d not allowed, increasing to 1",
                          ap_daemons_to_start);
         }
@@ -2697,15 +3446,15 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
 
     if (min_spare_threads < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00519)
                          "WARNING: MinSpareThreads of %d not allowed, "
                          "increasing to 1", min_spare_threads);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03115)
                          " to avoid almost certain server failure.");
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(03116)
                          " Please read the documentation.");
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00520)
                          "MinSpareThreads of %d not allowed, increasing to 1",
                          min_spare_threads);
         }
@@ -2733,12 +3482,14 @@ static void event_hooks(apr_pool_t * p)
      * to retrieve it, so register as REALLY_FIRST
      */
     ap_hook_pre_config(event_pre_config, NULL, NULL, APR_HOOK_REALLY_FIRST);
+    ap_hook_post_config(event_post_config, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_check_config(event_check_config, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_mpm(event_run, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_mpm_query(event_query, NULL, NULL, APR_HOOK_MIDDLE);
-    ap_hook_mpm_note_child_killed(event_note_child_killed, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_mpm_register_timed_callback(event_register_timed_callback, NULL, NULL,
                                         APR_HOOK_MIDDLE);
+    ap_hook_pre_read_request(event_pre_read_request, NULL, NULL, APR_HOOK_MIDDLE);
+    ap_hook_post_read_request(event_post_read_request, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_mpm_get_name(event_get_name, NULL, NULL, APR_HOOK_MIDDLE);
 }
 
@@ -2778,15 +3529,19 @@ static const char *set_max_spare_threads(cmd_parms * cmd, void *dummy,
     return NULL;
 }
 
-static const char *set_max_clients(cmd_parms * cmd, void *dummy,
+static const char *set_max_workers(cmd_parms * cmd, void *dummy,
                                    const char *arg)
 {
     const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
     if (err != NULL) {
         return err;
     }
-
-    max_clients = atoi(arg);
+    if (!strcasecmp(cmd->cmd->name, "MaxClients")) {
+        ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, APLOGNO(00521)
+                     "MaxClients is deprecated, use MaxRequestWorkers "
+                     "instead.");
+    }
+    max_workers = atoi(arg);
     return NULL;
 }
 
@@ -2824,6 +3579,30 @@ static const char *set_thread_limit(cmd_parms * cmd, void *dummy,
     return NULL;
 }
 
+static const char *set_worker_factor(cmd_parms * cmd, void *dummy,
+                                     const char *arg)
+{
+    double val;
+    char *endptr;
+    const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+    if (err != NULL) {
+        return err;
+    }
+
+    val = strtod(arg, &endptr);
+    if (*endptr)
+        return "error parsing value";
+
+    if (val <= 0)
+        return "AsyncRequestWorkerFactor argument must be a positive number";
+
+    worker_factor = val * WORKER_FACTOR_SCALE;
+    if (worker_factor == 0)
+        worker_factor = 1;
+    return NULL;
+}
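+/* Sketch of the fixed-point conversion above, assuming WORKER_FACTOR_SCALE
+ * is 16 as defined earlier in this file: AsyncRequestWorkerFactor 1.5 is
+ * stored as worker_factor == 24, and the default of 2 as 32; fractions
+ * below 1/16 truncate to 0 and are bumped to the minimum of 1.
+ */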
+
+
 static const command_rec event_cmds[] = {
     LISTEN_COMMANDS,
     AP_INIT_TAKE1("StartServers", set_daemons_to_start, NULL, RSRC_CONF,
@@ -2834,13 +3613,18 @@ static const command_rec event_cmds[] = {
                   "Minimum number of idle threads, to handle request spikes"),
     AP_INIT_TAKE1("MaxSpareThreads", set_max_spare_threads, NULL, RSRC_CONF,
                   "Maximum number of idle threads"),
-    AP_INIT_TAKE1("MaxClients", set_max_clients, NULL, RSRC_CONF,
+    AP_INIT_TAKE1("MaxClients", set_max_workers, NULL, RSRC_CONF,
+                  "Deprecated name of MaxRequestWorkers"),
+    AP_INIT_TAKE1("MaxRequestWorkers", set_max_workers, NULL, RSRC_CONF,
                   "Maximum number of threads alive at the same time"),
     AP_INIT_TAKE1("ThreadsPerChild", set_threads_per_child, NULL, RSRC_CONF,
                   "Number of threads each child creates"),
     AP_INIT_TAKE1("ThreadLimit", set_thread_limit, NULL, RSRC_CONF,
                   "Maximum number of worker threads per child process for this "
                   "run of Apache - Upper limit for ThreadsPerChild"),
+    AP_INIT_TAKE1("AsyncRequestWorkerFactor", set_worker_factor, NULL, RSRC_CONF,
+                  "How many additional connects will be accepted per idle "
+                  "worker thread"),
     AP_GRACEFUL_SHUTDOWN_TIMEOUT_COMMAND,
     {NULL}
 };