Merge r1809273, r1814719 from trunk:
diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
index 452a67b773ac907af9cba2dc93c3ca988c5522a7..bd3ad3c9989d10ee18f7640ea914a27c46b87030 100644
  *
  * After a client completes the first request, the client can keep the
  * connection open to send more requests with the same socket.  This can save
- * signifigant overhead in creating TCP connections.  However, the major
+ * significant overhead in creating TCP connections.  However, the major
  * disadvantage is that Apache traditionally keeps an entire child
  * process/thread waiting for data from the client.  To solve this problem,
- * this MPM has a dedicated thread for handling both the Listenting sockets,
+ * this MPM has a dedicated thread for handling both the Listening sockets,
  * and all sockets that are in a Keep Alive status.
  *
  * The MPM assumes the underlying apr_pollset implementation is somewhat
@@ -30,7 +30,7 @@
  * enables the MPM to avoid extra high level locking or having to wake up the
  * listener thread when a keep-alive socket needs to be sent to it.
  *
- * This MPM not preform well on older platforms that do not have very good
+ * This MPM does not perform well on older platforms that do not have very good
  * threading, like Linux with a 2.4 kernel, but this does not matter, since we
  * require EPoll or KQueue.
  *
 #include "apr_poll.h"
 #include "apr_ring.h"
 #include "apr_queue.h"
+#include "apr_atomic.h"
 #define APR_WANT_STRFUNC
 #include "apr_want.h"
 #include "apr_version.h"
 
+#include <stdlib.h>
+
 #if APR_HAVE_UNISTD_H
 #include <unistd.h>
 #endif
@@ -82,8 +85,8 @@
 #include "http_config.h"        /* for read_config */
 #include "http_core.h"          /* for get_remote_host */
 #include "http_connection.h"
+#include "http_protocol.h"
 #include "ap_mpm.h"
-#include "pod.h"
 #include "mpm_common.h"
 #include "ap_listen.h"
 #include "scoreboard.h"
 #include "mpm_default.h"
 #include "http_vhost.h"
 #include "unixd.h"
+#include "apr_skiplist.h"
 
 #include <signal.h>
 #include <limits.h>             /* for INT_MAX */
 
 
-#if HAVE_SERF
-#include "mod_serf.h"
-#include "serf.h"
-#endif
-
 /* Limit on the total --- clients will be locked out if more servers than
  * this are needed.  It is intended solely to keep the server from crashing
  * when things get out of hand.
 #define apr_time_from_msec(x) (x * 1000)
 #endif
 
+#ifndef MAX_SECS_TO_LINGER
+#define MAX_SECS_TO_LINGER 30
+#endif
+#define SECONDS_TO_LINGER  2
+
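MAX_SECS_TO_LINGER (30s) bounds the normal lingering-close period, while SECONDS_TO_LINGER (2s) is the shortened variant a module can request by setting the "short-lingering-close" note on the connection (checked further down in start_lingering_close_blocking()). A minimal, hypothetical sketch of a module opting a connection into the short variant:

/* Hypothetical helper: ask the event MPM for the 2s lingering close
 * instead of the 30s one (see the note lookup in
 * start_lingering_close_blocking() below).
 */
static int example_set_short_linger(conn_rec *c)
{
    apr_table_setn(c->notes, "short-lingering-close", "1");
    return OK;
}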
 /*
  * Actual definitions of config globals
  */
 
-static int threads_per_child = 0;   /* Worker threads per child */
-static int ap_daemons_to_start = 0;
-static int min_spare_threads = 0;
-static int max_spare_threads = 0;
-static int ap_daemons_limit = 0;
-static int max_clients = 0;
-static int server_limit = 0;
-static int thread_limit = 0;
+#ifndef DEFAULT_WORKER_FACTOR
+#define DEFAULT_WORKER_FACTOR 2
+#endif
+#define WORKER_FACTOR_SCALE   16  /* scale factor to allow fractional values */
+static unsigned int worker_factor = DEFAULT_WORKER_FACTOR * WORKER_FACTOR_SCALE;
+    /* AsyncRequestWorkerFactor * 16 */
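WORKER_FACTOR_SCALE stores AsyncRequestWorkerFactor as a fixed-point integer so the directive can take fractional values. A hedged sketch (not code from this change) of how the scaled value maps onto the documented connection limit of roughly ThreadsPerChild + AsyncRequestWorkerFactor * idle workers:

/* Illustrative only: "AsyncRequestWorkerFactor 1.5" would be stored as
 * worker_factor = 1.5 * WORKER_FACTOR_SCALE = 24; the listener elsewhere
 * compares the connection count against a limit along these lines.
 */
static int over_connection_limit(apr_uint32_t connections, apr_uint32_t idlers)
{
    apr_uint32_t limit = threads_per_child
                         + idlers * worker_factor / WORKER_FACTOR_SCALE;
    return connections > limit;
}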
+
+static int threads_per_child = 0;           /* ThreadsPerChild */
+static int ap_daemons_to_start = 0;         /* StartServers */
+static int min_spare_threads = 0;           /* MinSpareThreads */
+static int max_spare_threads = 0;           /* MaxSpareThreads */
+static int active_daemons_limit = 0;        /* MaxRequestWorkers / ThreadsPerChild */
+static int active_daemons = 0;              /* workers that are still active, i.e.
+                                               not shutting down gracefully */
+static int max_workers = 0;                 /* MaxRequestWorkers */
+static int server_limit = 0;                /* ServerLimit */
+static int thread_limit = 0;                /* ThreadLimit */
+static int had_healthy_child = 0;
 static int dying = 0;
 static int workers_may_exit = 0;
 static int start_thread_may_exit = 0;
 static int listener_may_exit = 0;
-static int requests_this_child;
+static int listener_is_wakeable = 0;        /* Pollset supports APR_POLLSET_WAKEABLE */
 static int num_listensocks = 0;
+static apr_int32_t conns_this_child;        /* MaxConnectionsPerChild, only accessed
+                                               in the listener thread */
+static apr_uint32_t connection_count = 0;   /* Number of open connections */
+static apr_uint32_t lingering_count = 0;    /* Number of connections in lingering close */
+static apr_uint32_t suspended_count = 0;    /* Number of suspended connections */
+static apr_uint32_t clogged_count = 0;      /* Number of threads processing ssl conns */
+static apr_uint32_t threads_shutdown = 0;   /* Number of threads that have shut down
+                                               early during graceful termination */
 static int resource_shortage = 0;
 static fd_queue_t *worker_queue;
 static fd_queue_info_t *worker_queue_info;
-static int mpm_state = AP_MPMQ_STARTING;
-static int sick_child_detected;
-static ap_generation_t volatile my_generation = 0;
 
 static apr_thread_mutex_t *timeout_mutex;
-APR_RING_HEAD(timeout_head_t, conn_state_t);
-static struct timeout_head_t timeout_head, keepalive_timeout_head;
 
+module AP_MODULE_DECLARE_DATA mpm_event_module;
+
+/* forward declare */
+struct event_srv_cfg_s;
+typedef struct event_srv_cfg_s event_srv_cfg;
+
+static apr_pollfd_t *listener_pollfd;
+
+/*
+ * The pollset for sockets that are in any of the timeout queues. Currently
+ * we use the timeout_mutex to make sure that connections are added/removed
+ * atomically to/from both event_pollset and a timeout queue. Otherwise
+ * some confusion can happen under high load if timeout queues and pollset
+ * get out of sync.
+ * XXX: It should be possible to make the lock unnecessary in many or even all
+ * XXX: cases.
+ */
 static apr_pollset_t *event_pollset;
 
-#if HAVE_SERF
-typedef struct {
-    apr_pollset_t *pollset;
-    apr_pool_t *pool;
-} s_baton_t;
+/*
+ * The chain of connections to be shutdown by a worker thread (deferred),
+ * linked list updated atomically.
+ */
+static event_conn_state_t *volatile defer_linger_chain;
+
+struct event_conn_state_t {
+    /** APR_RING of expiration timeouts */
+    APR_RING_ENTRY(event_conn_state_t) timeout_list;
+    /** the time when the entry was queued */
+    apr_time_t queue_timestamp;
+    /** connection record this struct refers to */
+    conn_rec *c;
+    /** request record (if any) this struct refers to */
+    request_rec *r;
+    /** server config this struct refers to */
+    event_srv_cfg *sc;
+    /** is the current conn_rec suspended?  (disassociated with
+     * a particular MPM thread; for suspend_/resume_connection
+     * hooks)
+     */
+    int suspended;
+    /** memory pool to allocate from */
+    apr_pool_t *p;
+    /** bucket allocator */
+    apr_bucket_alloc_t *bucket_alloc;
+    /** poll file descriptor information */
+    apr_pollfd_t pfd;
+    /** public parts of the connection state */
+    conn_state_t pub;
+    /** chaining in defer_linger_chain */
+    struct event_conn_state_t *chain;
+};
 
-static serf_context_t *g_serf;
-#endif
+APR_RING_HEAD(timeout_head_t, event_conn_state_t);
+
+struct timeout_queue {
+    struct timeout_head_t head;
+    apr_interval_time_t timeout;
+    apr_uint32_t count;         /* for this queue */
+    apr_uint32_t *total;        /* for all chained/related queues */
+    struct timeout_queue *next; /* chaining */
+};
+/*
+ * Several timeout queues that use different timeouts, so that we always can
+ * simply append to the end.
+ *   write_completion_q uses vhost's TimeOut
+ *   keepalive_q        uses vhost's KeepAliveTimeOut
+ *   linger_q           uses MAX_SECS_TO_LINGER
+ *   short_linger_q     uses SECONDS_TO_LINGER
+ */
+static struct timeout_queue *write_completion_q,
+                            *keepalive_q,
+                            *linger_q,
+                            *short_linger_q;
+static volatile apr_time_t  queues_next_expiry;
+
+/* Prevent extra poll/wakeup calls for timeouts close in the future (queues
+ * have the granularity of a second anyway).
+ * XXX: Wouldn't 0.5s (instead of 0.1s) be "enough"?
+ */
+#define TIMEOUT_FUDGE_FACTOR apr_time_from_msec(100)
+
+/*
+ * Macros for accessing struct timeout_queue.
+ * For TO_QUEUE_APPEND and TO_QUEUE_REMOVE, timeout_mutex must be held.
+ */
+static void TO_QUEUE_APPEND(struct timeout_queue *q, event_conn_state_t *el)
+{
+    apr_time_t q_expiry;
+    apr_time_t next_expiry;
+
+    APR_RING_INSERT_TAIL(&q->head, el, event_conn_state_t, timeout_list);
+    apr_atomic_inc32(q->total);
+    ++q->count;
+
+    /* Cheaply update the overall queues' next expiry according to the
+     * first entry of this queue (oldest), if necessary.
+     */
+    el = APR_RING_FIRST(&q->head);
+    q_expiry = el->queue_timestamp + q->timeout;
+    next_expiry = queues_next_expiry;
+    if (!next_expiry || next_expiry > q_expiry + TIMEOUT_FUDGE_FACTOR) {
+        queues_next_expiry = q_expiry;
+        /* Unblock the poll()ing listener for it to update its timeout. */
+        if (listener_is_wakeable) {
+            apr_pollset_wakeup(event_pollset);
+        }
+    }
+}
+
+static void TO_QUEUE_REMOVE(struct timeout_queue *q, event_conn_state_t *el)
+{
+    APR_RING_REMOVE(el, timeout_list);
+    APR_RING_ELEM_INIT(el, timeout_list);
+    apr_atomic_dec32(q->total);
+    --q->count;
+}
+
+static struct timeout_queue *TO_QUEUE_MAKE(apr_pool_t *p, apr_time_t t,
+                                           struct timeout_queue *ref)
+{
+    struct timeout_queue *q;
+                                           
+    q = apr_pcalloc(p, sizeof *q);
+    APR_RING_INIT(&q->head, event_conn_state_t, timeout_list);
+    q->total = (ref) ? ref->total : apr_pcalloc(p, sizeof *q->total);
+    q->timeout = t;
+
+    return q;
+}
+
+#define TO_QUEUE_ELEM_INIT(el) \
+    APR_RING_ELEM_INIT((el), timeout_list)
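Each server gets its own write-completion and keep-alive queue (see event_srv_cfg_s below), and queues that belong together share one *total counter by passing a reference queue to TO_QUEUE_MAKE() and chaining them via next. A minimal sketch, assuming a pool p and made-up timeout values; the real wiring happens in the MPM's post_config hook, which is not part of this excerpt:

/* Hypothetical setup of two chained keep-alive queues sharing one total. */
static struct timeout_queue *make_keepalive_queues(apr_pool_t *p)
{
    /* Base server, e.g. KeepAliveTimeout 5 */
    struct timeout_queue *base = TO_QUEUE_MAKE(p, apr_time_from_sec(5), NULL);

    /* A vhost with KeepAliveTimeout 2 gets its own queue but shares
     * base->total, so process_timeout_queue() can walk both as one chain. */
    struct timeout_queue *vhost = TO_QUEUE_MAKE(p, apr_time_from_sec(2), base);
    vhost->next = base->next;
    base->next = vhost;

    return base;
}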
 
 /* The structure used to pass unique initialization info to each thread */
 typedef struct
 {
-    int pid;
-    int tid;
-    int sd;
+    int pslot;  /* process slot */
+    int tslot;  /* worker slot of the thread */
 } proc_info;
 
 /* Structure used to pass information to the thread responsible for
@@ -210,15 +349,11 @@ typedef enum
 {
     PT_CSD,
     PT_ACCEPT
-#if HAVE_SERF
-    , PT_SERF
-#endif
 } poll_type_e;
 
 typedef struct
 {
     poll_type_e type;
-    int bypass_push;
     void *baton;
 } listener_poll_type;
 
@@ -227,24 +362,55 @@ typedef struct
  * subsequent calls to pre-config hook
  */
 typedef struct event_retained_data {
+    ap_unixd_mpm_retained_data *mpm;
+
     int first_server_limit;
     int first_thread_limit;
-    int module_loads;
+    int sick_child_detected;
+    int maxclients_reported;
+    /*
+     * The max child slot ever assigned, preserved across restarts.  Necessary
+     * to deal with MaxRequestWorkers changes across AP_SIG_GRACEFUL restarts.
+     * We use this value to optimize routines that have to scan the entire
+     * scoreboard.
+     */
+    int max_daemons_limit;
+
+    /*
+     * All running workers, active and shutting down, including those that
+     * may be left from before a graceful restart.
+     * Not kept up-to-date when shutdown is pending.
+     */
+    int total_daemons;
+
+    /*
+     * idle_spawn_rate is the number of children that will be spawned on the
+     * next maintenance cycle if there aren't enough idle servers.  It is
+     * maintained per listeners bucket, doubled up to MAX_SPAWN_RATE, and
+     * reset only when a cycle goes by without the need to spawn.
+     */
+    int *idle_spawn_rate;
+#ifndef MAX_SPAWN_RATE
+#define MAX_SPAWN_RATE        (32)
+#endif
+    int hold_off_on_exponential_spawning;
 } event_retained_data;
 static event_retained_data *retained;
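The idle_spawn_rate comment above only describes the policy; a hedged sketch of what it amounts to per maintenance cycle (the real logic lives in the idle-server maintenance code elsewhere in this file, and bucket/need_to_spawn are hypothetical inputs):

static void update_spawn_rate(int bucket, int need_to_spawn)
{
    if (need_to_spawn) {
        /* ... spawn up to idle_spawn_rate[bucket] children here ... */
        if (retained->idle_spawn_rate[bucket] < MAX_SPAWN_RATE) {
            retained->idle_spawn_rate[bucket] *= 2;  /* 1, 2, 4, ..., 32 */
        }
    }
    else {
        /* a full cycle without spawning resets the rate */
        retained->idle_spawn_rate[bucket] = 1;
    }
}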
+typedef struct event_child_bucket {
+    ap_pod_t *pod;
+    ap_listen_rec *listeners;
+} event_child_bucket;
+static event_child_bucket *all_buckets, /* All listeners buckets */
+                          *my_bucket;   /* Current child bucket */
+
+struct event_srv_cfg_s {
+    struct timeout_queue *wc_q,
+                         *ka_q;
+};
 
 #define ID_FROM_CHILD_THREAD(c, t)    ((c * thread_limit) + t)
 
-/*
- * The max child slot ever assigned, preserved across restarts.  Necessary
- * to deal with MaxClients changes across AP_SIG_GRACEFUL restarts.  We
- * use this value to optimize routines that have to scan the entire
- * scoreboard.
- */
-static int max_daemons_limit = -1;
-
-static ap_event_pod_t *pod;
-
 /* The event MPM respects a couple of runtime flags that can aid
  * in debugging. Setting the -DNO_DETACH flag will prevent the root process
  * from detaching from its controlling terminal. Additionally, setting
@@ -285,15 +451,89 @@ static apr_os_thread_t *listener_os_thread;
  */
 static apr_socket_t **worker_sockets;
 
+static void disable_listensocks(int process_slot)
+{
+    int i;
+    for (i = 0; i < num_listensocks; i++) {
+        apr_pollset_remove(event_pollset, &listener_pollfd[i]);
+    }
+    ap_scoreboard_image->parent[process_slot].not_accepting = 1;
+}
+
+static void enable_listensocks(int process_slot)
+{
+    int i;
+    if (listener_may_exit) {
+        return;
+    }
+    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00457)
+                 "Accepting new connections again: "
+                 "%u active conns (%u lingering/%u clogged/%u suspended), "
+                 "%u idle workers",
+                 apr_atomic_read32(&connection_count),
+                 apr_atomic_read32(&lingering_count),
+                 apr_atomic_read32(&clogged_count),
+                 apr_atomic_read32(&suspended_count),
+                 ap_queue_info_get_idlers(worker_queue_info));
+    for (i = 0; i < num_listensocks; i++)
+        apr_pollset_add(event_pollset, &listener_pollfd[i]);
+    /*
+     * XXX: This is not yet optimal. If many workers suddenly become available,
+     * XXX: the parent may kill some processes off too soon.
+     */
+    ap_scoreboard_image->parent[process_slot].not_accepting = 0;
+}
+
+static void abort_socket_nonblocking(apr_socket_t *csd)
+{
+    apr_status_t rv;
+    apr_socket_timeout_set(csd, 0);
+#if defined(SOL_SOCKET) && defined(SO_LINGER)
+    /* This socket is over now, and we don't want to block nor linger
+     * anymore, so reset it. A normal close could still linger in the
+     * system, while RST is fast, nonblocking, and what the peer will
+     * get if it sends us further data anyway.
+     */
+    {
+        apr_os_sock_t osd = -1;
+        struct linger opt;
+        opt.l_onoff = 1;
+        opt.l_linger = 0; /* zero timeout is RST */
+        apr_os_sock_get(&osd, csd);
+        setsockopt(osd, SOL_SOCKET, SO_LINGER, (void *)&opt, sizeof opt);
+    }
+#endif
+    rv = apr_socket_close(csd);
+    if (rv != APR_SUCCESS) {
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(00468)
+                     "error closing socket");
+        AP_DEBUG_ASSERT(0);
+    }
+}
+
 static void close_worker_sockets(void)
 {
     int i;
     for (i = 0; i < threads_per_child; i++) {
-        if (worker_sockets[i]) {
-            apr_socket_close(worker_sockets[i]);
+        apr_socket_t *csd = worker_sockets[i];
+        if (csd) {
             worker_sockets[i] = NULL;
+            abort_socket_nonblocking(csd);
         }
     }
+    for (;;) {
+        event_conn_state_t *cs = defer_linger_chain;
+        if (!cs) {
+            break;
+        }
+        if (apr_atomic_casptr((void *)&defer_linger_chain, cs->chain,
+                              cs) != cs) {
+            /* Race lost, try again */
+            continue;
+        }
+        cs->chain = NULL;
+        abort_socket_nonblocking(cs->pfd.desc.s);
+    }
 }
 
 static void wakeup_listener(void)
@@ -308,8 +548,13 @@ static void wakeup_listener(void)
         return;
     }
 
+    /* Unblock the listener if it's poll()ing */
+    if (listener_is_wakeable) {
+        apr_pollset_wakeup(event_pollset);
+    }
+
     /* unblock the listener if it's waiting for a worker */
-    ap_queue_info_term(worker_queue_info); 
+    ap_queue_info_term(worker_queue_info);
 
     /*
      * we should just be able to "kill(ap_my_pid, LISTENER_SIGNAL)" on all
@@ -335,7 +580,7 @@ static void signal_threads(int mode)
         return;
     }
     terminate_mode = mode;
-    mpm_state = AP_MPMQ_STOPPING;
+    retained->mpm->mpm_state = AP_MPMQ_STOPPING;
 
     /* in case we weren't called from the listener thread, wake up the
      * listener thread
@@ -358,7 +603,7 @@ static int event_query(int query_code, int *result, apr_status_t *rv)
     *rv = APR_SUCCESS;
     switch (query_code) {
     case AP_MPMQ_MAX_DAEMON_USED:
-        *result = max_daemons_limit;
+        *result = retained->max_daemons_limit;
         break;
     case AP_MPMQ_IS_THREADED:
         *result = AP_MPMQ_STATIC;
@@ -369,9 +614,6 @@ static int event_query(int query_code, int *result, apr_status_t *rv)
     case AP_MPMQ_IS_ASYNC:
         *result = 1;
         break;
-    case AP_MPMQ_HAS_SERF:
-        *result = 1;
-        break;
     case AP_MPMQ_HARD_LIMIT_DAEMONS:
         *result = server_limit;
         break;
@@ -397,13 +639,13 @@ static int event_query(int query_code, int *result, apr_status_t *rv)
         *result = ap_max_requests_per_child;
         break;
     case AP_MPMQ_MAX_DAEMONS:
-        *result = ap_daemons_limit;
+        *result = active_daemons_limit;
         break;
     case AP_MPMQ_MPM_STATE:
-        *result = mpm_state;
+        *result = retained->mpm->mpm_state;
         break;
     case AP_MPMQ_GENERATION:
-        *result = my_generation;
+        *result = retained->mpm->my_generation;
         break;
     default:
         *rv = APR_ENOTIMPL;
@@ -412,10 +654,26 @@ static int event_query(int query_code, int *result, apr_status_t *rv)
     return OK;
 }
 
-static apr_status_t event_note_child_killed(int childnum)
+static void event_note_child_killed(int childnum, pid_t pid, ap_generation_t gen)
 {
-    ap_scoreboard_image->parent[childnum].pid = 0;
-    return APR_SUCCESS;
+    if (childnum != -1) { /* child had a scoreboard slot? */
+        ap_run_child_status(ap_server_conf,
+                            ap_scoreboard_image->parent[childnum].pid,
+                            ap_scoreboard_image->parent[childnum].generation,
+                            childnum, MPM_CHILD_EXITED);
+        ap_scoreboard_image->parent[childnum].pid = 0;
+    }
+    else {
+        ap_run_child_status(ap_server_conf, pid, gen, -1, MPM_CHILD_EXITED);
+    }
+}
+
+static void event_note_child_started(int slot, pid_t pid)
+{
+    ap_scoreboard_image->parent[slot].pid = pid;
+    ap_run_child_status(ap_server_conf,
+                        ap_scoreboard_image->parent[slot].pid,
+                        retained->mpm->my_generation, slot, MPM_CHILD_STARTED);
 }
 
 static const char *event_get_name(void)
@@ -427,10 +685,15 @@ static const char *event_get_name(void)
 static void clean_child_exit(int code) __attribute__ ((noreturn));
 static void clean_child_exit(int code)
 {
-    mpm_state = AP_MPMQ_STOPPING;
+    retained->mpm->mpm_state = AP_MPMQ_STOPPING;
     if (pchild) {
         apr_pool_destroy(pchild);
     }
+
+    if (one_process) {
+        event_note_child_killed(/* slot */ 0, 0, 0);
+    }
+
     exit(code);
 }
 
@@ -443,196 +706,267 @@ static void just_die(int sig)
  * Connection structures and accounting...
  */
 
-/* volatile just in case */
-static int volatile shutdown_pending;
-static int volatile restart_pending;
-static int volatile is_graceful;
-static volatile int child_fatal;
-
-/*
- * ap_start_shutdown() and ap_start_restart(), below, are a first stab at
- * functions to initiate shutdown or restart without relying on signals.
- * Previously this was initiated in sig_term() and restart() signal handlers,
- * but we want to be able to start a shutdown/restart from other sources --
- * e.g. on Win32, from the service manager. Now the service manager can
- * call ap_start_shutdown() or ap_start_restart() as appropiate.  Note that
- * these functions can also be called by the child processes, since global
- * variables are no longer used to pass on the required action to the parent.
- *
- * These should only be called from the parent process itself, since the
- * parent process will use the shutdown_pending and restart_pending variables
- * to determine whether to shutdown or restart. The child process should
- * call signal_parent() directly to tell the parent to die -- this will
- * cause neither of those variable to be set, which the parent will
- * assume means something serious is wrong (which it will be, for the
- * child to force an exit) and so do an exit anyway.
- */
+static int child_fatal;
 
-static void ap_start_shutdown(int graceful)
+static apr_status_t decrement_connection_count(void *cs_)
 {
-    mpm_state = AP_MPMQ_STOPPING;
-    if (shutdown_pending == 1) {
-        /* Um, is this _probably_ not an error, if the user has
-         * tried to do a shutdown twice quickly, so we won't
-         * worry about reporting it.
-         */
-        return;
+    event_conn_state_t *cs = cs_;
+    switch (cs->pub.state) {
+        case CONN_STATE_LINGER_NORMAL:
+        case CONN_STATE_LINGER_SHORT:
+            apr_atomic_dec32(&lingering_count);
+            break;
+        case CONN_STATE_SUSPENDED:
+            apr_atomic_dec32(&suspended_count);
+            break;
+        default:
+            break;
+    }
+    /* Unblock the listener if it's waiting for connection_count = 0 */
+    if (!apr_atomic_dec32(&connection_count)
+             && listener_is_wakeable && listener_may_exit) {
+        apr_pollset_wakeup(event_pollset);
     }
-    shutdown_pending = 1;
-    is_graceful = graceful;
+    return APR_SUCCESS;
 }
 
-/* do a graceful restart if graceful == 1 */
-static void ap_start_restart(int graceful)
+static void notify_suspend(event_conn_state_t *cs)
 {
-    mpm_state = AP_MPMQ_STOPPING;
-    if (restart_pending == 1) {
-        /* Probably not an error - don't bother reporting it */
-        return;
+    ap_run_suspend_connection(cs->c, cs->r);
+    cs->suspended = 1;
+    cs->c->sbh = NULL;
+}
+
+static void notify_resume(event_conn_state_t *cs, ap_sb_handle_t *sbh)
+{
+    cs->c->sbh = sbh;
+    cs->suspended = 0;
+    ap_run_resume_connection(cs->c, cs->r);
+}
+
+/*
+ * Close our side of the connection, flushing data to the client first.
+ * Pre-condition: cs is not in any timeout queue and not in the pollset,
+ *                timeout_mutex is not locked
+ * return: 0 if connection is fully closed,
+ *         1 if connection is lingering
+ * May only be called by worker thread.
+ */
+static int start_lingering_close_blocking(event_conn_state_t *cs)
+{
+    apr_status_t rv;
+    struct timeout_queue *q;
+    apr_socket_t *csd = cs->pfd.desc.s;
+
+    if (ap_start_lingering_close(cs->c)) {
+        notify_suspend(cs);
+        apr_socket_close(csd);
+        ap_push_pool(worker_queue_info, cs->p);
+        return 0;
     }
-    restart_pending = 1;
-    is_graceful = graceful;
+
+#ifdef AP_DEBUG
+    {
+        rv = apr_socket_timeout_set(csd, 0);
+        AP_DEBUG_ASSERT(rv == APR_SUCCESS);
+    }
+#else
+    apr_socket_timeout_set(csd, 0);
+#endif
+
+    cs->queue_timestamp = apr_time_now();
+    /*
+     * If some module requested a shortened waiting period, only wait for
+     * 2s (SECONDS_TO_LINGER). This is useful for mitigating certain
+     * DoS attacks.
+     */
+    if (apr_table_get(cs->c->notes, "short-lingering-close")) {
+        q = short_linger_q;
+        cs->pub.state = CONN_STATE_LINGER_SHORT;
+    }
+    else {
+        q = linger_q;
+        cs->pub.state = CONN_STATE_LINGER_NORMAL;
+    }
+    apr_atomic_inc32(&lingering_count);
+    notify_suspend(cs);
+
+    cs->pfd.reqevents = (
+            cs->pub.sense == CONN_SENSE_WANT_WRITE ? APR_POLLOUT :
+                    APR_POLLIN) | APR_POLLHUP | APR_POLLERR;
+    cs->pub.sense = CONN_SENSE_DEFAULT;
+    apr_thread_mutex_lock(timeout_mutex);
+    TO_QUEUE_APPEND(q, cs);
+    rv = apr_pollset_add(event_pollset, &cs->pfd);
+    if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) {
+        TO_QUEUE_REMOVE(q, cs);
+        apr_thread_mutex_unlock(timeout_mutex);
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03092)
+                     "start_lingering_close: apr_pollset_add failure");
+        apr_socket_close(cs->pfd.desc.s);
+        ap_push_pool(worker_queue_info, cs->p);
+        return 0;
+    }
+    apr_thread_mutex_unlock(timeout_mutex);
+    return 1;
 }
 
-static void sig_term(int sig)
+/*
+ * Defer flush and close of the connection by adding it to defer_linger_chain,
+ * for a worker to grab it and do the job (should that be blocking).
+ * Pre-condition: cs is not in any timeout queue and not in the pollset,
+ *                timeout_mutex is not locked
+ * return: 1 connection is alive (but aside and about to linger)
+ * May be called by listener thread.
+ */
+static int start_lingering_close_nonblocking(event_conn_state_t *cs)
 {
-    ap_start_shutdown(sig == AP_SIG_GRACEFUL_STOP);
+    event_conn_state_t *chain;
+    for (;;) {
+        cs->chain = chain = defer_linger_chain;
+        if (apr_atomic_casptr((void *)&defer_linger_chain, cs,
+                              chain) != chain) {
+            /* Race lost, try again */
+            continue;
+        }
+        return 1;
+    }
 }
 
-static void restart(int sig)
+/*
+ * forcibly close a lingering connection after the lingering period has
+ * expired
+ * Pre-condition: cs is not in any timeout queue and not in the pollset
+ * return: irrelevant (need same prototype as start_lingering_close)
+ */
+static int stop_lingering_close(event_conn_state_t *cs)
 {
-    ap_start_restart(sig == AP_SIG_GRACEFUL);
+    apr_socket_t *csd = ap_get_conn_socket(cs->c);
+    ap_log_error(APLOG_MARK, APLOG_TRACE4, 0, ap_server_conf,
+                 "socket reached timeout in lingering-close state");
+    abort_socket_nonblocking(csd);
+    ap_push_pool(worker_queue_info, cs->p);
+    if (dying)
+        ap_queue_interrupt_one(worker_queue);
+    return 0;
 }
 
-static void set_signals(void)
+/*
+ * This runs before any non-MPM cleanup code on the connection;
+ * if the connection is currently suspended as far as modules
+ * know, provide notification of resumption.
+ */
+static apr_status_t ptrans_pre_cleanup(void *dummy)
 {
-#ifndef NO_USE_SIGACTION
-    struct sigaction sa;
-#endif
+    event_conn_state_t *cs = dummy;
 
-    if (!one_process) {
-        ap_fatal_signal_setup(ap_server_conf, pconf);
+    if (cs->suspended) {
+        notify_resume(cs, NULL);
     }
+    return APR_SUCCESS;
+}
 
-#ifndef NO_USE_SIGACTION
-    sigemptyset(&sa.sa_mask);
-    sa.sa_flags = 0;
-
-    sa.sa_handler = sig_term;
-    if (sigaction(SIGTERM, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
-                     "sigaction(SIGTERM)");
-#ifdef AP_SIG_GRACEFUL_STOP
-    if (sigaction(AP_SIG_GRACEFUL_STOP, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
-                     "sigaction(" AP_SIG_GRACEFUL_STOP_STRING ")");
-#endif
-#ifdef SIGINT
-    if (sigaction(SIGINT, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
-                     "sigaction(SIGINT)");
-#endif
-#ifdef SIGXCPU
-    sa.sa_handler = SIG_DFL;
-    if (sigaction(SIGXCPU, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
-                     "sigaction(SIGXCPU)");
-#endif
-#ifdef SIGXFSZ
-    sa.sa_handler = SIG_DFL;
-    if (sigaction(SIGXFSZ, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
-                     "sigaction(SIGXFSZ)");
-#endif
-#ifdef SIGPIPE
-    sa.sa_handler = SIG_IGN;
-    if (sigaction(SIGPIPE, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
-                     "sigaction(SIGPIPE)");
-#endif
+/*
+ * event_pre_read_request() and event_request_cleanup() track the
+ * current r for a given connection.
+ */
+static apr_status_t event_request_cleanup(void *dummy)
+{
+    conn_rec *c = dummy;
+    event_conn_state_t *cs = ap_get_module_config(c->conn_config,
+                                                  &mpm_event_module);
 
-    /* we want to ignore HUPs and AP_SIG_GRACEFUL while we're busy
-     * processing one */
-    sigaddset(&sa.sa_mask, SIGHUP);
-    sigaddset(&sa.sa_mask, AP_SIG_GRACEFUL);
-    sa.sa_handler = restart;
-    if (sigaction(SIGHUP, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
-                     "sigaction(SIGHUP)");
-    if (sigaction(AP_SIG_GRACEFUL, &sa, NULL) < 0)
-        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
-                     "sigaction(" AP_SIG_GRACEFUL_STRING ")");
-#else
-    if (!one_process) {
-#ifdef SIGXCPU
-        apr_signal(SIGXCPU, SIG_DFL);
-#endif /* SIGXCPU */
-#ifdef SIGXFSZ
-        apr_signal(SIGXFSZ, SIG_DFL);
-#endif /* SIGXFSZ */
-    }
-
-    apr_signal(SIGTERM, sig_term);
-#ifdef SIGHUP
-    apr_signal(SIGHUP, restart);
-#endif /* SIGHUP */
-#ifdef AP_SIG_GRACEFUL
-    apr_signal(AP_SIG_GRACEFUL, restart);
-#endif /* AP_SIG_GRACEFUL */
-#ifdef AP_SIG_GRACEFUL_STOP
-     apr_signal(AP_SIG_GRACEFUL_STOP, sig_term);
-#endif /* AP_SIG_GRACEFUL_STOP */
-#ifdef SIGPIPE
-    apr_signal(SIGPIPE, SIG_IGN);
-#endif /* SIGPIPE */
+    cs->r = NULL;
+    return APR_SUCCESS;
+}
 
-#endif
+static void event_pre_read_request(request_rec *r, conn_rec *c)
+{
+    event_conn_state_t *cs = ap_get_module_config(c->conn_config,
+                                                  &mpm_event_module);
+
+    cs->r = r;
+    cs->sc = ap_get_module_config(ap_server_conf->module_config,
+                                  &mpm_event_module);
+    apr_pool_cleanup_register(r->pool, c, event_request_cleanup,
+                              apr_pool_cleanup_null);
 }
 
-/*****************************************************************
- * Child process main loop.
+/*
+ * event_post_read_request() tracks the current server config for a
+ * given request.
  */
+static int event_post_read_request(request_rec *r)
+{
+    conn_rec *c = r->connection;
+    event_conn_state_t *cs = ap_get_module_config(c->conn_config,
+                                                  &mpm_event_module);
+
+    /* To preserve legacy behaviour (consistent with other MPMs), use
+     * the keepalive timeout from the base server (first on this IP:port)
+     * when none is explicitly configured on this server.
+     */
+    if (r->server->keep_alive_timeout_set) {
+        cs->sc = ap_get_module_config(r->server->module_config,
+                                      &mpm_event_module);
+    }
+    else {
+        cs->sc = ap_get_module_config(c->base_server->module_config,
+                                      &mpm_event_module);
+    }
+    return OK;
+}
 
-static int process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock,
-                          conn_state_t * cs, int my_child_num,
+/*
+ * process one connection in the worker
+ */
+static void process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock,
+                          event_conn_state_t * cs, int my_child_num,
                           int my_thread_num)
 {
     conn_rec *c;
-    listener_poll_type *pt;
     long conn_id = ID_FROM_CHILD_THREAD(my_child_num, my_thread_num);
     int rc;
     ap_sb_handle_t *sbh;
 
+    /* XXX: This will cause unbounded mem usage for long lasting connections */
     ap_create_sb_handle(&sbh, p, my_child_num, my_thread_num);
 
     if (cs == NULL) {           /* This is a new connection */
-
-        cs = apr_pcalloc(p, sizeof(conn_state_t));
-
-        pt = apr_pcalloc(p, sizeof(*pt));
-
+        listener_poll_type *pt = apr_pcalloc(p, sizeof(*pt));
+        cs = apr_pcalloc(p, sizeof(event_conn_state_t));
         cs->bucket_alloc = apr_bucket_alloc_create(p);
         c = ap_run_create_connection(p, ap_server_conf, sock,
                                      conn_id, sbh, cs->bucket_alloc);
+        if (!c) {
+            ap_push_pool(worker_queue_info, p);
+            return;
+        }
+        apr_atomic_inc32(&connection_count);
+        apr_pool_cleanup_register(c->pool, cs, decrement_connection_count,
+                                  apr_pool_cleanup_null);
+        ap_set_module_config(c->conn_config, &mpm_event_module, cs);
         c->current_thread = thd;
         cs->c = c;
-        c->cs = cs;
+        c->cs = &(cs->pub);
         cs->p = p;
+        cs->sc = ap_get_module_config(ap_server_conf->module_config,
+                                      &mpm_event_module);
         cs->pfd.desc_type = APR_POLL_SOCKET;
         cs->pfd.reqevents = APR_POLLIN;
         cs->pfd.desc.s = sock;
         pt->type = PT_CSD;
-        pt->bypass_push = 1;
         pt->baton = cs;
         cs->pfd.client_data = pt;
-        APR_RING_ELEM_INIT(cs, timeout_list);
+        apr_pool_pre_cleanup_register(p, cs, ptrans_pre_cleanup);
+        TO_QUEUE_ELEM_INIT(cs);
 
         ap_update_vhost_given_ip(c);
 
         rc = ap_run_pre_connection(c, sock);
         if (rc != OK && rc != DONE) {
-            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
-                         "process_socket: connection aborted");
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(00469)
+                          "process_socket: connection aborted");
             c->aborted = 1;
         }
 
@@ -649,92 +983,108 @@ static int process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock
          * When the accept filter is active, sockets are kept in the
          * kernel until a HTTP request is received.
          */
-        cs->state = CONN_STATE_READ_REQUEST_LINE;
+        cs->pub.state = CONN_STATE_READ_REQUEST_LINE;
 
+        cs->pub.sense = CONN_SENSE_DEFAULT;
     }
     else {
         c = cs->c;
-        c->sbh = sbh;
-        pt = cs->pfd.client_data;
+        notify_resume(cs, sbh);
         c->current_thread = thd;
+        /* Subsequent request on a conn, and thread number is part of ID */
+        c->id = conn_id;
+
+        if (c->aborted) {
+            cs->pub.state = CONN_STATE_LINGER;
+        }
     }
 
-    if (c->clogging_input_filters && !c->aborted) {
-        /* Since we have an input filter which 'cloggs' the input stream,
-         * like mod_ssl, lets just do the normal read from input filters,
-         * like the Worker MPM does.
+    if (cs->pub.state == CONN_STATE_LINGER) {
+        /* do lingering close below */
+    }
+    else if (c->clogging_input_filters) {
+        /* Since we have an input filter which 'clogs' the input stream,
+         * like mod_ssl used to, lets just do the normal read from input
+         * filters, like the Worker MPM does. Filters that need to write
+         * where they would otherwise read, or read where they would
+         * otherwise write, should set the sense appropriately.
          */
+        apr_atomic_inc32(&clogged_count);
         ap_run_process_connection(c);
-        if (cs->state != CONN_STATE_SUSPENDED) {
-            cs->state = CONN_STATE_LINGER;
+        if (cs->pub.state != CONN_STATE_SUSPENDED) {
+            cs->pub.state = CONN_STATE_LINGER;
         }
+        apr_atomic_dec32(&clogged_count);
     }
-
+    else if (cs->pub.state == CONN_STATE_READ_REQUEST_LINE) {
 read_request:
-    if (cs->state == CONN_STATE_READ_REQUEST_LINE) {
-        if (!c->aborted) {
-            ap_run_process_connection(c);
+        ap_run_process_connection(c);
 
-            /* state will be updated upon return
-             * fall thru to either wait for readability/timeout or
-             * do lingering close
-             */
-        }
-        else {
-            cs->state = CONN_STATE_LINGER;
-        }
+        /* state will be updated upon return
+         * fall thru to either wait for readability/timeout or
+         * do lingering close
+         */
     }
 
-    if (cs->state == CONN_STATE_WRITE_COMPLETION) {
+    if (cs->pub.state == CONN_STATE_WRITE_COMPLETION) {
         ap_filter_t *output_filter = c->output_filters;
         apr_status_t rv;
+        ap_update_child_status(sbh, SERVER_BUSY_WRITE, NULL);
         while (output_filter->next != NULL) {
             output_filter = output_filter->next;
         }
         rv = output_filter->frec->filter_func.out_func(output_filter, NULL);
         if (rv != APR_SUCCESS) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, rv, ap_server_conf,
-                     "network write failure in core output filter");
-            cs->state = CONN_STATE_LINGER;
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(00470)
+                          "network write failure in core output filter");
+            cs->pub.state = CONN_STATE_LINGER;
         }
         else if (c->data_in_output_filters) {
             /* Still in WRITE_COMPLETION_STATE:
              * Set a write timeout for this connection, and let the
              * event thread poll for writeability.
              */
-            cs->expiration_time = ap_server_conf->timeout + apr_time_now();
+            cs->queue_timestamp = apr_time_now();
+            notify_suspend(cs);
+            cs->pfd.reqevents = (
+                    cs->pub.sense == CONN_SENSE_WANT_READ ? APR_POLLIN :
+                            APR_POLLOUT) | APR_POLLHUP | APR_POLLERR;
+            cs->pub.sense = CONN_SENSE_DEFAULT;
             apr_thread_mutex_lock(timeout_mutex);
-            APR_RING_INSERT_TAIL(&timeout_head, cs, conn_state_t, timeout_list);
-            apr_thread_mutex_unlock(timeout_mutex);
-            pt->bypass_push = 0;
-            cs->pfd.reqevents = APR_POLLOUT | APR_POLLHUP | APR_POLLERR;
+            TO_QUEUE_APPEND(cs->sc->wc_q, cs);
             rc = apr_pollset_add(event_pollset, &cs->pfd);
-            return 1;
+            if (rc != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rc)) {
+                TO_QUEUE_REMOVE(cs->sc->wc_q, cs);
+                apr_thread_mutex_unlock(timeout_mutex);
+                ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf, APLOGNO(03465)
+                             "process_socket: apr_pollset_add failure for "
+                             "write completion");
+                apr_socket_close(cs->pfd.desc.s);
+                ap_push_pool(worker_queue_info, cs->p);
+            }
+            else {
+                apr_thread_mutex_unlock(timeout_mutex);
+            }
+            return;
         }
         else if (c->keepalive != AP_CONN_KEEPALIVE || c->aborted ||
-            listener_may_exit) {
-            c->cs->state = CONN_STATE_LINGER;
+                 listener_may_exit) {
+            cs->pub.state = CONN_STATE_LINGER;
         }
         else if (c->data_in_input_filters) {
-            cs->state = CONN_STATE_READ_REQUEST_LINE;
+            cs->pub.state = CONN_STATE_READ_REQUEST_LINE;
             goto read_request;
         }
         else {
-            cs->state = CONN_STATE_CHECK_REQUEST_LINE_READABLE;
+            cs->pub.state = CONN_STATE_CHECK_REQUEST_LINE_READABLE;
         }
     }
 
-    if (cs->state == CONN_STATE_LINGER) {
-        ap_lingering_close(c);
-        apr_pool_clear(p);
-        ap_push_pool(worker_queue_info, p);
-        return 0;
+    if (cs->pub.state == CONN_STATE_LINGER) {
+        start_lingering_close_blocking(cs);
     }
-    else if (cs->state == CONN_STATE_CHECK_REQUEST_LINE_READABLE) {
-        apr_status_t rc;
-        listener_poll_type *pt = (listener_poll_type *) cs->pfd.client_data;
-
-        /* It greatly simplifies the logic to use a single timeout value here
+    else if (cs->pub.state == CONN_STATE_CHECK_REQUEST_LINE_READABLE) {
+        /* It greatly simplifies the logic to use a single timeout value per q
          * because the new element can just be added to the end of the list and
          * it will stay sorted in expiration time sequence.  If brand new
          * sockets are sent to the event thread for a readability check, this
@@ -742,36 +1092,66 @@ read_request:
          * timeout today.  With a normal client, the socket will be readable in
          * a few milliseconds anyway.
          */
-        cs->expiration_time = ap_server_conf->keep_alive_timeout +
-                              apr_time_now();
-        apr_thread_mutex_lock(timeout_mutex);
-        APR_RING_INSERT_TAIL(&keepalive_timeout_head, cs, conn_state_t, timeout_list);
-        apr_thread_mutex_unlock(timeout_mutex);
+        cs->queue_timestamp = apr_time_now();
+        notify_suspend(cs);
 
-        pt->bypass_push = 0;
         /* Add work to pollset. */
         cs->pfd.reqevents = APR_POLLIN;
+        apr_thread_mutex_lock(timeout_mutex);
+        TO_QUEUE_APPEND(cs->sc->ka_q, cs);
         rc = apr_pollset_add(event_pollset, &cs->pfd);
-
-        if (rc != APR_SUCCESS) {
-            ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
-                         "process_socket: apr_pollset_add failure");
-            AP_DEBUG_ASSERT(rc == APR_SUCCESS);
+        if (rc != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rc)) {
+            TO_QUEUE_REMOVE(cs->sc->ka_q, cs);
+            apr_thread_mutex_unlock(timeout_mutex);
+            ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf, APLOGNO(03093)
+                         "process_socket: apr_pollset_add failure for "
+                         "keep alive");
+            apr_socket_close(cs->pfd.desc.s);
+            ap_push_pool(worker_queue_info, cs->p);
+            return;
         }
+        apr_thread_mutex_unlock(timeout_mutex);
+    }
+    else if (cs->pub.state == CONN_STATE_SUSPENDED) {
+        apr_atomic_inc32(&suspended_count);
+        notify_suspend(cs);
     }
-    return 1;
 }
 
-/* requests_this_child has gone to zero or below.  See if the admin coded
-   "MaxRequestsPerChild 0", and keep going in that case.  Doing it this way
+/* conns_this_child has gone to zero or below.  See if the admin coded
+   "MaxConnectionsPerChild 0", and keep going in that case.  Doing it this way
    simplifies the hot path in worker_thread */
 static void check_infinite_requests(void)
 {
     if (ap_max_requests_per_child) {
+        ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
+                     "Stopping process due to MaxConnectionsPerChild");
         signal_threads(ST_GRACEFUL);
     }
     else {
-        requests_this_child = INT_MAX;  /* keep going */
+        /* keep going */
+        conns_this_child = APR_INT32_MAX;
+    }
+}
+
+static void close_listeners(int process_slot, int *closed)
+{
+    if (!*closed) {
+        int i;
+        disable_listensocks(process_slot);
+        ap_close_listeners_ex(my_bucket->listeners);
+        *closed = 1;
+        dying = 1;
+        ap_scoreboard_image->parent[process_slot].quiescing = 1;
+        for (i = 0; i < threads_per_child; ++i) {
+            ap_update_child_status_from_indexes(process_slot, i,
+                                                SERVER_GRACEFUL, NULL);
+        }
+        /* wake up the main thread */
+        kill(ap_my_pid, SIGTERM);
+
+        ap_free_idle_pools(worker_queue_info);
+        ap_queue_interrupt_all(worker_queue);
     }
 }
 
@@ -796,44 +1176,17 @@ static void dummy_signal_handler(int sig)
 }
 
 
-#if HAVE_SERF
-static apr_status_t s_socket_add(void *user_baton,
-                                 apr_pollfd_t *pfd,
-                                 void *serf_baton)
-{
-    s_baton_t *s = (s_baton_t*)user_baton;
-    /* XXXXX: recycle listener_poll_types */
-    listener_poll_type *pt = malloc(sizeof(*pt));
-    pt->type = PT_SERF;
-    pt->baton = serf_baton;
-    pfd->client_data = pt;
-    return apr_pollset_add(s->pollset, pfd);
-}
-
-static apr_status_t s_socket_remove(void *user_baton,
-                                    apr_pollfd_t *pfd,
-                                    void *serf_baton)
-{
-    s_baton_t *s = (s_baton_t*)user_baton;
-    listener_poll_type *pt = pfd->client_data;
-    free(pt);
-    return apr_pollset_remove(s->pollset, pfd);
-}
-#endif
-
 static apr_status_t init_pollset(apr_pool_t *p)
 {
-#if HAVE_SERF
-    s_baton_t *baton = NULL;
-#endif
     ap_listen_rec *lr;
     listener_poll_type *pt;
+    int i = 0;
 
-    APR_RING_INIT(&timeout_head, conn_state_t, timeout_list);
-    APR_RING_INIT(&keepalive_timeout_head, conn_state_t, timeout_list);
-
-    for (lr = ap_listeners; lr != NULL; lr = lr->next) {
-        apr_pollfd_t *pfd = apr_palloc(p, sizeof(*pfd));
+    listener_pollfd = apr_palloc(p, sizeof(apr_pollfd_t) * num_listensocks);
+    for (lr = my_bucket->listeners; lr != NULL; lr = lr->next, i++) {
+        apr_pollfd_t *pfd;
+        AP_DEBUG_ASSERT(i < num_listensocks);
+        pfd = &listener_pollfd[i];
         pt = apr_pcalloc(p, sizeof(*pt));
         pfd->desc_type = APR_POLL_SOCKET;
         pfd->desc.s = lr->sd;
@@ -850,21 +1203,6 @@ static apr_status_t init_pollset(apr_pool_t *p)
         lr->accept_func = ap_unixd_accept;
     }
 
-#if HAVE_SERF
-    baton = apr_pcalloc(p, sizeof(*baton));
-    baton->pollset = event_pollset;
-    /* TODO: subpools, threads, reuse, etc.  -- currently use malloc() inside :( */
-    baton->pool = p;
-
-    g_serf = serf_context_create_ex(baton,
-                                    s_socket_add,
-                                    s_socket_remove, p);
-
-    ap_register_provider(p, "mpm_serf",
-                         "instance", "0", g_serf);
-
-#endif
-
     return APR_SUCCESS;
 }
 
@@ -873,109 +1211,126 @@ static apr_status_t push_timer2worker(timer_event_t* te)
     return ap_queue_push_timer(worker_queue, te);
 }
 
-static apr_status_t push2worker(const apr_pollfd_t * pfd,
-                                apr_pollset_t * pollset)
+/*
+ * Pre-condition: cs is neither in event_pollset nor a timeout queue
+ * this function may only be called by the listener
+ */
+static apr_status_t push2worker(event_conn_state_t *cs, apr_socket_t *csd,
+                                apr_pool_t *ptrans)
 {
-    listener_poll_type *pt = (listener_poll_type *) pfd->client_data;
-    conn_state_t *cs = (conn_state_t *) pt->baton;
     apr_status_t rc;
 
-    if (pt->bypass_push) {
-        return APR_SUCCESS;
+    if (cs) {
+        csd = cs->pfd.desc.s;
+        ptrans = cs->p;
     }
-
-    pt->bypass_push = 1;
-
-    rc = apr_pollset_remove(pollset, pfd);
-
-    /*
-     * Some of the pollset backends, like KQueue or Epoll
-     * automagically remove the FD if the socket is closed,
-     * therefore, we can accept _SUCCESS or _NOTFOUND,
-     * and we still want to keep going
-     */
-    if (rc != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rc)) {
-        cs->state = CONN_STATE_LINGER;
-    }
-
-    rc = ap_queue_push(worker_queue, cs->pfd.desc.s, cs, cs->p);
+    rc = ap_queue_push(worker_queue, csd, cs, ptrans);
     if (rc != APR_SUCCESS) {
+        ap_log_error(APLOG_MARK, APLOG_CRIT, rc, ap_server_conf, APLOGNO(00471)
+                     "push2worker: ap_queue_push failed");
         /* trash the connection; we couldn't queue the connected
          * socket to a worker
          */
-        apr_bucket_alloc_destroy(cs->bucket_alloc);
-        apr_socket_close(cs->pfd.desc.s);
-        ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                     ap_server_conf, "push2worker: ap_queue_push failed");
-        apr_pool_clear(cs->p);
-        ap_push_pool(worker_queue_info, cs->p);
+        if (csd) {
+            abort_socket_nonblocking(csd);
+        }
+        if (ptrans) {
+            ap_push_pool(worker_queue_info, ptrans);
+        }
+        signal_threads(ST_GRACEFUL);
     }
 
     return rc;
 }
 
 /* get_worker:
- *     reserve a worker thread, block if all are currently busy.
- *     this prevents the worker queue from overflowing and lets
- *     other processes accept new connections in the mean time.
+ *     If *have_idle_worker_p == 0, reserve a worker thread, and set
+ *     *have_idle_worker_p = 1.
+ *     If *have_idle_worker_p is already 1, will do nothing.
+ *     If blocking == 1, block if all workers are currently busy.
+ *     If no worker was available immediately, will set *all_busy to 1.
+ *     XXX: If there are no workers, we should not block immediately but
+ *     XXX: close all keep-alive connections first.
  */
-static int get_worker(int *have_idle_worker_p)
+static void get_worker(int *have_idle_worker_p, int blocking, int *all_busy)
 {
     apr_status_t rc;
 
-    if (!*have_idle_worker_p) {
-        rc = ap_queue_info_wait_for_idler(worker_queue_info);
-
-        if (rc == APR_SUCCESS) {
-            *have_idle_worker_p = 1;
-            return 1;
-        }
-        else {
-            if (!APR_STATUS_IS_EOF(rc)) {
-                ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
-                             "ap_queue_info_wait_for_idler failed.  "
-                             "Attempting to shutdown process gracefully");
-                signal_threads(ST_GRACEFUL);
-            }
-            return 0;
-        }
-    }
-    else {
+    if (*have_idle_worker_p) {
         /* already reserved a worker thread - must have hit a
          * transient error on a previous pass
          */
-        return 1;
+        return;
     }
-}
 
-/* XXXXXX: Convert to skiplist or other better data structure 
- * (yes, this is VERY VERY VERY VERY BAD)
- */
+    if (blocking)
+        rc = ap_queue_info_wait_for_idler(worker_queue_info, all_busy);
+    else
+        rc = ap_queue_info_try_get_idler(worker_queue_info);
+
+    if (rc == APR_SUCCESS || APR_STATUS_IS_EOF(rc)) {
+        *have_idle_worker_p = 1;
+    }
+    else if (!blocking && rc == APR_EAGAIN) {
+        *all_busy = 1;
+    }
+    else {
+        ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf, APLOGNO(00472)
+                     "ap_queue_info_wait_for_idler failed.  "
+                     "Attempting to shutdown process gracefully");
+        signal_threads(ST_GRACEFUL);
+    }
+}
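For reference, a rough sketch of how the listener side is expected to pair get_worker() with push2worker() for a newly accepted connection, per the contract in the comment above; the real accept path in listener_thread() does considerably more (listener disabling, error handling), so treat this as illustrative only:

static void accept_and_dispatch(ap_listen_rec *lr, apr_pool_t *ptrans,
                                int *have_idle_worker)
{
    void *csd = NULL;
    int all_busy = 0;

    /* Reserve a worker before accepting so the queue cannot overflow. */
    get_worker(have_idle_worker, 1 /* blocking */, &all_busy);
    if (!*have_idle_worker) {
        return;
    }

    if (lr->accept_func(&csd, lr, ptrans) == APR_SUCCESS && csd) {
        if (push2worker(NULL, csd, ptrans) == APR_SUCCESS) {
            *have_idle_worker = 0;  /* reservation consumed */
        }
        /* on failure push2worker() already cleaned up; keep the reservation */
    }
}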
 
 /* Structures to reuse */
 static APR_RING_HEAD(timer_free_ring_t, timer_event_t) timer_free_ring;
-/* Active timers */
-static APR_RING_HEAD(timer_ring_t, timer_event_t) timer_ring;
 
-static apr_thread_mutex_t *g_timer_ring_mtx;
+static apr_skiplist *timer_skiplist;
+static volatile apr_time_t timers_next_expiry;
+
+/* Same goal as for TIMEOUT_FUDGE_FACTOR (avoid extra poll calls), but applied
+ * to timers. Since their timeouts are custom (user defined), we can't be too
+ * approximative here (hence using 0.01s).
+ */
+#define EVENT_FUDGE_FACTOR apr_time_from_msec(10)
+
+/* The following compare function is used by apr_skiplist_insert() to keep the
+ * elements (timers) sorted and provide O(log n) complexity (this is also true
+ * for apr_skiplist_{find,remove}(), but those are not used in MPM event where
+ * inserted timers are not searched nor removed, but with apr_skiplist_pop()
+ * which does use any compare function).  It is meant to return 0 when a == b,
+ * <0 when a < b, and >0 when a > b.  However apr_skiplist_insert() will not
+ * add duplicates (i.e. a == b), and apr_skiplist_add() is only available in
+ * APR 1.6, yet multiple timers could possibly be created in the same micro-
+ * second (duplicates with regard to apr_time_t); therefore we implement the
+ * compare function to return +1 instead of 0 when compared timers are equal,
+ * thus duplicates are still added after each other (in order of insertion).
+ */
+static int timer_comp(void *a, void *b)
+{
+    apr_time_t t1 = (apr_time_t) ((timer_event_t *)a)->when;
+    apr_time_t t2 = (apr_time_t) ((timer_event_t *)b)->when;
+    AP_DEBUG_ASSERT(t1);
+    AP_DEBUG_ASSERT(t2);
+    return ((t1 < t2) ? -1 : 1);
+}
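Since timer_comp() never returns 0, two timers registered for the same microsecond are both kept and come back out in insertion order. A small, self-contained sketch of that behaviour (assuming the apr_skiplist API from APR 1.5+; purely illustrative):

static void timer_duplicate_demo(apr_pool_t *p)
{
    apr_skiplist *sl;
    timer_event_t t1 = { 0 }, t2 = { 0 };

    t1.when = t2.when = apr_time_now() + apr_time_from_sec(1); /* duplicates */

    apr_skiplist_init(&sl, p);
    apr_skiplist_set_compare(sl, timer_comp, timer_comp);
    apr_skiplist_insert(sl, &t1);
    apr_skiplist_insert(sl, &t2);   /* kept, not treated as a duplicate */

    /* pop returns the "smallest" first, i.e. insertion order for equals */
    AP_DEBUG_ASSERT(apr_skiplist_pop(sl, NULL) == (void *)&t1);
    AP_DEBUG_ASSERT(apr_skiplist_pop(sl, NULL) == (void *)&t2);
}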
+
+static apr_thread_mutex_t *g_timer_skiplist_mtx;
 
 static apr_status_t event_register_timed_callback(apr_time_t t,
                                                   ap_mpm_callback_fn_t *cbfn,
                                                   void *baton)
 {
-    int inserted = 0;
-    timer_event_t *ep;
     timer_event_t *te;
     /* oh yeah, and make locking smarter/fine grained. */
-    apr_thread_mutex_lock(g_timer_ring_mtx);
+    apr_thread_mutex_lock(g_timer_skiplist_mtx);
 
     if (!APR_RING_EMPTY(&timer_free_ring, timer_event_t, link)) {
         te = APR_RING_FIRST(&timer_free_ring);
         APR_RING_REMOVE(te, link);
     }
     else {
-        /* XXXXX: lol, pool allocation without a context from any thread.Yeah. Right. MPMs Suck. */
-        te = malloc(sizeof(timer_event_t));
+        te = apr_skiplist_alloc(timer_skiplist, sizeof(timer_event_t));
         APR_RING_ELEM_INIT(te, link);
     }
 
@@ -984,58 +1339,190 @@ static apr_status_t event_register_timed_callback(apr_time_t t,
     /* XXXXX: optimize */
     te->when = t + apr_time_now();
 
-    /* Okay, insert sorted by when.. */
-    for (ep = APR_RING_FIRST(&timer_ring);
-         ep != APR_RING_SENTINEL(&timer_ring,
-                                 timer_event_t, link);
-         ep = APR_RING_NEXT(ep, link))
-    {
-        if (ep->when > te->when) {
-            inserted = 1;
-            APR_RING_INSERT_BEFORE(ep, te, link);
-            break;
+    { 
+        apr_time_t next_expiry;
+
+        /* Okay, add sorted by when.. */
+        apr_skiplist_insert(timer_skiplist, te);
+
+        /* Cheaply update the overall timers' next expiry according to
+         * this event, if necessary.
+         */
+        next_expiry = timers_next_expiry;
+        if (!next_expiry || next_expiry > te->when + EVENT_FUDGE_FACTOR) {
+            timers_next_expiry = te->when;
+            /* Unblock the poll()ing listener for it to update its timeout. */
+            if (listener_is_wakeable) {
+                apr_pollset_wakeup(event_pollset);
+            }
         }
     }
-    
-    if (!inserted) {
-        APR_RING_INSERT_TAIL(&timer_ring, te, timer_event_t, link);
-    }
 
-    apr_thread_mutex_unlock(g_timer_ring_mtx);
+    apr_thread_mutex_unlock(g_timer_skiplist_mtx);
 
     return APR_SUCCESS;
 }
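This is the MPM-side implementation behind the public ap_mpm_register_timed_callback() API from ap_mpm.h; the delay passed in is relative (note the t + apr_time_now() above). A minimal usage sketch from a hypothetical module, with made-up names:

typedef struct { const char *tag; } my_baton_t;

static void my_timer_cb(void *baton)
{
    my_baton_t *b = baton;
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
                 "timed callback fired for %s", b->tag);
}

static void schedule_example(apr_pool_t *p)
{
    my_baton_t *b = apr_pcalloc(p, sizeof(*b));
    b->tag = "example";
    /* fire roughly two seconds from now */
    ap_mpm_register_timed_callback(apr_time_from_sec(2), my_timer_cb, b);
}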
 
+
+/*
+ * Close socket and clean up if remote closed its end while we were in
+ * lingering close.
+ * Only to be called in the listener thread;
+ * Pre-condition: cs is in one of the linger queues and in the pollset
+ */
+static void process_lingering_close(event_conn_state_t *cs, const apr_pollfd_t *pfd)
+{
+    apr_socket_t *csd = ap_get_conn_socket(cs->c);
+    char dummybuf[2048];
+    apr_size_t nbytes;
+    apr_status_t rv;
+    struct timeout_queue *q;
+    q = (cs->pub.state == CONN_STATE_LINGER_SHORT) ? short_linger_q : linger_q;
+
+    /* socket is already in non-blocking state */
+    do {
+        nbytes = sizeof(dummybuf);
+        rv = apr_socket_recv(csd, dummybuf, &nbytes);
+    } while (rv == APR_SUCCESS);
+
+    if (APR_STATUS_IS_EAGAIN(rv)) {
+        return;
+    }
+
+    apr_thread_mutex_lock(timeout_mutex);
+    TO_QUEUE_REMOVE(q, cs);
+    rv = apr_pollset_remove(event_pollset, pfd);
+    apr_thread_mutex_unlock(timeout_mutex);
+    AP_DEBUG_ASSERT(rv == APR_SUCCESS ||  APR_STATUS_IS_NOTFOUND(rv));
+
+    rv = apr_socket_close(csd);
+    AP_DEBUG_ASSERT(rv == APR_SUCCESS);
+
+    ap_push_pool(worker_queue_info, cs->p);
+    if (dying)
+        ap_queue_interrupt_one(worker_queue);
+}
+
+/* call 'func' for all elements of 'q' with timeout less than 'timeout_time'.
+ * Pre-condition: timeout_mutex must already be locked
+ * Post-condition: timeout_mutex will be locked again
+ */
+static void process_timeout_queue(struct timeout_queue *q,
+                                  apr_time_t timeout_time,
+                                  int (*func)(event_conn_state_t *))
+{
+    apr_uint32_t total = 0, count;
+    event_conn_state_t *first, *cs, *last;
+    struct timeout_head_t trash;
+    struct timeout_queue *qp;
+    apr_status_t rv;
+
+    if (!apr_atomic_read32(q->total)) {
+        return;
+    }
+
+    APR_RING_INIT(&trash, event_conn_state_t, timeout_list);
+    for (qp = q; qp; qp = qp->next) {
+        count = 0;
+        cs = first = last = APR_RING_FIRST(&qp->head);
+        while (cs != APR_RING_SENTINEL(&qp->head, event_conn_state_t,
+                                       timeout_list)) {
+            /* Trash the entry if:
+             * - no timeout_time was given (asked for all), or
+             * - it expired (according to the queue timeout), or
+             * - the system clock skewed into the past: no entry should be
+             *   registered beyond the given timeout_time (~now) + the queue
+             *   timeout, so we won't keep any here (e.g. for centuries).
+             *
+             * Otherwise stop: no following entry will match, thanks to the
+             * single timeout per queue (entries are added to the end!).
+             * This allows maintenance in O(1).
+             */
+            if (timeout_time
+                    && cs->queue_timestamp + qp->timeout > timeout_time
+                    && cs->queue_timestamp < timeout_time + qp->timeout) {
+                /* Since this is the next entry of this queue to expire, update
+                 * the overall queues' next expiry if it's later than this one.
+                 */
+                apr_time_t q_expiry = cs->queue_timestamp + qp->timeout;
+                apr_time_t next_expiry = queues_next_expiry;
+                if (!next_expiry || next_expiry > q_expiry) {
+                    queues_next_expiry = q_expiry;
+                }
+                break;
+            }
+
+            last = cs;
+            rv = apr_pollset_remove(event_pollset, &cs->pfd);
+            if (rv != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rv)) {
+                ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, cs->c, APLOGNO(00473)
+                              "apr_pollset_remove failed");
+            }
+            cs = APR_RING_NEXT(cs, timeout_list);
+            count++;
+        }
+        if (!count)
+            continue;
+
+        APR_RING_UNSPLICE(first, last, timeout_list);
+        APR_RING_SPLICE_TAIL(&trash, first, last, event_conn_state_t,
+                             timeout_list);
+        AP_DEBUG_ASSERT(apr_atomic_read32(q->total) >= count);
+        apr_atomic_sub32(q->total, count);
+        qp->count -= count;
+        total += count;
+    }
+    if (!total)
+        return;
+
+    apr_thread_mutex_unlock(timeout_mutex);
+    first = APR_RING_FIRST(&trash);
+    do {
+        cs = APR_RING_NEXT(first, timeout_list);
+        TO_QUEUE_ELEM_INIT(first);
+        func(first);
+        first = cs;
+    } while (--total);
+    apr_thread_mutex_lock(timeout_mutex);
+}
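
The O(1) claim in the comment above rests on two invariants: each queue has a single timeout, and entries are appended at the tail, so a queue stays ordered by expiry and the scan can stop at the first entry that has not yet timed out (everything expired is spliced onto a local trash ring and handled after dropping timeout_mutex). A minimal sketch of that head-expiration invariant, with illustrative types rather than this file's:

    #include "apr_time.h"

    struct entry {
        apr_time_t queued;            /* when the entry was appended */
        struct entry *next;
    };

    /* Expire entries from the head until one is still live; entries were
     * appended in arrival order, so no later entry can be expired either. */
    static struct entry *expire_head(struct entry *head, apr_time_t timeout,
                                     apr_time_t now,
                                     void (*expire)(struct entry *))
    {
        while (head && head->queued + timeout <= now) {
            struct entry *e = head;
            head = head->next;
            expire(e);
        }
        return head;                  /* first unexpired entry, the new head */
    }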
+
+static void process_keepalive_queue(apr_time_t timeout_time)
+{
+    /* If all workers are busy, we kill older keep-alive connections so
+     * that their clients may reconnect to another process.
+     */
+    if (!timeout_time) {
+        ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
+                     "All workers are busy or dying, will close %u "
+                     "keep-alive connections",
+                     apr_atomic_read32(keepalive_q->total));
+    }
+    process_timeout_queue(keepalive_q, timeout_time,
+                          start_lingering_close_nonblocking);
+}
+
 static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
 {
-    timer_event_t *ep;
     timer_event_t *te;
     apr_status_t rc;
     proc_info *ti = dummy;
-    int process_slot = ti->pid;
+    int process_slot = ti->pslot;
+    struct process_score *ps = ap_get_scoreboard_process(process_slot);
     apr_pool_t *tpool = apr_thread_pool_get(thd);
     void *csd = NULL;
     apr_pool_t *ptrans;         /* Pool for per-transaction stuff */
     ap_listen_rec *lr;
     int have_idle_worker = 0;
-    conn_state_t *cs;
     const apr_pollfd_t *out_pfd;
     apr_int32_t num = 0;
-    apr_time_t time_now = 0;
     apr_interval_time_t timeout_interval;
-    apr_time_t timeout_time;
+    apr_time_t timeout_time = 0, now, last_log;
     listener_poll_type *pt;
+    int closed = 0, listeners_disabled = 0;
 
+    last_log = apr_time_now();
     free(ti);
 
-    /* the following times out events that are really close in the future
-     *   to prevent extra poll calls
-     *
-     * current value is .1 second
-     */
-#define TIMEOUT_FUDGE_FACTOR 100000
-#define EVENT_FUDGE_FACTOR 10000
-
     rc = init_pollset(tpool);
     if (rc != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
@@ -1051,177 +1538,258 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
     unblock_signal(LISTENER_SIGNAL);
     apr_signal(LISTENER_SIGNAL, dummy_signal_handler);
 
-    while (!listener_may_exit) {
+    for (;;) {
+        int workers_were_busy = 0;
 
-        if (requests_this_child <= 0) {
-            check_infinite_requests();
+        if (listener_may_exit) {
+            close_listeners(process_slot, &closed);
+            if (terminate_mode == ST_UNGRACEFUL
+                || apr_atomic_read32(&connection_count) == 0)
+                break;
         }
 
+        if (conns_this_child <= 0)
+            check_infinite_requests();
 
-        {
-            apr_time_t now = apr_time_now();
-            apr_thread_mutex_lock(g_timer_ring_mtx);
+        now = apr_time_now();
+        if (APLOGtrace6(ap_server_conf)) {
+            /* trace log status every second */
+            if (now - last_log > apr_time_from_sec(1)) {
+                last_log = now;
+                apr_thread_mutex_lock(timeout_mutex);
+                ap_log_error(APLOG_MARK, APLOG_TRACE6, 0, ap_server_conf,
+                             "connections: %u (clogged: %u write-completion: %d "
+                             "keep-alive: %d lingering: %d suspended: %u)",
+                             apr_atomic_read32(&connection_count),
+                             apr_atomic_read32(&clogged_count),
+                             apr_atomic_read32(write_completion_q->total),
+                             apr_atomic_read32(keepalive_q->total),
+                             apr_atomic_read32(&lingering_count),
+                             apr_atomic_read32(&suspended_count));
+                if (dying) {
+                    ap_log_error(APLOG_MARK, APLOG_TRACE6, 0, ap_server_conf,
+                                 "%u/%u workers shutdown",
+                                 apr_atomic_read32(&threads_shutdown),
+                                 threads_per_child);
+                }
+                apr_thread_mutex_unlock(timeout_mutex);
+            }
+        }
+
+        /* Start with an infinite poll() timeout and update it according to
+         * the next expiring timer or queue entry. If there are none, then
+         * either the listener is wakeable and can poll() indefinitely until
+         * a wake-up occurs, or periodic checks (maintenance, shutdown, ...)
+         * must be performed.
+         */
+        timeout_interval = -1;
 
-            if (!APR_RING_EMPTY(&timer_ring, timer_event_t, link)) {
-                te = APR_RING_FIRST(&timer_ring);
-                if (te->when > now) {
+        /* Push expired timers to a worker; the first remaining one determines
+         * the maximum time to poll() below, if any.
+         */
+        timeout_time = timers_next_expiry;
+        if (timeout_time && timeout_time < now + EVENT_FUDGE_FACTOR) {
+            apr_thread_mutex_lock(g_timer_skiplist_mtx);
+            while ((te = apr_skiplist_peek(timer_skiplist))) {
+                if (te->when > now + EVENT_FUDGE_FACTOR) {
+                    timers_next_expiry = te->when;
                     timeout_interval = te->when - now;
+                    break;
                 }
-                else {
-                    timeout_interval = 1;
-                }
+                apr_skiplist_pop(timer_skiplist, NULL);
+                push_timer2worker(te);
             }
-            else {
-                timeout_interval = apr_time_from_msec(100);
+            if (!te) {
+                timers_next_expiry = 0;
             }
-            apr_thread_mutex_unlock(g_timer_ring_mtx);
+            apr_thread_mutex_unlock(g_timer_skiplist_mtx);
         }
 
-#if HAVE_SERF
-        rc = serf_context_prerun(g_serf);
-        if (rc != APR_SUCCESS) {
-            /* TOOD: what should do here? ugh. */
+        /* Same for queues, use their next expiry, if any. */
+        timeout_time = queues_next_expiry;
+        if (timeout_time
+                && (timeout_interval < 0
+                    || timeout_time <= now
+                    || timeout_interval > timeout_time - now)) {
+            timeout_interval = timeout_time > now ? timeout_time - now : 1;
         }
-        
-#endif
-        rc = apr_pollset_poll(event_pollset, timeout_interval, &num,
-                              &out_pfd);
 
+        /* When non-wakeable, don't wait more than 100 ms, in any case. */
+#define NON_WAKEABLE_POLL_TIMEOUT apr_time_from_msec(100)
+        if (!listener_is_wakeable
+                && (timeout_interval < 0
+                    || timeout_interval > NON_WAKEABLE_POLL_TIMEOUT)) {
+            timeout_interval = NON_WAKEABLE_POLL_TIMEOUT;
+        }
+
+        rc = apr_pollset_poll(event_pollset, timeout_interval, &num, &out_pfd);
         if (rc != APR_SUCCESS) {
             if (APR_STATUS_IS_EINTR(rc)) {
-                continue;
+                /* Woken up; if we are exiting we must fall through to kill
+                 * kept-alive connections, otherwise we only need to update
+                 * timeouts (logic is above, so restart the loop).
+                 */
+                if (!listener_may_exit) {
+                    continue;
+                }
+                timeout_time = 0;
             }
-            if (!APR_STATUS_IS_TIMEUP(rc)) {
-                ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
+            else if (!APR_STATUS_IS_TIMEUP(rc)) {
+                ap_log_error(APLOG_MARK, APLOG_CRIT, rc, ap_server_conf,
                              "apr_pollset_poll failed.  Attempting to "
                              "shutdown process gracefully");
                 signal_threads(ST_GRACEFUL);
             }
+            num = 0;
         }
 
-        if (listener_may_exit)
-            break;
-
-        {
-            apr_time_t now = apr_time_now();
-            apr_thread_mutex_lock(g_timer_ring_mtx);
-            for (ep = APR_RING_FIRST(&timer_ring);
-                 ep != APR_RING_SENTINEL(&timer_ring,
-                                         timer_event_t, link);
-                 ep = APR_RING_FIRST(&timer_ring))
-            {
-                if (ep->when < now + EVENT_FUDGE_FACTOR) {
-                    APR_RING_REMOVE(ep, link);
-                    push_timer2worker(ep);
-                }
-                else {
-                    break;
-                }
-            }
-            apr_thread_mutex_unlock(g_timer_ring_mtx);
+        if (listener_may_exit) {
+            close_listeners(process_slot, &closed);
+            if (terminate_mode == ST_UNGRACEFUL
+                || apr_atomic_read32(&connection_count) == 0)
+                break;
         }
 
-        while (num && get_worker(&have_idle_worker)) {
+        while (num) {
             pt = (listener_poll_type *) out_pfd->client_data;
             if (pt->type == PT_CSD) {
                 /* one of the sockets is readable */
-                cs = (conn_state_t *) pt->baton;
-                switch (cs->state) {
+                event_conn_state_t *cs = (event_conn_state_t *) pt->baton;
+                struct timeout_queue *remove_from_q = cs->sc->wc_q;
+                int blocking = 1;
+
+                switch (cs->pub.state) {
                 case CONN_STATE_CHECK_REQUEST_LINE_READABLE:
-                    cs->state = CONN_STATE_READ_REQUEST_LINE;
-                    break;
+                    cs->pub.state = CONN_STATE_READ_REQUEST_LINE;
+                    remove_from_q = cs->sc->ka_q;
+                    /* don't wait for a worker for a keepalive request */
+                    blocking = 0;
+                    /* FALL THROUGH */
                 case CONN_STATE_WRITE_COMPLETION:
+                    get_worker(&have_idle_worker, blocking,
+                               &workers_were_busy);
+                    apr_thread_mutex_lock(timeout_mutex);
+                    TO_QUEUE_REMOVE(remove_from_q, cs);
+                    rc = apr_pollset_remove(event_pollset, &cs->pfd);
+                    apr_thread_mutex_unlock(timeout_mutex);
+
+                    /*
+                     * Some of the pollset backends, like KQueue or Epoll,
+                     * automatically remove the FD if the socket is closed;
+                     * therefore we can accept _SUCCESS or _NOTFOUND
+                     * and still want to keep going.
+                     */
+                    if (rc != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rc)) {
+                        ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
+                                     APLOGNO(03094) "pollset remove failed");
+                        start_lingering_close_nonblocking(cs);
+                        break;
+                    }
+
+                    /* If we didn't get a worker immediately for a keep-alive
+                     * request, we close the connection, so that the client can
+                     * re-connect to a different process.
+                     */
+                    if (!have_idle_worker) {
+                        start_lingering_close_nonblocking(cs);
+                    }
+                    else if (push2worker(cs, NULL, NULL) == APR_SUCCESS) {
+                        have_idle_worker = 0;
+                    }
                     break;
-                default:
-                    ap_log_error(APLOG_MARK, APLOG_ERR, rc,
-                                 ap_server_conf,
-                                 "event_loop: unexpected state %d",
-                                 cs->state);
-                    AP_DEBUG_ASSERT(0);
-                }
 
-                apr_thread_mutex_lock(timeout_mutex);
-                APR_RING_REMOVE(cs, timeout_list);
-                apr_thread_mutex_unlock(timeout_mutex);
-                APR_RING_ELEM_INIT(cs, timeout_list);
+                case CONN_STATE_LINGER_NORMAL:
+                case CONN_STATE_LINGER_SHORT:
+                    process_lingering_close(cs, out_pfd);
+                    break;
 
-                rc = push2worker(out_pfd, event_pollset);
-                if (rc != APR_SUCCESS) {
+                default:
                     ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                                 ap_server_conf, "push2worker failed");
-                }
-                else {
-                    have_idle_worker = 0;
+                                 ap_server_conf, APLOGNO(03096)
+                                 "event_loop: unexpected state %d",
+                                 cs->pub.state);
+                    ap_assert(0);
                 }
             }
             else if (pt->type == PT_ACCEPT) {
                 /* A Listener Socket is ready for an accept() */
+                if (workers_were_busy) {
+                    if (!listeners_disabled)
+                        disable_listensocks(process_slot);
+                    listeners_disabled = 1;
+                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
+                                 "All workers busy, not accepting new conns "
+                                 "in this process");
+                }
+                else if (  (int)apr_atomic_read32(&connection_count)
+                           - (int)apr_atomic_read32(&lingering_count)
+                         > threads_per_child
+                           + ap_queue_info_get_idlers(worker_queue_info) *
+                             worker_factor / WORKER_FACTOR_SCALE)
+                {
+                    if (!listeners_disabled)
+                        disable_listensocks(process_slot);
+                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
+                                 "Too many open connections (%u), "
+                                 "not accepting new conns in this process",
+                                 apr_atomic_read32(&connection_count));
+                    ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
+                                 "Idle workers: %u",
+                                 ap_queue_info_get_idlers(worker_queue_info));
+                    listeners_disabled = 1;
+                }
+                else if (listeners_disabled) {
+                    listeners_disabled = 0;
+                    enable_listensocks(process_slot);
+                }
+                if (!listeners_disabled) {
+                    lr = (ap_listen_rec *) pt->baton;
+                    ap_pop_pool(&ptrans, worker_queue_info);
 
-                lr = (ap_listen_rec *) pt->baton;
-
-                ap_pop_pool(&ptrans, worker_queue_info);
-
-                if (ptrans == NULL) {
-                    /* create a new transaction pool for each accepted socket */
-                    apr_allocator_t *allocator;
-
-                    apr_allocator_create(&allocator);
-                    apr_allocator_max_free_set(allocator,
-                                               ap_max_mem_free);
-                    apr_pool_create_ex(&ptrans, pconf, NULL, allocator);
-                    apr_allocator_owner_set(allocator, ptrans);
                     if (ptrans == NULL) {
-                        ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                                     ap_server_conf,
-                                     "Failed to create transaction pool");
-                        signal_threads(ST_GRACEFUL);
-                        return NULL;
+                        /* create a new transaction pool for each accepted socket */
+                        apr_allocator_t *allocator;
+
+                        apr_allocator_create(&allocator);
+                        apr_allocator_max_free_set(allocator,
+                                                   ap_max_mem_free);
+                        apr_pool_create_ex(&ptrans, pconf, NULL, allocator);
+                        apr_allocator_owner_set(allocator, ptrans);
+                        if (ptrans == NULL) {
+                            ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
+                                         ap_server_conf, APLOGNO(03097)
+                                         "Failed to create transaction pool");
+                            signal_threads(ST_GRACEFUL);
+                            return NULL;
+                        }
                     }
-                }
-                apr_pool_tag(ptrans, "transaction");
+                    apr_pool_tag(ptrans, "transaction");
 
-                rc = lr->accept_func(&csd, lr, ptrans);
+                    get_worker(&have_idle_worker, 1, &workers_were_busy);
+                    rc = lr->accept_func(&csd, lr, ptrans);
 
-                /* later we trash rv and rely on csd to indicate
-                 * success/failure
-                 */
-                AP_DEBUG_ASSERT(rc == APR_SUCCESS || !csd);
+                    /* later we trash rv and rely on csd to indicate
+                     * success/failure
+                     */
+                    AP_DEBUG_ASSERT(rc == APR_SUCCESS || !csd);
 
-                if (rc == APR_EGENERAL) {
-                    /* E[NM]FILE, ENOMEM, etc */
-                    resource_shortage = 1;
-                    signal_threads(ST_GRACEFUL);
-                }
+                    if (rc == APR_EGENERAL) {
+                        /* E[NM]FILE, ENOMEM, etc */
+                        resource_shortage = 1;
+                        signal_threads(ST_GRACEFUL);
+                    }
 
-                if (csd != NULL) {
-                    rc = ap_queue_push(worker_queue, csd, NULL, ptrans);
-                    if (rc != APR_SUCCESS) {
-                        /* trash the connection; we couldn't queue the connected
-                         * socket to a worker
-                         */
-                        apr_socket_close(csd);
-                        ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                                     ap_server_conf,
-                                     "ap_queue_push failed");
-                        apr_pool_clear(ptrans);
-                        ap_push_pool(worker_queue_info, ptrans);
+                    if (csd != NULL) {
+                        conns_this_child--;
+                        if (push2worker(NULL, csd, ptrans) == APR_SUCCESS) {
+                            have_idle_worker = 0;
+                        }
                     }
                     else {
-                        have_idle_worker = 0;
+                        ap_push_pool(worker_queue_info, ptrans);
                     }
                 }
-                else {
-                    apr_pool_clear(ptrans);
-                    ap_push_pool(worker_queue_info, ptrans);
-                }
             }               /* if:else on pt->type */
-#if HAVE_SERF
-            else if (pt->type == PT_SERF) {
-                /* send socket to serf. */
-                /* XXXX: this doesn't require get_worker(&have_idle_worker) */
-                serf_event_trigger(g_serf, pt->baton, out_pfd);
-            }
-#endif
             out_pfd++;
             num--;
         }                   /* while for processing poll */
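
The accept-throttling check earlier in this loop compares the number of non-lingering connections against ThreadsPerChild plus the idle workers scaled by AsyncRequestWorkerFactor (stored pre-multiplied by WORKER_FACTOR_SCALE, i.e. 16). A hypothetical helper with a worked example makes the arithmetic concrete; it is illustrative only and not part of this diff:

    #include "apr.h"

    /* Mirror of the "too many open connections" test above.
     * Example: ThreadsPerChild 25, AsyncRequestWorkerFactor 2 (factor16 = 32),
     * 10 idle workers, 70 connections of which 5 are lingering:
     *   70 - 5 = 65  >  25 + 10 * 32 / 16 = 45  ->  stop accepting for now.
     */
    static int too_many_connections(apr_uint32_t connections,
                                    apr_uint32_t lingering,
                                    apr_uint32_t idlers,
                                    int threads_per_child,
                                    unsigned int factor16)
    {
        return (int)connections - (int)lingering
               > threads_per_child + (int)(idlers * factor16 / 16);
    }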
@@ -1229,85 +1797,123 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
         /* XXX possible optimization: stash the current time for use as
          * r->request_time for new requests
          */
-        time_now = apr_time_now();
-
-        /* handle timed out sockets */
-        apr_thread_mutex_lock(timeout_mutex);
-
-        /* Step 1: keepalive timeouts */
-        cs = APR_RING_FIRST(&keepalive_timeout_head);
-        timeout_time = time_now + TIMEOUT_FUDGE_FACTOR;
-        while (!APR_RING_EMPTY(&keepalive_timeout_head, conn_state_t, timeout_list)
-               && cs->expiration_time < timeout_time) {
+        /* We process the timeout queues here only when their overall next
+         * expiry (read once above) has passed. This stays accurate because
+         * adding to the queues (in workers) can only decrease that expiry,
+         * while later values are only taken into account here (in the
+         * listener) during queue processing, with the lock held. This works
+         * both with and without wake-ability.
+         */
+        if (timeout_time && timeout_time < (now = apr_time_now())) {
+            timeout_time = now + TIMEOUT_FUDGE_FACTOR;
 
-            cs->state = CONN_STATE_LINGER;
+            /* handle timed out sockets */
+            apr_thread_mutex_lock(timeout_mutex);
 
-            APR_RING_REMOVE(cs, timeout_list);
-            apr_thread_mutex_unlock(timeout_mutex);
+            /* Processing all the queues below will recompute this. */
+            queues_next_expiry = 0;
 
-            if (!get_worker(&have_idle_worker)) {
-                apr_thread_mutex_lock(timeout_mutex);
-                APR_RING_INSERT_HEAD(&keepalive_timeout_head, cs,
-                                     conn_state_t, timeout_list);
-                break;
+            /* Step 1: keepalive timeouts */
+            if (workers_were_busy || dying) {
+                process_keepalive_queue(0); /* kill'em all \m/ */
             }
-
-            rc = push2worker(&cs->pfd, event_pollset);
-
-            if (rc != APR_SUCCESS) {
-                return NULL;
-                /* XXX return NULL looks wrong - not an init failure
-                 * that bypasses all the cleanup outside the main loop
-                 * break seems more like it
-                 * need to evaluate seriousness of push2worker failures
-                 */
+            else {
+                process_keepalive_queue(timeout_time);
             }
-            have_idle_worker = 0;
-            apr_thread_mutex_lock(timeout_mutex);
-            cs = APR_RING_FIRST(&keepalive_timeout_head);
-        }
-
-        /* Step 2: write completion timeouts */
-        cs = APR_RING_FIRST(&timeout_head);
-        while (!APR_RING_EMPTY(&timeout_head, conn_state_t, timeout_list)
-               && cs->expiration_time < timeout_time) {
+            /* Step 2: write completion timeouts */
+            process_timeout_queue(write_completion_q, timeout_time,
+                                  start_lingering_close_nonblocking);
+            /* Step 3: (normal) lingering close completion timeouts */
+            process_timeout_queue(linger_q, timeout_time,
+                                  stop_lingering_close);
+            /* Step 4: (short) lingering close completion timeouts */
+            process_timeout_queue(short_linger_q, timeout_time,
+                                  stop_lingering_close);
 
-            cs->state = CONN_STATE_LINGER;
-            APR_RING_REMOVE(cs, timeout_list);
             apr_thread_mutex_unlock(timeout_mutex);
 
-            if (!get_worker(&have_idle_worker)) {
-                apr_thread_mutex_lock(timeout_mutex);
-                APR_RING_INSERT_HEAD(&timeout_head, cs,
-                                     conn_state_t, timeout_list);
-                break;
-            }
-
-            rc = push2worker(&cs->pfd, event_pollset);
-            if (rc != APR_SUCCESS) {
-                return NULL;
-            }
-            have_idle_worker = 0;
+            ps->keep_alive = apr_atomic_read32(keepalive_q->total);
+            ps->write_completion = apr_atomic_read32(write_completion_q->total);
+            ps->connections = apr_atomic_read32(&connection_count);
+            ps->suspended = apr_atomic_read32(&suspended_count);
+            ps->lingering_close = apr_atomic_read32(&lingering_count);
+        }
+        else if ((workers_were_busy || dying)
+                 && apr_atomic_read32(keepalive_q->total)) {
             apr_thread_mutex_lock(timeout_mutex);
-            cs = APR_RING_FIRST(&timeout_head);
+            process_keepalive_queue(0); /* kill'em all \m/ */
+            apr_thread_mutex_unlock(timeout_mutex);
+            ps->keep_alive = 0;
         }
 
-        apr_thread_mutex_unlock(timeout_mutex);
+        /* If there are some lingering closes to defer (to a worker), schedule
+         * them now. We might wake up a worker spuriously if another one empties
+         * defer_linger_chain in the meantime, but there may also be no active
+         * workers, or only busy ones, for an undefined time.  In any case a deferred
+         * lingering close can't starve if we do that here since the chain is
+         * filled only above in the listener and it's emptied only in the
+         * worker(s); thus a NULL here means it will stay so while the listener
+         * waits (possibly indefinitely) in poll().
+         */
+        if (defer_linger_chain) {
+            get_worker(&have_idle_worker, 0, &workers_were_busy);
+            if (have_idle_worker
+                    && defer_linger_chain /* re-test */
+                    && push2worker(NULL, NULL, NULL) == APR_SUCCESS) {
+                have_idle_worker = 0;
+            }
+        }
 
+        if (listeners_disabled && !workers_were_busy
+            && (int)apr_atomic_read32(&connection_count)
+               - (int)apr_atomic_read32(&lingering_count)
+               < ((int)ap_queue_info_get_idlers(worker_queue_info) - 1)
+                 * worker_factor / WORKER_FACTOR_SCALE + threads_per_child)
+        {
+            listeners_disabled = 0;
+            enable_listensocks(process_slot);
+        }
+        /*
+         * XXX: do we need to set some timeout that re-enables the listensocks
+         * XXX: in case no other event occurs?
+         */
     }     /* listener main loop */
 
-    ap_close_listeners();
+    close_listeners(process_slot, &closed);
     ap_queue_term(worker_queue);
-    dying = 1;
-    ap_scoreboard_image->parent[process_slot].quiescing = 1;
-
-    /* wake up the main thread */
-    kill(ap_my_pid, SIGTERM);
 
     apr_thread_exit(thd, APR_SUCCESS);
     return NULL;
 }
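
Earlier in the loop above, the timeout handed to apr_pollset_poll() is derived from the earlier of the timers' and the queues' next expiries, left infinite when neither is pending and the pollset is wakeable, and capped at 100 ms otherwise so periodic maintenance still runs. The following is a condensed sketch of that selection, with hypothetical parameter names standing in for the globals used above:

    #include "apr_time.h"

    static apr_interval_time_t pick_poll_timeout(apr_time_t now,
                                                 apr_time_t timers_expiry,
                                                 apr_time_t queues_expiry,
                                                 int wakeable)
    {
        apr_interval_time_t t = -1;                 /* infinite by default */

        if (timers_expiry) {
            t = timers_expiry > now ? timers_expiry - now : 1;
        }
        if (queues_expiry
                && (t < 0 || queues_expiry <= now || t > queues_expiry - now)) {
            t = queues_expiry > now ? queues_expiry - now : 1;
        }
        if (!wakeable && (t < 0 || t > apr_time_from_msec(100))) {
            t = apr_time_from_msec(100);            /* periodic maintenance */
        }
        return t;
    }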
 
+/*
+ * During graceful shutdown, if there are more running worker threads than
+ * open connections, exit one worker thread.
+ *
+ * Return 1 if the thread should exit, 0 if it should continue running.
+ */
+static int worker_thread_should_exit_early(void)
+{
+    for (;;) {
+        apr_uint32_t conns = apr_atomic_read32(&connection_count);
+        apr_uint32_t dead = apr_atomic_read32(&threads_shutdown);
+        apr_uint32_t newdead;
+
+        AP_DEBUG_ASSERT(dead <= threads_per_child);
+        if (conns >= threads_per_child - dead)
+            return 0;
+
+        newdead = dead + 1;
+        if (apr_atomic_cas32(&threads_shutdown, newdead, dead) == dead) {
+            /*
+             * No other thread has exited in the meantime, so it is safe
+             * to exit this one.
+             */
+            return 1;
+        }
+    }
+}
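
The loop above is the standard compare-and-swap retry: each exiting thread tries to move threads_shutdown from the value it read to that value plus one, and only the thread whose CAS succeeds gets to exit, so the count can never overshoot. The same pattern in isolation, as a minimal sketch (not code from this file):

    #include "apr_atomic.h"

    /* Atomically increment *counter, but only while it stays below limit.
     * Returns 1 if this caller claimed a slot, 0 if the limit was reached. */
    static int claim_slot(volatile apr_uint32_t *counter, apr_uint32_t limit)
    {
        for (;;) {
            apr_uint32_t old = apr_atomic_read32(counter);
            if (old >= limit) {
                return 0;
            }
            if (apr_atomic_cas32(counter, old + 1, old) == old) {
                return 1;              /* our CAS won the race */
            }
        }
    }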
+
 /* XXX For ungraceful termination/restart, we definitely don't want to
  *     wait for active connections to finish but we may want to wait
  *     for idle workers to get out of the queue code and release mutexes,
@@ -1318,20 +1924,20 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
 static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
 {
     proc_info *ti = dummy;
-    int process_slot = ti->pid;
-    int thread_slot = ti->tid;
+    int process_slot = ti->pslot;
+    int thread_slot = ti->tslot;
     apr_socket_t *csd = NULL;
-    conn_state_t *cs;
+    event_conn_state_t *cs;
     apr_pool_t *ptrans;         /* Pool for per-transaction stuff */
     apr_status_t rv;
     int is_idle = 0;
     timer_event_t *te = NULL;
-    
+
     free(ti);
 
     ap_scoreboard_image->servers[process_slot][thread_slot].pid = ap_my_pid;
     ap_scoreboard_image->servers[process_slot][thread_slot].tid = apr_os_thread_current();
-    ap_scoreboard_image->servers[process_slot][thread_slot].generation = my_generation;
+    ap_scoreboard_image->servers[process_slot][thread_slot].generation = retained->mpm->my_generation;
     ap_update_child_status_from_indexes(process_slot, thread_slot,
                                         SERVER_STARTING, NULL);
 
@@ -1349,14 +1955,17 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
         }
 
         ap_update_child_status_from_indexes(process_slot, thread_slot,
-                                            SERVER_READY, NULL);
+                                            dying ? SERVER_GRACEFUL
+                                                  : SERVER_READY, NULL);
       worker_pop:
         if (workers_may_exit) {
             break;
         }
+        if (dying && worker_thread_should_exit_early()) {
+            break;
+        }
 
         te = NULL;
-        
         rv = ap_queue_pop_something(worker_queue, &csd, &cs, &ptrans, &te);
 
         if (rv != APR_SUCCESS) {
@@ -1383,35 +1992,57 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
             /* We got some other error. */
             else if (!workers_may_exit) {
                 ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
-                             "ap_queue_pop failed");
+                             APLOGNO(03099) "ap_queue_pop failed");
             }
             continue;
         }
         if (te != NULL) {
-            
             te->cbfunc(te->baton);
 
             {
-                apr_thread_mutex_lock(g_timer_ring_mtx);
+                apr_thread_mutex_lock(g_timer_skiplist_mtx);
                 APR_RING_INSERT_TAIL(&timer_free_ring, te, timer_event_t, link);
-                apr_thread_mutex_unlock(g_timer_ring_mtx);
+                apr_thread_mutex_unlock(g_timer_skiplist_mtx);
             }
         }
         else {
             is_idle = 0;
-            worker_sockets[thread_slot] = csd;
-            rv = process_socket(thd, ptrans, csd, cs, process_slot, thread_slot);
-            if (!rv) {
-                requests_this_child--;
+            if (csd != NULL) {
+                worker_sockets[thread_slot] = csd;
+                process_socket(thd, ptrans, csd, cs, process_slot, thread_slot);
+                worker_sockets[thread_slot] = NULL;
+            }
+        }
+
+        /* If there are deferred lingering closes, handle them now. */
+        while (!workers_may_exit) {
+            cs = defer_linger_chain;
+            if (!cs) {
+                break;
+            }
+            if (apr_atomic_casptr((void *)&defer_linger_chain, cs->chain,
+                                  cs) != cs) {
+                /* Race lost, try again */
+                continue;
             }
+            cs->chain = NULL;
+
+            worker_sockets[thread_slot] = csd = cs->pfd.desc.s;
+#ifdef AP_DEBUG
+            rv = apr_socket_timeout_set(csd, SECONDS_TO_LINGER);
+            AP_DEBUG_ASSERT(rv == APR_SUCCESS);
+#else
+            apr_socket_timeout_set(csd, SECONDS_TO_LINGER);
+#endif
+            cs->pub.state = CONN_STATE_LINGER;
+            process_socket(thd, cs->p, csd, cs, process_slot, thread_slot);
             worker_sockets[thread_slot] = NULL;
         }
     }
 
     ap_update_child_status_from_indexes(process_slot, thread_slot,
-                                        (dying) ? SERVER_DEAD :
-                                        SERVER_GRACEFUL,
-                                        (request_rec *) NULL);
+                                        dying ? SERVER_DEAD
+                                              : SERVER_GRACEFUL, NULL);
 
     apr_thread_exit(thd, APR_SUCCESS);
     return NULL;
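
The defer_linger_chain handling in the worker loop above pops entries with apr_atomic_casptr(), so the listener (which fills the chain) and the workers (which empty it) never need a lock for that list. The pop in isolation looks roughly like the following sketch; event_conn_state_t and its chain member are this file's types, while the helper name is hypothetical:

    static event_conn_state_t *pop_deferred(event_conn_state_t *volatile *chain)
    {
        event_conn_state_t *cs;

        do {
            cs = *chain;
            if (!cs) {
                return NULL;                        /* nothing deferred */
            }
            /* Detach the head only if it is still the head we just read. */
        } while (apr_atomic_casptr((void *)chain, cs->chain, cs) != cs);

        cs->chain = NULL;
        return cs;
    }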
@@ -1436,14 +2067,13 @@ static void create_listener_thread(thread_starter * ts)
     proc_info *my_info;
     apr_status_t rv;
 
-    my_info = (proc_info *) malloc(sizeof(proc_info));
-    my_info->pid = my_child_num;
-    my_info->tid = -1;          /* listener thread doesn't have a thread slot */
-    my_info->sd = 0;
+    my_info = (proc_info *) ap_malloc(sizeof(proc_info));
+    my_info->pslot = my_child_num;
+    my_info->tslot = -1;      /* listener thread doesn't have a thread slot */
     rv = apr_thread_create(&ts->listener, thread_attr, listener_thread,
                            my_info, pchild);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(00474)
                      "apr_thread_create: unable to create listener thread");
         /* let the parent decide how bad this really is */
         clean_child_exit(APEXIT_CHILDSICK);
@@ -1461,8 +2091,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
     thread_starter *ts = dummy;
     apr_thread_t **threads = ts->threads;
     apr_threadattr_t *thread_attr = ts->threadattr;
-    int child_num_arg = ts->child_num_arg;
-    int my_child_num = child_num_arg;
+    int my_child_num = ts->child_num_arg;
     proc_info *my_info;
     apr_status_t rv;
     int i;
@@ -1470,21 +2099,32 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
     int listener_started = 0;
     int loops;
     int prev_threads_created;
+    int max_recycled_pools = -1;
+    int good_methods[] = {APR_POLLSET_KQUEUE, APR_POLLSET_PORT, APR_POLLSET_EPOLL};
+    /* XXX don't we need more to handle K-A or lingering close? */
+    const apr_uint32_t pollset_size = threads_per_child * 2;
 
     /* We must create the fd queues before we start up the listener
      * and worker threads. */
     worker_queue = apr_pcalloc(pchild, sizeof(*worker_queue));
     rv = ap_queue_init(worker_queue, threads_per_child, pchild);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(03100)
                      "ap_queue_init() failed");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
+    if (ap_max_mem_free != APR_ALLOCATOR_MAX_FREE_UNLIMITED) {
+        /* If we want to conserve memory, let's not keep an unlimited number of
+         * pools & allocators.
+         * XXX: This should probably be a separate config directive
+         */
+        max_recycled_pools = threads_per_child * 3 / 4;
+    }
     rv = ap_queue_info_create(&worker_queue_info, pchild,
-                              threads_per_child);
+                              threads_per_child, max_recycled_pools);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(03101)
                      "ap_queue_info_create() failed");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
@@ -1495,21 +2135,42 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
     rv = apr_thread_mutex_create(&timeout_mutex, APR_THREAD_MUTEX_DEFAULT,
                                  pchild);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03102)
                      "creation of the timeout mutex failed.");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
     /* Create the main pollset */
-    rv = apr_pollset_create(&event_pollset,
-                            threads_per_child,
-                            pchild, APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
+    for (i = 0; i < sizeof(good_methods) / sizeof(good_methods[0]); i++) {
+        apr_uint32_t flags = APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY |
+                             APR_POLLSET_NODEFAULT | APR_POLLSET_WAKEABLE;
+        rv = apr_pollset_create_ex(&event_pollset, pollset_size, pchild, flags,
+                                   good_methods[i]);
+        if (rv == APR_SUCCESS) {
+            listener_is_wakeable = 1;
+            break;
+        }
+        flags &= ~APR_POLLSET_WAKEABLE;
+        rv = apr_pollset_create_ex(&event_pollset, pollset_size, pchild, flags,
+                                   good_methods[i]);
+        if (rv == APR_SUCCESS) {
+            break;
+        }
+    }
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
+        rv = apr_pollset_create(&event_pollset, pollset_size, pchild,
+                                APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
+    }
+    if (rv != APR_SUCCESS) {
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03103)
                      "apr_pollset_create with Thread Safety failed.");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
+    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02471)
+                 "start_threads: Using %s (%swakeable)",
+                 apr_pollset_method_name(event_pollset),
+                 listener_is_wakeable ? "" : "not ");
     worker_sockets = apr_pcalloc(pchild, threads_per_child
                                  * sizeof(apr_socket_t *));
 
@@ -1518,21 +2179,15 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
         /* threads_per_child does not include the listener thread */
         for (i = 0; i < threads_per_child; i++) {
             int status =
-                ap_scoreboard_image->servers[child_num_arg][i].status;
+                ap_scoreboard_image->servers[my_child_num][i].status;
 
-            if (status != SERVER_GRACEFUL && status != SERVER_DEAD) {
+            if (status != SERVER_DEAD) {
                 continue;
             }
 
-            my_info = (proc_info *) malloc(sizeof(proc_info));
-            if (my_info == NULL) {
-                ap_log_error(APLOG_MARK, APLOG_ALERT, errno, ap_server_conf,
-                             "malloc: out of memory");
-                clean_child_exit(APEXIT_CHILDFATAL);
-            }
-            my_info->pid = my_child_num;
-            my_info->tid = i;
-            my_info->sd = 0;
+            my_info = (proc_info *) ap_malloc(sizeof(proc_info));
+            my_info->pslot = my_child_num;
+            my_info->tslot = i;
 
             /* We are creating threads right now */
             ap_update_child_status_from_indexes(my_child_num, i,
@@ -1544,6 +2199,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
                                    worker_thread, my_info, pchild);
             if (rv != APR_SUCCESS) {
                 ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+                             APLOGNO(03104)
                              "apr_thread_create: unable to create worker thread");
                 /* let the parent decide how bad this really is */
                 clean_child_exit(APEXIT_CHILDSICK);
@@ -1607,26 +2263,20 @@ static void join_workers(apr_thread_t * listener, apr_thread_t ** threads)
          */
 
         iter = 0;
-        while (iter < 10 &&
-#ifdef HAVE_PTHREAD_KILL
-               pthread_kill(*listener_os_thread, 0)
-#else
-               kill(ap_my_pid, 0)
-#endif
-               == 0) {
-            /* listener not dead yet */
+        while (iter < 10 && !dying) {
+            /* listener has not stopped accepting yet */
             apr_sleep(apr_time_make(0, 500000));
             wakeup_listener();
             ++iter;
         }
         if (iter >= 10) {
-            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
-                         "the listener thread didn't exit");
+            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00475)
+                         "the listener thread didn't stop accepting");
         }
         else {
             rv = apr_thread_join(&thread_rv, listener);
             if (rv != APR_SUCCESS) {
-                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
+                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00476)
                              "apr_thread_join: unable to join listener thread");
             }
         }
@@ -1636,7 +2286,7 @@ static void join_workers(apr_thread_t * listener, apr_thread_t ** threads)
         if (threads[i]) {       /* if we ever created this thread */
             rv = apr_thread_join(&thread_rv, threads[i]);
             if (rv != APR_SUCCESS) {
-                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
+                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00477)
                              "apr_thread_join: unable to join worker "
                              "thread %d", i);
             }
@@ -1654,26 +2304,35 @@ static void join_start_thread(apr_thread_t * start_thread_id)
                                  */
     rv = apr_thread_join(&thread_rv, start_thread_id);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00478)
                      "apr_thread_join: unable to join the start " "thread");
     }
 }
 
-static void child_main(int child_num_arg)
+static void child_main(int child_num_arg, int child_bucket)
 {
     apr_thread_t **threads;
     apr_status_t rv;
     thread_starter *ts;
     apr_threadattr_t *thread_attr;
     apr_thread_t *start_thread_id;
+    int i;
+
+    /* for benefit of any hooks that run as this child initializes */
+    retained->mpm->mpm_state = AP_MPMQ_STARTING;
 
-    mpm_state = AP_MPMQ_STARTING;       /* for benefit of any hooks that run as this
-                                         * child initializes
-                                         */
     ap_my_pid = getpid();
     ap_fatal_signal_child_setup(ap_server_conf);
     apr_pool_create(&pchild, pconf);
 
+    /* close unused listeners and pods */
+    for (i = 0; i < retained->mpm->num_buckets; i++) {
+        if (i != child_bucket) {
+            ap_close_listeners_ex(all_buckets[i].listeners);
+            ap_mpm_podx_close(all_buckets[i].pod);
+        }
+    }
+
     /*stuff to do before we switch id's, so we have permissions. */
     ap_reopen_scoreboard(pchild, NULL, 0);
 
@@ -1681,10 +2340,10 @@ static void child_main(int child_num_arg)
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
-    apr_thread_mutex_create(&g_timer_ring_mtx, APR_THREAD_MUTEX_DEFAULT, pchild);
+    apr_thread_mutex_create(&g_timer_skiplist_mtx, APR_THREAD_MUTEX_DEFAULT, pchild);
     APR_RING_INIT(&timer_free_ring, timer_event_t, link);
-    APR_RING_INIT(&timer_ring, timer_event_t, link);
-    
+    apr_skiplist_init(&timer_skiplist, pchild);
+    apr_skiplist_set_compare(timer_skiplist, timer_comp, timer_comp);
     ap_run_child_init(pchild, ap_server_conf);
 
     /* done with init critical section */
@@ -1695,17 +2354,17 @@ static void child_main(int child_num_arg)
      */
     rv = apr_setup_signal_thread();
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf, APLOGNO(00479)
                      "Couldn't initialize signal thread");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
     if (ap_max_requests_per_child) {
-        requests_this_child = ap_max_requests_per_child;
+        conns_this_child = ap_max_requests_per_child;
     }
     else {
         /* coding a value of zero means infinity */
-        requests_this_child = INT_MAX;
+        conns_this_child = APR_INT32_MAX;
     }
 
     /* Setup worker threads */
@@ -1713,23 +2372,21 @@ static void child_main(int child_num_arg)
     /* clear the storage; we may not create all our threads immediately,
      * and we want a 0 entry to indicate a thread which was not created
      */
-    threads = (apr_thread_t **) calloc(1,
-                                       sizeof(apr_thread_t *) *
-                                       threads_per_child);
-    if (threads == NULL) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, errno, ap_server_conf,
-                     "malloc: out of memory");
-        clean_child_exit(APEXIT_CHILDFATAL);
-    }
-
-    ts = (thread_starter *) apr_palloc(pchild, sizeof(*ts));
+    threads = ap_calloc(threads_per_child, sizeof(apr_thread_t *));
+    ts = apr_palloc(pchild, sizeof(*ts));
 
     apr_threadattr_create(&thread_attr, pchild);
     /* 0 means PTHREAD_CREATE_JOINABLE */
     apr_threadattr_detach_set(thread_attr, 0);
 
     if (ap_thread_stacksize != 0) {
-        apr_threadattr_stacksize_set(thread_attr, ap_thread_stacksize);
+        rv = apr_threadattr_stacksize_set(thread_attr, ap_thread_stacksize);
+        if (rv != APR_SUCCESS && rv != APR_ENOTIMPL) {
+            ap_log_error(APLOG_MARK, APLOG_WARNING, rv, ap_server_conf, APLOGNO(02436)
+                         "WARNING: ThreadStackSize of %" APR_SIZE_T_FMT " is "
+                         "inappropriate, using default", 
+                         ap_thread_stacksize);
+        }
     }
 
     ts->threads = threads;
@@ -1740,13 +2397,13 @@ static void child_main(int child_num_arg)
     rv = apr_thread_create(&start_thread_id, thread_attr, start_threads,
                            ts, pchild);
     if (rv != APR_SUCCESS) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
+        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(00480)
                      "apr_thread_create: unable to create worker thread");
         /* let the parent decide how bad this really is */
         clean_child_exit(APEXIT_CHILDSICK);
     }
 
-    mpm_state = AP_MPMQ_RUNNING;
+    retained->mpm->mpm_state = AP_MPMQ_RUNNING;
 
     /* If we are only running in one_process mode, we will want to
      * still handle signals. */
@@ -1779,31 +2436,31 @@ static void child_main(int child_num_arg)
     else {                      /* !one_process */
         /* remove SIGTERM from the set of blocked signals...  if one of
          * the other threads in the process needs to take us down
-         * (e.g., for MaxRequestsPerChild) it will send us SIGTERM
+         * (e.g., for MaxConnectionsPerChild) it will send us SIGTERM
          */
         unblock_signal(SIGTERM);
         apr_signal(SIGTERM, dummy_signal_handler);
         /* Watch for any messages from the parent over the POD */
         while (1) {
-            rv = ap_event_pod_check(pod);
-            if (rv == AP_NORESTART) {
+            rv = ap_mpm_podx_check(my_bucket->pod);
+            if (rv == AP_MPM_PODX_NORESTART) {
                 /* see if termination was triggered while we slept */
                 switch (terminate_mode) {
                 case ST_GRACEFUL:
-                    rv = AP_GRACEFUL;
+                    rv = AP_MPM_PODX_GRACEFUL;
                     break;
                 case ST_UNGRACEFUL:
-                    rv = AP_RESTART;
+                    rv = AP_MPM_PODX_RESTART;
                     break;
                 }
             }
-            if (rv == AP_GRACEFUL || rv == AP_RESTART) {
+            if (rv == AP_MPM_PODX_GRACEFUL || rv == AP_MPM_PODX_RESTART) {
                 /* make sure the start thread has finished;
                  * signal_threads() and join_workers depend on that
                  */
                 join_start_thread(start_thread_id);
                 signal_threads(rv ==
-                               AP_GRACEFUL ? ST_GRACEFUL : ST_UNGRACEFUL);
+                               AP_MPM_PODX_GRACEFUL ? ST_GRACEFUL : ST_UNGRACEFUL);
                 break;
             }
         }
@@ -1823,22 +2480,35 @@ static void child_main(int child_num_arg)
     clean_child_exit(resource_shortage ? APEXIT_CHILDSICK : 0);
 }
 
-static int make_child(server_rec * s, int slot)
+static int make_child(server_rec * s, int slot, int bucket)
 {
     int pid;
 
-    if (slot + 1 > max_daemons_limit) {
-        max_daemons_limit = slot + 1;
+    if (slot + 1 > retained->max_daemons_limit) {
+        retained->max_daemons_limit = slot + 1;
+    }
+
+    if (ap_scoreboard_image->parent[slot].pid != 0) {
+        /* XXX replace with assert or remove ? */
+        ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(03455)
+                 "BUG: Scoreboard slot %d should be empty but is "
+                 "in use by pid %" APR_PID_T_FMT,
+                 slot, ap_scoreboard_image->parent[slot].pid);
+        return -1;
     }
 
     if (one_process) {
-        set_signals();
-        ap_scoreboard_image->parent[slot].pid = getpid();
-        child_main(slot);
+        my_bucket = &all_buckets[0];
+
+        event_note_child_started(slot, getpid());
+        child_main(slot, 0);
+        /* NOTREACHED */
+        ap_assert(0);
+        return -1;
     }
 
     if ((pid = fork()) == -1) {
-        ap_log_error(APLOG_MARK, APLOG_ERR, errno, s,
+        ap_log_error(APLOG_MARK, APLOG_ERR, errno, s, APLOGNO(00481)
                      "fork: Unable to fork new process");
 
         /* fork didn't succeed.  There's no need to touch the scoreboard;
@@ -1857,6 +2527,8 @@ static int make_child(server_rec * s, int slot)
     }
 
     if (!pid) {
+        my_bucket = &all_buckets[bucket];
+
 #ifdef HAVE_BINDPROCESSOR
         /* By default, AIX binds to a single processor.  This bit unbinds
          * children which will then bind to another CPU.
@@ -1865,34 +2537,24 @@ static int make_child(server_rec * s, int slot)
                                    PROCESSOR_CLASS_ANY);
         if (status != OK)
             ap_log_error(APLOG_MARK, APLOG_DEBUG, errno,
-                         ap_server_conf,
+                         ap_server_conf, APLOGNO(00482)
                          "processor unbind failed");
 #endif
         RAISE_SIGSTOP(MAKE_CHILD);
 
         apr_signal(SIGTERM, just_die);
-        child_main(slot);
-
-        clean_child_exit(0);
-    }
-    /* else */
-    if (ap_scoreboard_image->parent[slot].pid != 0) {
-        /* This new child process is squatting on the scoreboard
-         * entry owned by an exiting child process, which cannot
-         * exit until all active requests complete.
-         * Don't forget about this exiting child process, or we
-         * won't be able to kill it if it doesn't exit by the
-         * time the server is shut down.
-         */
-        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
-                     "taking over scoreboard slot from %" APR_PID_T_FMT "%s",
-                     ap_scoreboard_image->parent[slot].pid,
-                     ap_scoreboard_image->parent[slot].quiescing ?
-                         " (quiescing)" : "");
-        ap_register_extra_mpm_process(ap_scoreboard_image->parent[slot].pid);
+        child_main(slot, bucket);
+        /* NOTREACHED */
+        ap_assert(0);
+        return -1;
     }
+
     ap_scoreboard_image->parent[slot].quiescing = 0;
-    ap_scoreboard_image->parent[slot].pid = pid;
+    ap_scoreboard_image->parent[slot].not_accepting = 0;
+    ap_scoreboard_image->parent[slot].bucket = bucket;
+    event_note_child_started(slot, pid);
+    active_daemons++;
+    retained->total_daemons++;
     return 0;
 }
 
@@ -1901,208 +2563,189 @@ static void startup_children(int number_to_start)
 {
     int i;
 
-    for (i = 0; number_to_start && i < ap_daemons_limit; ++i) {
+    for (i = 0; number_to_start && i < server_limit; ++i) {
         if (ap_scoreboard_image->parent[i].pid != 0) {
             continue;
         }
-        if (make_child(ap_server_conf, i) < 0) {
+        if (make_child(ap_server_conf, i, i % retained->mpm->num_buckets) < 0) {
             break;
         }
         --number_to_start;
     }
 }
 
-
-/*
- * idle_spawn_rate is the number of children that will be spawned on the
- * next maintenance cycle if there aren't enough idle servers.  It is
- * doubled up to MAX_SPAWN_RATE, and reset only when a cycle goes by
- * without the need to spawn.
- */
-static int idle_spawn_rate = 1;
-#ifndef MAX_SPAWN_RATE
-#define MAX_SPAWN_RATE        (32)
-#endif
-static int hold_off_on_exponential_spawning;
-
-static void perform_idle_server_maintenance(void)
+static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
 {
     int i, j;
-    int idle_thread_count;
+    int idle_thread_count = 0;
     worker_score *ws;
     process_score *ps;
-    int free_length;
-    int totally_free_length = 0;
+    int free_length = 0;
     int free_slots[MAX_SPAWN_RATE];
-    int last_non_dead;
-    int total_non_dead;
+    int last_non_dead = -1;
     int active_thread_count = 0;
 
-    /* initialize the free_list */
-    free_length = 0;
-
-    idle_thread_count = 0;
-    last_non_dead = -1;
-    total_non_dead = 0;
-
-    for (i = 0; i < ap_daemons_limit; ++i) {
+    for (i = 0; i < server_limit; ++i) {
         /* Initialization to satisfy the compiler. It doesn't know
          * that threads_per_child is always > 0 */
         int status = SERVER_DEAD;
-        int any_dying_threads = 0;
-        int any_dead_threads = 0;
-        int all_dead_threads = 1;
+        int child_threads_active = 0;
 
-        if (i >= max_daemons_limit
-            && totally_free_length == idle_spawn_rate)
+        if (i >= retained->max_daemons_limit &&
+            free_length == retained->idle_spawn_rate[child_bucket]) {
             /* short cut if all active processes have been examined and
              * enough empty scoreboard slots have been found
              */
-        
+
             break;
+        }
         ps = &ap_scoreboard_image->parent[i];
-        for (j = 0; j < threads_per_child; j++) {
-            ws = &ap_scoreboard_image->servers[i][j];
-            status = ws->status;
-
-            /* XXX any_dying_threads is probably no longer needed    GLA */
-            any_dying_threads = any_dying_threads ||
-                (status == SERVER_GRACEFUL);
-            any_dead_threads = any_dead_threads || (status == SERVER_DEAD);
-            all_dead_threads = all_dead_threads &&
-                (status == SERVER_DEAD || status == SERVER_GRACEFUL);
-
-            /* We consider a starting server as idle because we started it
-             * at least a cycle ago, and if it still hasn't finished starting
-             * then we're just going to swamp things worse by forking more.
-             * So we hopefully won't need to fork more if we count it.
-             * This depends on the ordering of SERVER_READY and SERVER_STARTING.
-             */
-            if (ps->pid != 0) { /* XXX just set all_dead_threads in outer
-                                   for loop if no pid?  not much else matters */
-                if (status <= SERVER_READY &&
-                        !ps->quiescing && ps->generation == my_generation) {
+        if (ps->pid != 0) {
+            for (j = 0; j < threads_per_child; j++) {
+                ws = &ap_scoreboard_image->servers[i][j];
+                status = ws->status;
+
+                /* We consider a starting server as idle because we started it
+                 * at least a cycle ago, and if it still hasn't finished starting
+                 * then we're just going to swamp things worse by forking more.
+                 * So we hopefully won't need to fork more if we count it.
+                 * This depends on the ordering of SERVER_READY and SERVER_STARTING.
+                 */
+                if (status <= SERVER_READY && !ps->quiescing && !ps->not_accepting
+                    && ps->generation == retained->mpm->my_generation
+                    && ps->bucket == child_bucket)
+                {
                     ++idle_thread_count;
                 }
                 if (status >= SERVER_READY && status < SERVER_GRACEFUL) {
-                    ++active_thread_count;
+                    ++child_threads_active;
                 }
             }
-        }
-        if (any_dead_threads
-            && totally_free_length < idle_spawn_rate
-            && free_length < MAX_SPAWN_RATE
-            && (!ps->pid      /* no process in the slot */
-                  || ps->quiescing)) {  /* or at least one is going away */
-            if (all_dead_threads) {
-                /* great! we prefer these, because the new process can
-                 * start more threads sooner.  So prioritize this slot
-                 * by putting it ahead of any slots with active threads.
-                 *
-                 * first, make room by moving a slot that's potentially still
-                 * in use to the end of the array
-                 */
-                free_slots[free_length] = free_slots[totally_free_length];
-                free_slots[totally_free_length++] = i;
-            }
-            else {
-                /* slot is still in use - back of the bus
-                 */
-                free_slots[free_length] = i;
-            }
-            ++free_length;
-        }
-        /* XXX if (!ps->quiescing)     is probably more reliable  GLA */
-        if (!any_dying_threads) {
             last_non_dead = i;
-            ++total_non_dead;
         }
+        active_thread_count += child_threads_active;
+        if (!ps->pid && free_length < retained->idle_spawn_rate[child_bucket])
+            free_slots[free_length++] = i;
+        else if (child_threads_active == threads_per_child)
+            had_healthy_child = 1;
     }
 
-    if (sick_child_detected) {
-        if (active_thread_count > 0) {
-            /* some child processes appear to be working.  don't kill the
-             * whole server.
+    if (retained->sick_child_detected) {
+        if (had_healthy_child) {
+            /* Assume this is a transient error, even though it may not be.  Leave
+             * the server up in case it is able to serve some requests or the
+             * problem will be resolved.
              */
-            sick_child_detected = 0;
+            retained->sick_child_detected = 0;
         }
         else {
-            /* looks like a basket case give up.
+            /* looks like a basket case, as no child ever fully initialized; give up.
              */
-            shutdown_pending = 1;
+            retained->mpm->shutdown_pending = 1;
             child_fatal = 1;
             ap_log_error(APLOG_MARK, APLOG_ALERT, 0,
-                         ap_server_conf,
-                         "No active workers found..."
-                         " Apache is exiting!");
+                         ap_server_conf, APLOGNO(02324)
+                         "A resource shortage or other unrecoverable failure "
+                         "was encountered before any child process initialized "
+                         "successfully... httpd is exiting!");
             /* the child already logged the failure details */
             return;
         }
     }
 
-    max_daemons_limit = last_non_dead + 1;
+    retained->max_daemons_limit = last_non_dead + 1;
 
-    if (idle_thread_count > max_spare_threads) {
-        /* Kill off one child */
-        ap_event_pod_signal(pod, TRUE);
-        idle_spawn_rate = 1;
+    if (idle_thread_count > max_spare_threads / num_buckets)
+    {
+        /*
+         * Child processes that we ask to shut down won't die immediately
+         * but may stay around for a long time while they finish their
+         * requests. If the server load changes many times, many such
+         * gracefully finishing processes may accumulate, filling up the
+         * scoreboard. To avoid running out of scoreboard entries, we
+         * don't shut down more processes when the total number of processes
+         * is high.
+         *
+         * XXX It would be nice if we could
+         * XXX - kill processes without keepalive connections first
+         * XXX - tell children to stop accepting new connections, and
+         * XXX   depending on server load, later be able to resurrect them
+         *       or kill them
+         */
+        if (retained->total_daemons <= active_daemons_limit &&
+            retained->total_daemons < server_limit) {
+            /* Kill off one child */
+            ap_mpm_podx_signal(all_buckets[child_bucket].pod,
+                               AP_MPM_PODX_GRACEFUL);
+            retained->idle_spawn_rate[child_bucket] = 1;
+            active_daemons--;
+        } else {
+            ap_log_error(APLOG_MARK, APLOG_TRACE5, 0, ap_server_conf,
+                         "Not shutting down child: total daemons %d / "
+                         "active limit %d / ServerLimit %d",
+                         retained->total_daemons, active_daemons_limit,
+                         server_limit);
+        }
     }
-    else if (idle_thread_count < min_spare_threads) {
-        /* terminate the free list */
-        if (free_length == 0) { /* scoreboard is full, can't fork */
-
-            if (active_thread_count >= ap_daemons_limit * threads_per_child) {
-                static int reported = 0;
-                if (!reported) {
-                    /* only report this condition once */
-                    ap_log_error(APLOG_MARK, APLOG_ERR, 0,
-                                 ap_server_conf,
-                                 "server reached MaxClients setting, consider"
-                                 " raising the MaxClients setting");
-                    reported = 1;
-                }
-            }
-            else {
-                ap_log_error(APLOG_MARK, APLOG_ERR, 0,
-                             ap_server_conf,
-                             "scoreboard is full, not at MaxClients");
+    else if (idle_thread_count < min_spare_threads / num_buckets) {
+        if (active_thread_count >= max_workers) {
+            if (!retained->maxclients_reported) {
+                /* only report this condition once */
+                ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00484)
+                             "server reached MaxRequestWorkers setting, "
+                             "consider raising the MaxRequestWorkers "
+                             "setting");
+                retained->maxclients_reported = 1;
             }
-            idle_spawn_rate = 1;
+            retained->idle_spawn_rate[child_bucket] = 1;
+        }
+        else if (free_length == 0) { /* scoreboard is full, can't fork */
+            ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(03490)
+                         "scoreboard is full, not at MaxRequestWorkers. "
+                         "Increase ServerLimit.");
+            retained->idle_spawn_rate[child_bucket] = 1;
         }
         else {
-            if (free_length > idle_spawn_rate) {
-                free_length = idle_spawn_rate;
+            if (free_length > retained->idle_spawn_rate[child_bucket]) {
+                free_length = retained->idle_spawn_rate[child_bucket];
             }
-            if (idle_spawn_rate >= 8) {
-                ap_log_error(APLOG_MARK, APLOG_INFO, 0,
-                             ap_server_conf,
+            if (retained->idle_spawn_rate[child_bucket] >= 8) {
+                ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf, APLOGNO(00486)
                              "server seems busy, (you may need "
                              "to increase StartServers, ThreadsPerChild "
                              "or Min/MaxSpareThreads), "
                              "spawning %d children, there are around %d idle "
-                             "threads, and %d total children", free_length,
-                             idle_thread_count, total_non_dead);
+                             "threads, %d active children, and %d children "
+                             "that are shutting down", free_length,
+                             idle_thread_count, active_daemons,
+                             retained->total_daemons);
             }
             for (i = 0; i < free_length; ++i) {
-                make_child(ap_server_conf, free_slots[i]);
+                ap_log_error(APLOG_MARK, APLOG_TRACE5, 0, ap_server_conf,
+                             "Spawning new child: slot %d active / "
+                             "total daemons: %d/%d",
+                             free_slots[i], active_daemons,
+                             retained->total_daemons);
+                make_child(ap_server_conf, free_slots[i], child_bucket);
             }
             /* the next time around we want to spawn twice as many if this
              * wasn't good enough, but not if we've just done a graceful
              */
-            if (hold_off_on_exponential_spawning) {
-                --hold_off_on_exponential_spawning;
+            if (retained->hold_off_on_exponential_spawning) {
+                --retained->hold_off_on_exponential_spawning;
             }
-            else if (idle_spawn_rate < MAX_SPAWN_RATE) {
-                idle_spawn_rate *= 2;
+            else if (retained->idle_spawn_rate[child_bucket]
+                     < MAX_SPAWN_RATE / num_buckets) {
+                retained->idle_spawn_rate[child_bucket] *= 2;
             }
         }
     }
     else {
-        idle_spawn_rate = 1;
+        retained->idle_spawn_rate[child_bucket] = 1;
     }
 }
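
A standalone sketch (not httpd code; maintenance_cycle() and needs_spawn are illustrative names) of the per-bucket spawn-rate schedule implemented above: the rate starts at 1, doubles after each maintenance cycle that still had to spawn children, is capped at MAX_SPAWN_RATE / num_buckets, and resets to 1 as soon as a cycle needs no new children.

#include <stdio.h>

#define MAX_SPAWN_RATE 32   /* same default cap as the MPM */

static int idle_spawn_rate[4] = { 1, 1, 1, 1 };

/* One idle-maintenance pass for a bucket: spawn up to the current rate
 * when idle threads are short, then back off exponentially up to the
 * per-bucket cap; reset to 1 when nothing had to be spawned.
 */
static void maintenance_cycle(int bucket, int num_buckets, int needs_spawn)
{
    if (!needs_spawn) {
        idle_spawn_rate[bucket] = 1;
        return;
    }
    printf("bucket %d: spawn up to %d children\n",
           bucket, idle_spawn_rate[bucket]);
    if (idle_spawn_rate[bucket] < MAX_SPAWN_RATE / num_buckets)
        idle_spawn_rate[bucket] *= 2;
}

int main(void)
{
    int i;
    for (i = 0; i < 6; i++)
        maintenance_cycle(0, 4, 1);   /* prints 1, 2, 4, 8, 8, 8 */
    return 0;
}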
 
-static void server_main_loop(int remaining_children_to_start)
+static void server_main_loop(int remaining_children_to_start, int num_buckets)
 {
     int child_slot;
     apr_exit_why_e exitwhy;
@@ -2110,60 +2753,82 @@ static void server_main_loop(int remaining_children_to_start)
     apr_proc_t pid;
     int i;
 
-    while (!restart_pending && !shutdown_pending) {
+    while (!retained->mpm->restart_pending && !retained->mpm->shutdown_pending) {
         ap_wait_or_timeout(&exitwhy, &status, &pid, pconf, ap_server_conf);
 
         if (pid.pid != -1) {
             processed_status = ap_process_child_status(&pid, exitwhy, status);
+            child_slot = ap_find_child_by_pid(&pid);
             if (processed_status == APEXIT_CHILDFATAL) {
-                shutdown_pending = 1;
-                child_fatal = 1;
-                return;
+                /* fix race condition found in PR 39311
+                 * A child created at the same time as a graceful restart
+                 * can find the lock missing and cause a fatal error.
+                 * It is not fatal for the last generation to be in this state.
+                 */
+                if (child_slot < 0
+                    || ap_get_scoreboard_process(child_slot)->generation
+                       == retained->mpm->my_generation) {
+                    retained->mpm->shutdown_pending = 1;
+                    child_fatal = 1;
+                    /*
+                     * total_daemons counting will be off now, but as we
+                     * are shutting down, that is not an issue anymore.
+                     */
+                    return;
+                }
+                else {
+                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf, APLOGNO(00487)
+                                 "Ignoring fatal error in child of previous "
+                                 "generation (pid %ld).",
+                                 (long)pid.pid);
+                    retained->sick_child_detected = 1;
+                }
             }
             else if (processed_status == APEXIT_CHILDSICK) {
                 /* tell perform_idle_server_maintenance to check into this
                  * on the next timer pop
                  */
-                sick_child_detected = 1;
+                retained->sick_child_detected = 1;
             }
             /* non-fatal death... note that it's gone in the scoreboard. */
-            child_slot = ap_find_child_by_pid(&pid);
             if (child_slot >= 0) {
+                process_score *ps;
+
                 for (i = 0; i < threads_per_child; i++)
                     ap_update_child_status_from_indexes(child_slot, i,
-                                                        SERVER_DEAD,
-                                                        (request_rec *) NULL);
-
-                ap_scoreboard_image->parent[child_slot].pid = 0;
-                ap_scoreboard_image->parent[child_slot].quiescing = 0;
+                                                        SERVER_DEAD, NULL);
+
+                event_note_child_killed(child_slot, 0, 0);
+                ps = &ap_scoreboard_image->parent[child_slot];
+                if (!ps->quiescing)
+                    active_daemons--;
+                ps->quiescing = 0;
+                /* NOTE: We don't dec in the (child_slot < 0) case! */
+                retained->total_daemons--;
                 if (processed_status == APEXIT_CHILDSICK) {
                     /* resource shortage, minimize the fork rate */
-                    idle_spawn_rate = 1;
+                    retained->idle_spawn_rate[ps->bucket] = 1;
                 }
-                else if (remaining_children_to_start
-                         && child_slot < ap_daemons_limit) {
+                else if (remaining_children_to_start) {
                     /* we're still doing a 1-for-1 replacement of dead
                      * children with new children
                      */
-                    make_child(ap_server_conf, child_slot);
+                    make_child(ap_server_conf, child_slot, ps->bucket);
                     --remaining_children_to_start;
                 }
             }
-            else if (ap_unregister_extra_mpm_process(pid.pid) == 1) {
-                /* handled */
 #if APR_HAS_OTHER_CHILD
-            }
             else if (apr_proc_other_child_alert(&pid, APR_OC_REASON_DEATH,
                                                 status) == 0) {
                 /* handled */
-#endif
             }
-            else if (is_graceful) {
+#endif
+            else if (retained->mpm->was_graceful) {
                 /* Great, we've probably just lost a slot in the
                  * scoreboard.  Somehow we don't know about this child.
                  */
                 ap_log_error(APLOG_MARK, APLOG_WARNING, 0,
-                             ap_server_conf,
+                             ap_server_conf, APLOGNO(00488)
                              "long lost child came home! (pid %ld)",
                              (long) pid.pid);
             }
@@ -2188,31 +2853,53 @@ static void server_main_loop(int remaining_children_to_start)
             continue;
         }
 
-        perform_idle_server_maintenance();
+        for (i = 0; i < num_buckets; i++) {
+            perform_idle_server_maintenance(i, num_buckets);
+        }
     }
 }
 
 static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
 {
+    int num_buckets = retained->mpm->num_buckets;
     int remaining_children_to_start;
+    int i;
 
     ap_log_pid(pconf, ap_pid_fname);
 
-    if (!is_graceful) {
+    if (!retained->mpm->was_graceful) {
         if (ap_run_pre_mpm(s->process->pool, SB_SHARED) != OK) {
-            mpm_state = AP_MPMQ_STOPPING;
-            return DONE;
+            retained->mpm->mpm_state = AP_MPMQ_STOPPING;
+            return !OK;
         }
         /* fix the generation number in the global score; we just got a new,
          * cleared scoreboard
          */
-        ap_scoreboard_image->global->running_generation = my_generation;
+        ap_scoreboard_image->global->running_generation = retained->mpm->my_generation;
+    }
+
+    if (!one_process) {
+        ap_fatal_signal_setup(ap_server_conf, pconf);
     }
+    ap_unixd_mpm_set_signals(pconf, one_process);
 
-    set_signals();
-    /* Don't thrash... */
-    if (max_spare_threads < min_spare_threads + threads_per_child)
-        max_spare_threads = min_spare_threads + threads_per_child;
+    /* Don't thrash: num_buckets depends on the system and the number
+     * of online CPU cores, so adjust the limits below accordingly.
+     */
+    if (active_daemons_limit < num_buckets)
+        active_daemons_limit = num_buckets;
+    if (ap_daemons_to_start < num_buckets)
+        ap_daemons_to_start = num_buckets;
+    /* We want to create as many children at a time as there are buckets,
+     * so as to accept connections optimally (evenly distributed across buckets).
+     * Thus min_spare_threads should at least maintain num_buckets children,
+     * and max_spare_threads should allow num_buckets more children without
+     * triggering immediately (i.e. a margin of num_buckets idle threads, one per bucket).
+     */
+    if (min_spare_threads < threads_per_child * (num_buckets - 1) + num_buckets)
+        min_spare_threads = threads_per_child * (num_buckets - 1) + num_buckets;
+    if (max_spare_threads < min_spare_threads + (threads_per_child + 1) * num_buckets)
+        max_spare_threads = min_spare_threads + (threads_per_child + 1) * num_buckets;
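
As a worked example of the floors just applied (illustrative values, not defaults taken from this patch): with ThreadsPerChild 25 and 4 listener buckets, MinSpareThreads is raised to at least 25 * (4 - 1) + 4 = 79 and MaxSpareThreads to at least 79 + (25 + 1) * 4 = 183. The same arithmetic in plain C:

#include <stdio.h>

int main(void)
{
    /* Illustrative configuration only. */
    int threads_per_child = 25, num_buckets = 4;
    int min_spare = threads_per_child * (num_buckets - 1) + num_buckets;
    int max_spare = min_spare + (threads_per_child + 1) * num_buckets;
    printf("MinSpareThreads >= %d, MaxSpareThreads >= %d\n",
           min_spare, max_spare);   /* 79 and 183 */
    return 0;
}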
 
     /* If we're doing a graceful_restart then we're going to see a lot
      * of children exiting immediately when we get into the main loop
@@ -2224,54 +2911,54 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
      * supposed to start up without the 1 second penalty between each fork.
      */
     remaining_children_to_start = ap_daemons_to_start;
-    if (remaining_children_to_start > ap_daemons_limit) {
-        remaining_children_to_start = ap_daemons_limit;
+    if (remaining_children_to_start > active_daemons_limit) {
+        remaining_children_to_start = active_daemons_limit;
     }
-    if (!is_graceful) {
+    if (!retained->mpm->was_graceful) {
         startup_children(remaining_children_to_start);
         remaining_children_to_start = 0;
     }
     else {
         /* give the system some time to recover before kicking into
          * exponential mode */
-        hold_off_on_exponential_spawning = 10;
+        retained->hold_off_on_exponential_spawning = 10;
     }
 
-    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
+    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00489)
                  "%s configured -- resuming normal operations",
                  ap_get_server_description());
-    ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf,
+    ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf, APLOGNO(00490)
                  "Server built: %s", ap_get_server_built());
     ap_log_command_line(plog, s);
+    ap_log_mpm_common(s);
 
-    restart_pending = shutdown_pending = 0;
-    mpm_state = AP_MPMQ_RUNNING;
+    retained->mpm->mpm_state = AP_MPMQ_RUNNING;
 
-    server_main_loop(remaining_children_to_start);
-    mpm_state = AP_MPMQ_STOPPING;
+    server_main_loop(remaining_children_to_start, num_buckets);
+    retained->mpm->mpm_state = AP_MPMQ_STOPPING;
 
-    if (shutdown_pending && !is_graceful) {
+    if (retained->mpm->shutdown_pending && retained->mpm->is_ungraceful) {
         /* Time to shut down:
          * Kill child processes, tell them to call child_exit, etc...
          */
-        ap_event_pod_killpg(pod, ap_daemons_limit, FALSE);
-        ap_reclaim_child_processes(1);  /* Start with SIGTERM */
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, active_daemons_limit,
+                               AP_MPM_PODX_RESTART);
+        }
+        ap_reclaim_child_processes(1, /* Start with SIGTERM */
+                                   event_note_child_killed);
 
         if (!child_fatal) {
             /* cleanup pid file on normal shutdown */
-            const char *pidfile = NULL;
-            pidfile = ap_server_root_relative(pconf, ap_pid_fname);
-            if (pidfile != NULL && unlink(pidfile) == 0)
-                ap_log_error(APLOG_MARK, APLOG_INFO, 0,
-                             ap_server_conf,
-                             "removed PID file %s (pid=%ld)",
-                             pidfile, (long) getpid());
-
+            ap_remove_pid(pconf, ap_pid_fname);
             ap_log_error(APLOG_MARK, APLOG_NOTICE, 0,
-                         ap_server_conf, "caught SIGTERM, shutting down");
+                         ap_server_conf, APLOGNO(00491) "caught SIGTERM, shutting down");
         }
+
         return DONE;
-    } else if (shutdown_pending) {
+    }
+
+    if (retained->mpm->shutdown_pending) {
         /* Time to gracefully shut down:
          * Kill child processes, tell them to call child_exit, etc...
          */
@@ -2281,20 +2968,16 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
 
         /* Close our listeners, and then ask our children to do same */
         ap_close_listeners();
-        ap_event_pod_killpg(pod, ap_daemons_limit, TRUE);
-        ap_relieve_child_processes();
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, active_daemons_limit,
+                               AP_MPM_PODX_GRACEFUL);
+        }
+        ap_relieve_child_processes(event_note_child_killed);
 
         if (!child_fatal) {
             /* cleanup pid file on normal shutdown */
-            const char *pidfile = NULL;
-            pidfile = ap_server_root_relative (pconf, ap_pid_fname);
-            if ( pidfile != NULL && unlink(pidfile) == 0)
-                ap_log_error(APLOG_MARK, APLOG_INFO, 0,
-                             ap_server_conf,
-                             "removed PID file %s (pid=%ld)",
-                             pidfile, (long)getpid());
-
-            ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
+            ap_remove_pid(pconf, ap_pid_fname);
+            ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00492)
                          "caught " AP_SIG_GRACEFUL_STOP_STRING
                          ", shutting down gracefully");
         }
@@ -2305,38 +2988,39 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
         }
 
         /* Don't really exit until each child has finished */
-        shutdown_pending = 0;
+        retained->mpm->shutdown_pending = 0;
         do {
             /* Pause for a second */
             apr_sleep(apr_time_from_sec(1));
 
             /* Relieve any children which have now exited */
-            ap_relieve_child_processes();
+            ap_relieve_child_processes(event_note_child_killed);
 
             active_children = 0;
-            for (index = 0; index < ap_daemons_limit; ++index) {
+            for (index = 0; index < retained->max_daemons_limit; ++index) {
                 if (ap_mpm_safe_kill(MPM_CHILD_PID(index), 0) == APR_SUCCESS) {
                     active_children = 1;
                     /* Having just one child is enough to stay around */
                     break;
                 }
             }
-        } while (!shutdown_pending && active_children &&
+        } while (!retained->mpm->shutdown_pending && active_children &&
                  (!ap_graceful_shutdown_timeout || apr_time_now() < cutoff));
 
         /* We might be here because we received SIGTERM, either
          * way, try and make sure that all of our processes are
          * really dead.
          */
-        ap_event_pod_killpg(pod, ap_daemons_limit, FALSE);
-        ap_reclaim_child_processes(1);
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, active_daemons_limit,
+                               AP_MPM_PODX_RESTART);
+        }
+        ap_reclaim_child_processes(1, event_note_child_killed);
 
         return DONE;
     }
 
     /* we've been told to restart */
-    apr_signal(SIGHUP, SIG_IGN);
-
     if (one_process) {
         /* not worth thinking about */
         return DONE;
@@ -2346,16 +3030,18 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
     /* XXX: we really need to make sure this new generation number isn't in
      * use by any of the children.
      */
-    ++my_generation;
-    ap_scoreboard_image->global->running_generation = my_generation;
+    ++retained->mpm->my_generation;
+    ap_scoreboard_image->global->running_generation = retained->mpm->my_generation;
 
-    if (is_graceful) {
-        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
+    if (!retained->mpm->is_ungraceful) {
+        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00493)
                      AP_SIG_GRACEFUL_STRING
                      " received.  Doing graceful restart");
         /* wake up the children...time to die.  But we'll have more soon */
-        ap_event_pod_killpg(pod, ap_daemons_limit, TRUE);
-
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, active_daemons_limit,
+                               AP_MPM_PODX_GRACEFUL);
+        }
 
         /* This is mostly for debugging... so that we know what is still
          * gracefully dealing with existing request.
@@ -2367,16 +3053,71 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
          * and a SIGHUP, we may as well use the same signal, because some user
          * pthreads are stealing signals from us left and right.
          */
-        ap_event_pod_killpg(pod, ap_daemons_limit, FALSE);
+        for (i = 0; i < num_buckets; i++) {
+            ap_mpm_podx_killpg(all_buckets[i].pod, active_daemons_limit,
+                               AP_MPM_PODX_RESTART);
+        }
 
-        ap_reclaim_child_processes(1);  /* Start with SIGTERM */
-        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
+        ap_reclaim_child_processes(1,  /* Start with SIGTERM */
+                                   event_note_child_killed);
+        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00494)
                      "SIGHUP received.  Attempting to restart");
     }
 
+    active_daemons = 0;
+
+    return OK;
+}
+
+static void setup_slave_conn(conn_rec *c, void *csd) 
+{
+    event_conn_state_t *mcs;
+    event_conn_state_t *cs;
+    
+    mcs = ap_get_module_config(c->master->conn_config, &mpm_event_module);
+    
+    cs = apr_pcalloc(c->pool, sizeof(*cs));
+    cs->c = c;
+    cs->r = NULL;
+    cs->sc = mcs->sc;
+    cs->suspended = 0;
+    cs->p = c->pool;
+    cs->bucket_alloc = c->bucket_alloc;
+    cs->pfd = mcs->pfd;
+    cs->pub = mcs->pub;
+    cs->pub.state = CONN_STATE_READ_REQUEST_LINE;
+    cs->pub.sense = CONN_SENSE_DEFAULT;
+    
+    c->cs = &(cs->pub);
+    ap_set_module_config(c->conn_config, &mpm_event_module, cs);
+}
+
+static int event_pre_connection(conn_rec *c, void *csd)
+{
+    if (c->master && (!c->cs || c->cs == c->master->cs)) {
+        setup_slave_conn(c, csd);
+    }
     return OK;
 }
 
+static int event_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
+                                 const char *protocol)
+{
+    if (!r && s) {
+        /* Connection-based protocol switch: set the correct server
+         * configuration, so that timeouts, keepalives and such are used
+         * for the server the connection was switched to.
+         * Normally we set this in post_read_request, but with a protocol
+         * other than http/1.1 that hook might never run.
+         */
+        event_conn_state_t *cs;
+        
+        cs = ap_get_module_config(c->conn_config, &mpm_event_module);
+        cs->sc = ap_get_module_config(s->module_config, &mpm_event_module);
+    }
+    return DECLINED;
+}
+
 /* This really should be a post_config hook, but the error log is already
  * redirected by that point, so we need to do this in the open_logs phase.
  */
@@ -2385,12 +3126,15 @@ static int event_open_logs(apr_pool_t * p, apr_pool_t * plog,
 {
     int startup = 0;
     int level_flags = 0;
+    int num_buckets = 0;
+    ap_listen_rec **listen_buckets;
     apr_status_t rv;
+    int i;
 
     pconf = p;
 
     /* the reverse of pre_config, we want this only the first time around */
-    if (retained->module_loads == 1) {
+    if (retained->mpm->module_loads == 1) {
         startup = 1;
         level_flags |= APLOG_STARTUP;
     }
@@ -2399,17 +3143,67 @@ static int event_open_logs(apr_pool_t * p, apr_pool_t * plog,
         ap_log_error(APLOG_MARK, APLOG_ALERT | level_flags, 0,
                      (startup ? NULL : s),
                      "no listening sockets available, shutting down");
-        return DONE;
+        return !OK;
     }
 
-    if (!one_process) {
-        if ((rv = ap_event_pod_open(pconf, &pod))) {
+    if (one_process) {
+        num_buckets = 1;
+    }
+    else if (retained->mpm->was_graceful) {
+        /* Preserve the number of buckets on graceful restarts. */
+        num_buckets = retained->mpm->num_buckets;
+    }
+    if ((rv = ap_duplicate_listeners(pconf, ap_server_conf,
+                                     &listen_buckets, &num_buckets))) {
+        ap_log_error(APLOG_MARK, APLOG_CRIT | level_flags, rv,
+                     (startup ? NULL : s),
+                     "could not duplicate listeners");
+        return !OK;
+    }
+
+    all_buckets = apr_pcalloc(pconf, num_buckets * sizeof(*all_buckets));
+    for (i = 0; i < num_buckets; i++) {
+        if (!one_process && /* no POD in one_process mode */
+                (rv = ap_mpm_podx_open(pconf, &all_buckets[i].pod))) {
             ap_log_error(APLOG_MARK, APLOG_CRIT | level_flags, rv,
                          (startup ? NULL : s),
                          "could not open pipe-of-death");
-            return DONE;
+            return !OK;
+        }
+        all_buckets[i].listeners = listen_buckets[i];
+    }
+
+    if (retained->mpm->max_buckets < num_buckets) {
+        int new_max, *new_ptr;
+        new_max = retained->mpm->max_buckets * 2;
+        if (new_max < num_buckets) {
+            new_max = num_buckets;
+        }
+        new_ptr = (int *)apr_palloc(ap_pglobal, new_max * sizeof(int));
+        memcpy(new_ptr, retained->idle_spawn_rate,
+               retained->mpm->num_buckets * sizeof(int));
+        retained->idle_spawn_rate = new_ptr;
+        retained->mpm->max_buckets = new_max;
+    }
+    if (retained->mpm->num_buckets < num_buckets) {
+        int rate_max = 1;
+        /* If new buckets are added, set their idle spawn rate to
+         * the highest so far, so that they get filled as quickly
+         * as the existing ones.
+         */
+        for (i = 0; i < retained->mpm->num_buckets; i++) {
+            if (rate_max < retained->idle_spawn_rate[i]) {
+                rate_max = retained->idle_spawn_rate[i];
+            }
+        }
+        for (/* up to date i */; i < num_buckets; i++) {
+            retained->idle_spawn_rate[i] = rate_max;
         }
     }
+    retained->mpm->num_buckets = num_buckets;
+
+    /* for skiplist */
+    srand((unsigned int)apr_time_now());
     return OK;
 }
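
A standalone sketch of the bucket bookkeeping above (plain C, no APR pools; function names are made up for illustration): the per-bucket rate array grows by doubling its capacity, and any buckets added on restart are seeded with the highest existing rate so they fill up as quickly as the busiest bucket.

#include <stdio.h>
#include <stdlib.h>

/* Grow the rate array to hold new_n buckets, doubling the capacity,
 * and seed the new entries with the current maximum rate.
 */
static int *grow_rates(int *rates, int *max_n, int old_n, int new_n)
{
    int i, rate_max = 1;

    if (*max_n < new_n) {
        int grown = *max_n * 2;
        if (grown < new_n)
            grown = new_n;
        rates = realloc(rates, grown * sizeof(int));
        *max_n = grown;
    }
    for (i = 0; i < old_n; i++)
        if (rate_max < rates[i])
            rate_max = rates[i];
    for (i = old_n; i < new_n; i++)
        rates[i] = rate_max;
    return rates;
}

int main(void)
{
    int max_n = 2, *rates = malloc(max_n * sizeof(int));
    rates[0] = 4;
    rates[1] = 1;
    rates = grow_rates(rates, &max_n, 2, 5);
    printf("capacity %d, new buckets seeded with %d\n", max_n, rates[4]);
    free(rates);
    return 0;
}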
 
@@ -2419,8 +3213,7 @@ static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog,
     int no_detach, debug, foreground;
     apr_status_t rv;
     const char *userdata_key = "mpm_event_module";
-
-    mpm_state = AP_MPMQ_STARTING;
+    int test_atomics = 0;
 
     debug = ap_exists_config_define("DEBUG");
 
@@ -2434,18 +3227,41 @@ static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog,
         foreground = ap_exists_config_define("FOREGROUND");
     }
 
-    /* sigh, want this only the second time around */
     retained = ap_retained_data_get(userdata_key);
     if (!retained) {
         retained = ap_retained_data_create(userdata_key, sizeof(*retained));
+        retained->mpm = ap_unixd_mpm_get_retained_data();
+        retained->max_daemons_limit = -1;
+        if (retained->mpm->module_loads) {
+            test_atomics = 1;
+        }
+    }
+    retained->mpm->mpm_state = AP_MPMQ_STARTING;
+    if (retained->mpm->baton != retained) {
+        retained->mpm->was_graceful = 0;
+        retained->mpm->baton = retained;
+    }
+    ++retained->mpm->module_loads;
+
+    /* test once for correct operation of fdqueue */
+    if (test_atomics || retained->mpm->module_loads == 2) {
+        static apr_uint32_t foo1, foo2;
+
+        apr_atomic_set32(&foo1, 100);
+        foo2 = apr_atomic_add32(&foo1, -10);
+        if (foo2 != 100 || foo1 != 90) {
+            ap_log_error(APLOG_MARK, APLOG_CRIT, 0, NULL, APLOGNO(02405)
+                         "atomics not working as expected - add32 of negative number");
+            return HTTP_INTERNAL_SERVER_ERROR;
+        }
     }
-    ++retained->module_loads;
-    if (retained->module_loads == 2) {
-        is_graceful = 0;
+
+    /* sigh, want this only the second time around */
+    if (retained->mpm->module_loads == 2) {
         rv = apr_pollset_create(&event_pollset, 1, plog,
                                 APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
         if (rv != APR_SUCCESS) {
-            ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL,
+            ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL, APLOGNO(00495)
                          "Couldn't create a Thread Safe Pollset. "
                          "Is it supported on your platform?"
                          "Also check system or user limits!");
@@ -2454,32 +3270,95 @@ static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog,
         apr_pollset_destroy(event_pollset);
 
         if (!one_process && !foreground) {
+            /* before we detach, setup crash handlers to log to errorlog */
+            ap_fatal_signal_setup(ap_server_conf, pconf);
             rv = apr_proc_detach(no_detach ? APR_PROC_DETACH_FOREGROUND
                                  : APR_PROC_DETACH_DAEMONIZE);
             if (rv != APR_SUCCESS) {
-                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL,
+                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL, APLOGNO(00496)
                              "apr_proc_detach failed");
                 return HTTP_INTERNAL_SERVER_ERROR;
             }
         }
-        parent_pid = ap_my_pid = getpid();
     }
 
+    parent_pid = ap_my_pid = getpid();
+
     ap_listen_pre_config();
     ap_daemons_to_start = DEFAULT_START_DAEMON;
     min_spare_threads = DEFAULT_MIN_FREE_DAEMON * DEFAULT_THREADS_PER_CHILD;
     max_spare_threads = DEFAULT_MAX_FREE_DAEMON * DEFAULT_THREADS_PER_CHILD;
     server_limit = DEFAULT_SERVER_LIMIT;
     thread_limit = DEFAULT_THREAD_LIMIT;
-    ap_daemons_limit = server_limit;
+    active_daemons_limit = server_limit;
     threads_per_child = DEFAULT_THREADS_PER_CHILD;
-    max_clients = ap_daemons_limit * threads_per_child;
-    ap_pid_fname = DEFAULT_PIDLOG;
-    ap_max_requests_per_child = DEFAULT_MAX_REQUESTS_PER_CHILD;
+    max_workers = active_daemons_limit * threads_per_child;
+    defer_linger_chain = NULL;
+    had_healthy_child = 0;
     ap_extended_status = 0;
-    ap_max_mem_free = APR_ALLOCATOR_MAX_FREE_UNLIMITED;
 
-    apr_cpystrn(ap_coredump_dir, ap_server_root, sizeof(ap_coredump_dir));
+    return OK;
+}
+
+static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
+                             apr_pool_t *ptemp, server_rec *s)
+{
+    struct {
+        struct timeout_queue *tail, *q;
+        apr_hash_t *hash;
+    } wc, ka;
+
+    /* Not needed in pre_config stage */
+    if (ap_state_query(AP_SQ_MAIN_STATE) == AP_SQ_MS_CREATE_PRE_CONFIG) {
+        return OK;
+    }
+
+    wc.tail = ka.tail = NULL;
+    wc.hash = apr_hash_make(ptemp);
+    ka.hash = apr_hash_make(ptemp);
+
+    linger_q = TO_QUEUE_MAKE(pconf, apr_time_from_sec(MAX_SECS_TO_LINGER),
+                             NULL);
+    short_linger_q = TO_QUEUE_MAKE(pconf, apr_time_from_sec(SECONDS_TO_LINGER),
+                                   NULL);
+
+    for (; s; s = s->next) {
+        event_srv_cfg *sc = apr_pcalloc(pconf, sizeof *sc);
+
+        ap_set_module_config(s->module_config, &mpm_event_module, sc);
+        if (!wc.tail) {
+            /* The main server uses the global queues */
+            wc.q = TO_QUEUE_MAKE(pconf, s->timeout, NULL);
+            apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
+            wc.tail = write_completion_q = wc.q;
+
+            ka.q = TO_QUEUE_MAKE(pconf, s->keep_alive_timeout, NULL);
+            apr_hash_set(ka.hash, &s->keep_alive_timeout,
+                         sizeof s->keep_alive_timeout, ka.q);
+            ka.tail = keepalive_q = ka.q;
+        }
+        else {
+            /* The vhosts use any existing queue with the same timeout,
+             * or their own queue(s) if there isn't one */
+            wc.q = apr_hash_get(wc.hash, &s->timeout, sizeof s->timeout);
+            if (!wc.q) {
+                wc.q = TO_QUEUE_MAKE(pconf, s->timeout, wc.tail);
+                apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
+                wc.tail = wc.tail->next = wc.q;
+            }
+
+            ka.q = apr_hash_get(ka.hash, &s->keep_alive_timeout,
+                                sizeof s->keep_alive_timeout);
+            if (!ka.q) {
+                ka.q = TO_QUEUE_MAKE(pconf, s->keep_alive_timeout, ka.tail);
+                apr_hash_set(ka.hash, &s->keep_alive_timeout,
+                             sizeof s->keep_alive_timeout, ka.q);
+                ka.tail = ka.tail->next = ka.q;
+            }
+        }
+        sc->wc_q = wc.q;
+        sc->ka_q = ka.q;
+    }
 
     return OK;
 }
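
The loop above gives every virtual host a write-completion and a keep-alive queue, but hosts with the same timeout value end up sharing one queue. A minimal standalone sketch of that grouping (plain C, no APR hashes; the struct and function names are illustrative):

#include <stdio.h>

#define MAX_QUEUES 16

struct tq { int timeout; struct tq *next; };

static struct tq queues[MAX_QUEUES];
static int nqueues;

/* Return the queue for this timeout, creating and chaining a new one
 * after *tail only if no queue with that timeout exists yet.
 */
static struct tq *get_queue(struct tq **tail, int timeout)
{
    int i;
    struct tq *q;

    for (i = 0; i < nqueues; i++)
        if (queues[i].timeout == timeout)
            return &queues[i];
    q = &queues[nqueues++];
    q->timeout = timeout;
    q->next = NULL;
    if (*tail)
        (*tail)->next = q;
    *tail = q;
    return q;
}

int main(void)
{
    struct tq *tail = NULL;
    int timeouts[] = { 60, 30, 60, 5 };  /* main server plus three vhosts */
    int i;

    for (i = 0; i < 4; i++)
        printf("timeout %2ds -> queue #%d\n", timeouts[i],
               (int)(get_queue(&tail, timeouts[i]) - queues));
    return 0;
}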
@@ -2490,20 +3369,18 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     int startup = 0;
 
     /* the reverse of pre_config, we want this only the first time around */
-    if (retained->module_loads == 1) {
+    if (retained->mpm->module_loads == 1) {
         startup = 1;
     }
 
     if (server_limit > MAX_SERVER_LIMIT) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00497)
                          "WARNING: ServerLimit of %d exceeds compile-time "
-                         "limit of", server_limit);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         " %d servers, decreasing to %d.",
-                         MAX_SERVER_LIMIT, MAX_SERVER_LIMIT);
+                         "limit of %d servers, decreasing to %d.",
+                         server_limit, MAX_SERVER_LIMIT, MAX_SERVER_LIMIT);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00498)
                          "ServerLimit of %d exceeds compile-time limit "
                          "of %d, decreasing to match",
                          server_limit, MAX_SERVER_LIMIT);
@@ -2512,11 +3389,11 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (server_limit < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00499)
                          "WARNING: ServerLimit of %d not allowed, "
                          "increasing to 1.", server_limit);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00500)
                          "ServerLimit of %d not allowed, increasing to 1",
                          server_limit);
         }
@@ -2531,7 +3408,7 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (server_limit != retained->first_server_limit) {
         /* don't need a startup console version here */
-        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00501)
                      "changing ServerLimit to %d from original value of %d "
                      "not allowed during restart",
                      server_limit, retained->first_server_limit);
@@ -2540,14 +3417,12 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
 
     if (thread_limit > MAX_THREAD_LIMIT) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00502)
                          "WARNING: ThreadLimit of %d exceeds compile-time "
-                         "limit of", thread_limit);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         " %d threads, decreasing to %d.",
-                         MAX_THREAD_LIMIT, MAX_THREAD_LIMIT);
+                         "limit of %d threads, decreasing to %d.",
+                         thread_limit, MAX_THREAD_LIMIT, MAX_THREAD_LIMIT);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00503)
                          "ThreadLimit of %d exceeds compile-time limit "
                          "of %d, decreasing to match",
                          thread_limit, MAX_THREAD_LIMIT);
@@ -2556,11 +3431,11 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (thread_limit < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00504)
                          "WARNING: ThreadLimit of %d not allowed, "
                          "increasing to 1.", thread_limit);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00505)
                          "ThreadLimit of %d not allowed, increasing to 1",
                          thread_limit);
         }
@@ -2575,7 +3450,7 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (thread_limit != retained->first_thread_limit) {
         /* don't need a startup console version here */
-        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00506)
                      "changing ThreadLimit to %d from original value of %d "
                      "not allowed during restart",
                      thread_limit, retained->first_thread_limit);
@@ -2584,17 +3459,13 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
 
     if (threads_per_child > thread_limit) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00507)
                          "WARNING: ThreadsPerChild of %d exceeds ThreadLimit "
-                         "of", threads_per_child);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         " %d threads, decreasing to %d.",
-                         thread_limit, thread_limit);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         " To increase, please see the ThreadLimit "
-                         "directive.");
+                         "of %d threads, decreasing to %d. "
+                         "To increase, please see the ThreadLimit directive.",
+                         threads_per_child, thread_limit, thread_limit);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00508)
                          "ThreadsPerChild of %d exceeds ThreadLimit "
                          "of %d, decreasing to match",
                          threads_per_child, thread_limit);
@@ -2603,92 +3474,82 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
     }
     else if (threads_per_child < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00509)
                          "WARNING: ThreadsPerChild of %d not allowed, "
                          "increasing to 1.", threads_per_child);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00510)
                          "ThreadsPerChild of %d not allowed, increasing to 1",
                          threads_per_child);
         }
         threads_per_child = 1;
     }
 
-    if (max_clients < threads_per_child) {
+    if (max_workers < threads_per_child) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         "WARNING: MaxClients of %d is less than "
-                         "ThreadsPerChild of", max_clients);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         " %d, increasing to %d.  MaxClients must be at "
-                         "least as large",
-                         threads_per_child, threads_per_child);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         " as the number of threads in a single server.");
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00511)
+                         "WARNING: MaxRequestWorkers of %d is less than "
+                         "ThreadsPerChild of %d, increasing to %d. "
+                         "MaxRequestWorkers must be at least as large "
+                         "as the number of threads in a single server.",
+                         max_workers, threads_per_child, threads_per_child);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
-                         "MaxClients of %d is less than ThreadsPerChild "
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00512)
+                         "MaxRequestWorkers of %d is less than ThreadsPerChild "
                          "of %d, increasing to match",
-                         max_clients, threads_per_child);
+                         max_workers, threads_per_child);
         }
-        max_clients = threads_per_child;
+        max_workers = threads_per_child;
     }
 
-    ap_daemons_limit = max_clients / threads_per_child;
+    active_daemons_limit = max_workers / threads_per_child;
 
-    if (max_clients % threads_per_child) {
-        int tmp_max_clients = ap_daemons_limit * threads_per_child;
+    if (max_workers % threads_per_child) {
+        int tmp_max_workers = active_daemons_limit * threads_per_child;
 
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         "WARNING: MaxClients of %d is not an integer "
-                         "multiple of", max_clients);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         " ThreadsPerChild of %d, decreasing to nearest "
-                         "multiple %d,", threads_per_child,
-                         tmp_max_clients);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         " for a maximum of %d servers.",
-                         ap_daemons_limit);
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00513)
+                         "WARNING: MaxRequestWorkers of %d is not an integer "
+                         "multiple of ThreadsPerChild of %d, decreasing to nearest "
+                         "multiple %d, for a maximum of %d servers.",
+                         max_workers, threads_per_child, tmp_max_workers,
+                         active_daemons_limit);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
-                         "MaxClients of %d is not an integer multiple of "
-                         "ThreadsPerChild of %d, decreasing to nearest "
-                         "multiple %d", max_clients, threads_per_child,
-                         tmp_max_clients);
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00514)
+                         "MaxRequestWorkers of %d is not an integer multiple "
+                         "of ThreadsPerChild of %d, decreasing to nearest "
+                         "multiple %d", max_workers, threads_per_child,
+                         tmp_max_workers);
         }
-        max_clients = tmp_max_clients;
+        max_workers = tmp_max_workers;
     }
 
-    if (ap_daemons_limit > server_limit) {
+    if (active_daemons_limit > server_limit) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         "WARNING: MaxClients of %d would require %d "
-                         "servers and ", max_clients, ap_daemons_limit);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         " would exceed ServerLimit of %d, decreasing to %d.",
-                         server_limit, server_limit * threads_per_child);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         " To increase, please see the ServerLimit "
-                         "directive.");
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00515)
+                         "WARNING: MaxRequestWorkers of %d would require %d servers "
+                         "and would exceed ServerLimit of %d, decreasing to %d. "
+                         "To increase, please see the ServerLimit directive.",
+                         max_workers, active_daemons_limit, server_limit,
+                         server_limit * threads_per_child);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
-                         "MaxClients of %d would require %d servers and "
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00516)
+                         "MaxRequestWorkers of %d would require %d servers and "
                          "exceed ServerLimit of %d, decreasing to %d",
-                         max_clients, ap_daemons_limit, server_limit,
+                         max_workers, active_daemons_limit, server_limit,
                          server_limit * threads_per_child);
         }
-        ap_daemons_limit = server_limit;
+        active_daemons_limit = server_limit;
     }
 
-    /* ap_daemons_to_start > ap_daemons_limit checked in ap_mpm_run() */
-    if (ap_daemons_to_start < 0) {
+    /* ap_daemons_to_start > active_daemons_limit checked in ap_mpm_run() */
+    if (ap_daemons_to_start < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00517)
                          "WARNING: StartServers of %d not allowed, "
                          "increasing to 1.", ap_daemons_to_start);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00518)
                          "StartServers of %d not allowed, increasing to 1",
                          ap_daemons_to_start);
         }
@@ -2697,15 +3558,13 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
 
     if (min_spare_threads < 1) {
         if (startup) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
+            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00519)
                          "WARNING: MinSpareThreads of %d not allowed, "
-                         "increasing to 1", min_spare_threads);
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         " to avoid almost certain server failure.");
-            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         " Please read the documentation.");
+                         "increasing to 1 to avoid almost certain server "
+                         "failure. Please read the documentation.",
+                         min_spare_threads);
         } else {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
+            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00520)
                          "MinSpareThreads of %d not allowed, increasing to 1",
                          min_spare_threads);
         }
@@ -2733,13 +3592,18 @@ static void event_hooks(apr_pool_t * p)
      * to retrieve it, so register as REALLY_FIRST
      */
     ap_hook_pre_config(event_pre_config, NULL, NULL, APR_HOOK_REALLY_FIRST);
+    ap_hook_post_config(event_post_config, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_check_config(event_check_config, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_mpm(event_run, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_mpm_query(event_query, NULL, NULL, APR_HOOK_MIDDLE);
-    ap_hook_mpm_note_child_killed(event_note_child_killed, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_mpm_register_timed_callback(event_register_timed_callback, NULL, NULL,
                                         APR_HOOK_MIDDLE);
+    ap_hook_pre_read_request(event_pre_read_request, NULL, NULL, APR_HOOK_MIDDLE);
+    ap_hook_post_read_request(event_post_read_request, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_mpm_get_name(event_get_name, NULL, NULL, APR_HOOK_MIDDLE);
+
+    ap_hook_pre_connection(event_pre_connection, NULL, NULL, APR_HOOK_REALLY_FIRST);
+    ap_hook_protocol_switch(event_protocol_switch, NULL, NULL, APR_HOOK_REALLY_FIRST);
 }
 
 static const char *set_daemons_to_start(cmd_parms *cmd, void *dummy,
@@ -2778,15 +3642,19 @@ static const char *set_max_spare_threads(cmd_parms * cmd, void *dummy,
     return NULL;
 }
 
-static const char *set_max_clients(cmd_parms * cmd, void *dummy,
+static const char *set_max_workers(cmd_parms * cmd, void *dummy,
                                    const char *arg)
 {
     const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
     if (err != NULL) {
         return err;
     }
-
-    max_clients = atoi(arg);
+    if (!strcasecmp(cmd->cmd->name, "MaxClients")) {
+        ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, APLOGNO(00521)
+                     "MaxClients is deprecated, use MaxRequestWorkers "
+                     "instead.");
+    }
+    max_workers = atoi(arg);
     return NULL;
 }
 
@@ -2824,6 +3692,30 @@ static const char *set_thread_limit(cmd_parms * cmd, void *dummy,
     return NULL;
 }
 
+static const char *set_worker_factor(cmd_parms * cmd, void *dummy,
+                                     const char *arg)
+{
+    double val;
+    char *endptr;
+    const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+    if (err != NULL) {
+        return err;
+    }
+
+    val = strtod(arg, &endptr);
+    if (*endptr)
+        return "error parsing value";
+
+    if (val <= 0)
+        return "AsyncRequestWorkerFactor argument must be a positive number";
+
+    worker_factor = val * WORKER_FACTOR_SCALE;
+    if (worker_factor == 0)
+        worker_factor = 1;
+    return NULL;
+}
+
+
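A small standalone sketch of the fixed-point conversion done in set_worker_factor() above: the fractional directive value is multiplied by WORKER_FACTOR_SCALE (16 in this MPM) and stored as an integer, with 0 bumped to 1, so e.g. AsyncRequestWorkerFactor 1.5 is stored as 24.

#include <stdio.h>
#include <stdlib.h>

#define WORKER_FACTOR_SCALE 16

int main(void)
{
    const char *arg = "1.5";   /* e.g. AsyncRequestWorkerFactor 1.5 */
    char *end;
    double val = strtod(arg, &end);
    unsigned int worker_factor;

    if (*end || val <= 0) {
        fprintf(stderr, "invalid AsyncRequestWorkerFactor\n");
        return 1;
    }
    worker_factor = (unsigned int)(val * WORKER_FACTOR_SCALE);
    if (worker_factor == 0)
        worker_factor = 1;     /* never allow a zero factor */
    printf("worker_factor = %u\n", worker_factor);   /* prints 24 */
    return 0;
}
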
 static const command_rec event_cmds[] = {
     LISTEN_COMMANDS,
     AP_INIT_TAKE1("StartServers", set_daemons_to_start, NULL, RSRC_CONF,
@@ -2834,13 +3726,18 @@ static const command_rec event_cmds[] = {
                   "Minimum number of idle threads, to handle request spikes"),
     AP_INIT_TAKE1("MaxSpareThreads", set_max_spare_threads, NULL, RSRC_CONF,
                   "Maximum number of idle threads"),
-    AP_INIT_TAKE1("MaxClients", set_max_clients, NULL, RSRC_CONF,
+    AP_INIT_TAKE1("MaxClients", set_max_workers, NULL, RSRC_CONF,
+                  "Deprecated name of MaxRequestWorkers"),
+    AP_INIT_TAKE1("MaxRequestWorkers", set_max_workers, NULL, RSRC_CONF,
                   "Maximum number of threads alive at the same time"),
     AP_INIT_TAKE1("ThreadsPerChild", set_threads_per_child, NULL, RSRC_CONF,
                   "Number of threads each child creates"),
     AP_INIT_TAKE1("ThreadLimit", set_thread_limit, NULL, RSRC_CONF,
                   "Maximum number of worker threads per child process for this "
                   "run of Apache - Upper limit for ThreadsPerChild"),
+    AP_INIT_TAKE1("AsyncRequestWorkerFactor", set_worker_factor, NULL, RSRC_CONF,
+                  "How many additional connects will be accepted per idle "
+                  "worker thread"),
     AP_GRACEFUL_SHUTDOWN_TIMEOUT_COMMAND,
     {NULL}
 };