Cleanup effort in prep for GA push:
diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
index 066d9b6f523c93af2d2fb39f3e261b9589748b48..00084778d572f0dfc4145da4c5cc1092cc40f935 100644
--- a/server/mpm/event/event.c
+++ b/server/mpm/event/event.c
 #define apr_time_from_msec(x) (x * 1000)
 #endif
 
+#ifndef MAX_SECS_TO_LINGER
+#define MAX_SECS_TO_LINGER 30
+#endif
+#define SECONDS_TO_LINGER  2
+
 /*
  * Actual definitions of config globals
  */
 
+#ifndef DEFAULT_WORKER_FACTOR
+#define DEFAULT_WORKER_FACTOR 2
+#endif
+#define WORKER_FACTOR_SCALE   16  /* scale factor to allow fractional values */
+static unsigned int worker_factor = DEFAULT_WORKER_FACTOR * WORKER_FACTOR_SCALE;
+
 static int threads_per_child = 0;   /* Worker threads per child */
 static int ap_daemons_to_start = 0;
 static int min_spare_threads = 0;
 static int max_spare_threads = 0;
 static int ap_daemons_limit = 0;
-static int max_clients = 0;
+static int max_workers = 0;
 static int server_limit = 0;
 static int thread_limit = 0;
 static int dying = 0;
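The new worker_factor value is fixed-point, scaled by WORKER_FACTOR_SCALE, and feeds the listener's accept throttle further down in this diff: a child stops accepting once its open connections exceed threads_per_child plus the idle-worker count times this factor. A minimal sketch of that test, for orientation only (should_stop_accepting() is a hypothetical helper, not part of the patch):

    /* Sketch: mirrors the accept-throttle check in the listener loop below. */
    static int should_stop_accepting(apr_uint32_t conns, apr_uint32_t idlers)
    {
        /* with DEFAULT_WORKER_FACTOR 2, worker_factor == 32, i.e. each idle
         * worker may "cover" 32 / WORKER_FACTOR_SCALE == 2 extra connections */
        return conns > (apr_uint32_t)threads_per_child
                       + idlers * worker_factor / WORKER_FACTOR_SCALE;
    }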
@@ -166,6 +177,7 @@ static int start_thread_may_exit = 0;
 static int listener_may_exit = 0;
 static int requests_this_child;
 static int num_listensocks = 0;
+static apr_uint32_t connection_count = 0;
 static int resource_shortage = 0;
 static fd_queue_t *worker_queue;
 static fd_queue_info_t *worker_queue_info;
@@ -173,7 +185,38 @@ static int mpm_state = AP_MPMQ_STARTING;
 
 static apr_thread_mutex_t *timeout_mutex;
 APR_RING_HEAD(timeout_head_t, conn_state_t);
-static struct timeout_head_t timeout_head, keepalive_timeout_head;
+struct timeout_queue {
+    struct timeout_head_t head;
+    int count;
+    const char *tag;
+};
+static struct timeout_queue write_completion_q, keepalive_q, linger_q,
+                            short_linger_q;
+static apr_pollfd_t *listener_pollfd;
+
+/*
+ * Macros for accessing struct timeout_queue.
+ * For TO_QUEUE_APPEND and TO_QUEUE_REMOVE, timeout_mutex must be held.
+ */
+#define TO_QUEUE_APPEND(q, el)                                            \
+    do {                                                                  \
+        APR_RING_INSERT_TAIL(&(q).head, el, conn_state_t, timeout_list);  \
+        (q).count++;                                                      \
+    } while (0)
+
+#define TO_QUEUE_REMOVE(q, el)             \
+    do {                                   \
+        APR_RING_REMOVE(el, timeout_list); \
+        (q).count--;                       \
+    } while (0)
+
+#define TO_QUEUE_INIT(q)                                            \
+    do {                                                            \
+            APR_RING_INIT(&(q).head, conn_state_t, timeout_list);   \
+            (q).tag = #q;                                           \
+    } while (0)
+
+#define TO_QUEUE_ELEM_INIT(el) APR_RING_ELEM_INIT(el, timeout_list)
 
 static apr_pollset_t *event_pollset;
 
@@ -217,7 +260,6 @@ typedef enum
 typedef struct
 {
     poll_type_e type;
-    int bypass_push;
     void *baton;
 } listener_poll_type;
 
@@ -235,8 +277,8 @@ typedef struct event_retained_data {
     int maxclients_reported;
     /*
      * The max child slot ever assigned, preserved across restarts.  Necessary
-     * to deal with MaxClients changes across AP_SIG_GRACEFUL restarts.  We
-     * use this value to optimize routines that have to scan the entire
+     * to deal with MaxRequestWorkers changes across AP_SIG_GRACEFUL restarts.
+     * We use this value to optimize routines that have to scan the entire
      * scoreboard.
      */
     int max_daemons_limit;
@@ -298,6 +340,32 @@ static apr_os_thread_t *listener_os_thread;
  */
 static apr_socket_t **worker_sockets;
 
+static void disable_listensocks(int process_slot)
+{
+    int i;
+    for (i = 0; i < num_listensocks; i++) {
+        apr_pollset_remove(event_pollset, &listener_pollfd[i]);
+    }
+    ap_scoreboard_image->parent[process_slot].not_accepting = 1;
+}
+
+static void enable_listensocks(int process_slot)
+{
+    int i;
+    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
+                 "Accepting new connections again: "
+                 "%u active conns, %u idle workers",
+                 apr_atomic_read32(&connection_count),
+                 ap_queue_info_get_idlers(worker_queue_info));
+    for (i = 0; i < num_listensocks; i++)
+        apr_pollset_add(event_pollset, &listener_pollfd[i]);
+    /*
+     * XXX: This is not yet optimal. If many workers suddenly become available,
+     * XXX: the parent may kill some processes off too soon.
+     */
+    ap_scoreboard_image->parent[process_slot].not_accepting = 0;
+}
+
 static void close_worker_sockets(void)
 {
     int i;
@@ -322,7 +390,7 @@ static void wakeup_listener(void)
     }
 
     /* unblock the listener if it's waiting for a worker */
-    ap_queue_info_term(worker_queue_info); 
+    ap_queue_info_term(worker_queue_info);
 
     /*
      * we should just be able to "kill(ap_my_pid, LISTENER_SIGNAL)" on all
@@ -504,9 +572,8 @@ static int child_fatal;
 static int volatile shutdown_pending;
 static int volatile restart_pending;
 ap_generation_t volatile ap_my_generation = 0;
-static apr_uint32_t connection_count = 0;
 
-static apr_status_t decrement_connection_count(void *dummy){
+static apr_status_t decrement_connection_count(void *dummy) {
     apr_atomic_dec32(&connection_count);
     return APR_SUCCESS;
 }
@@ -654,6 +721,69 @@ static void set_signals(void)
 #endif
 }
 
+static int start_lingering_close(conn_state_t *cs)
+{
+    apr_status_t rv;
+    if (ap_start_lingering_close(cs->c)) {
+        apr_pool_clear(cs->p);
+        ap_push_pool(worker_queue_info, cs->p);
+        return 0;
+    }
+    else {
+        apr_socket_t *csd = ap_get_conn_socket(cs->c);
+        struct timeout_queue *q;
+
+        rv = apr_socket_timeout_set(csd, 0);
+        AP_DEBUG_ASSERT(rv == APR_SUCCESS);
+        /*
+         * If some module requested a shortened waiting period, only wait for
+         * 2s (SECONDS_TO_LINGER). This is useful for mitigating certain
+         * DoS attacks.
+         */
+        if (apr_table_get(cs->c->notes, "short-lingering-close")) {
+            cs->expiration_time =
+                apr_time_now() + apr_time_from_sec(SECONDS_TO_LINGER);
+            q = &short_linger_q;
+            cs->state = CONN_STATE_LINGER_SHORT;
+        }
+        else {
+            cs->expiration_time =
+                apr_time_now() + apr_time_from_sec(MAX_SECS_TO_LINGER);
+            q = &linger_q;
+            cs->state = CONN_STATE_LINGER_NORMAL;
+        }
+        apr_thread_mutex_lock(timeout_mutex);
+        TO_QUEUE_APPEND(*q, cs);
+        apr_thread_mutex_unlock(timeout_mutex);
+        cs->pfd.reqevents = APR_POLLIN | APR_POLLHUP | APR_POLLERR;
+        rv = apr_pollset_add(event_pollset, &cs->pfd);
+        if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) {
+            ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
+                         "start_lingering_close: apr_pollset_add failure");
+            AP_DEBUG_ASSERT(0);
+        }
+    }
+    return 1;
+}
+
+static int stop_lingering_close(conn_state_t *cs)
+{
+    apr_status_t rv;
+    apr_socket_t *csd = ap_get_conn_socket(cs->c);
+    ap_log_error(APLOG_MARK, APLOG_TRACE4, 0, ap_server_conf,
+                 "socket reached timeout in lingering-close state");
+    rv = apr_socket_close(csd);
+    if (rv != APR_SUCCESS) {
+        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, "error closing socket");
+        AP_DEBUG_ASSERT(0);
+    }
+    apr_pool_clear(cs->p);
+    ap_push_pool(worker_queue_info, cs->p);
+    return 0;
+}
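start_lingering_close() takes the 2s (SECONDS_TO_LINGER) path only when a module has set the "short-lingering-close" connection note; otherwise it waits up to MAX_SECS_TO_LINGER (30s). A module wanting the short path would set the note before the connection reaches lingering close, roughly as follows (illustrative, not part of this patch):

    /* request the short (2s) lingering-close path for this connection */
    apr_table_setn(c->notes, "short-lingering-close", "1");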
+
+
+
 /*****************************************************************
  * Child process main loop.
  */
@@ -663,7 +793,6 @@ static int process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock
                           int my_thread_num)
 {
     conn_rec *c;
-    listener_poll_type *pt;
     long conn_id = ID_FROM_CHILD_THREAD(my_child_num, my_thread_num);
     int rc;
     ap_sb_handle_t *sbh;
@@ -671,11 +800,8 @@ static int process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock
     ap_create_sb_handle(&sbh, p, my_child_num, my_thread_num);
 
     if (cs == NULL) {           /* This is a new connection */
-
+        listener_poll_type *pt = apr_pcalloc(p, sizeof(*pt));
         cs = apr_pcalloc(p, sizeof(conn_state_t));
-
-        pt = apr_pcalloc(p, sizeof(*pt));
-
         cs->bucket_alloc = apr_bucket_alloc_create(p);
         c = ap_run_create_connection(p, ap_server_conf, sock,
                                      conn_id, sbh, cs->bucket_alloc);
@@ -689,17 +815,16 @@ static int process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock
         cs->pfd.reqevents = APR_POLLIN;
         cs->pfd.desc.s = sock;
         pt->type = PT_CSD;
-        pt->bypass_push = 1;
         pt->baton = cs;
         cs->pfd.client_data = pt;
-        APR_RING_ELEM_INIT(cs, timeout_list);
+        TO_QUEUE_ELEM_INIT(cs);
 
         ap_update_vhost_given_ip(c);
 
         rc = ap_run_pre_connection(c, sock);
         if (rc != OK && rc != DONE) {
-            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
-                         "process_socket: connection aborted");
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c,
+                          "process_socket: connection aborted");
             c->aborted = 1;
         }
 
@@ -722,7 +847,6 @@ static int process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock
     else {
         c = cs->c;
         c->sbh = sbh;
-        pt = cs->pfd.client_data;
         c->current_thread = thd;
     }
 
@@ -755,13 +879,14 @@ read_request:
     if (cs->state == CONN_STATE_WRITE_COMPLETION) {
         ap_filter_t *output_filter = c->output_filters;
         apr_status_t rv;
+        ap_update_child_status_from_conn(sbh, SERVER_BUSY_WRITE, c);
         while (output_filter->next != NULL) {
             output_filter = output_filter->next;
         }
         rv = output_filter->frec->filter_func.out_func(output_filter, NULL);
         if (rv != APR_SUCCESS) {
-            ap_log_error(APLOG_MARK, APLOG_WARNING, rv, ap_server_conf,
-                     "network write failure in core output filter");
+            ap_log_cerror(APLOG_MARK, APLOG_WARNING, rv, c,
+                          "network write failure in core output filter");
             cs->state = CONN_STATE_LINGER;
         }
         else if (c->data_in_output_filters) {
@@ -771,9 +896,8 @@ read_request:
              */
             cs->expiration_time = ap_server_conf->timeout + apr_time_now();
             apr_thread_mutex_lock(timeout_mutex);
-            APR_RING_INSERT_TAIL(&timeout_head, cs, conn_state_t, timeout_list);
+            TO_QUEUE_APPEND(write_completion_q, cs);
             apr_thread_mutex_unlock(timeout_mutex);
-            pt->bypass_push = 0;
             cs->pfd.reqevents = APR_POLLOUT | APR_POLLHUP | APR_POLLERR;
             rc = apr_pollset_add(event_pollset, &cs->pfd);
             return 1;
@@ -792,14 +916,11 @@ read_request:
     }
 
     if (cs->state == CONN_STATE_LINGER) {
-        ap_lingering_close(c);
-        apr_pool_clear(p);
-        ap_push_pool(worker_queue_info, p);
-        return 0;
+        if (!start_lingering_close(cs))
+            return 0;
     }
     else if (cs->state == CONN_STATE_CHECK_REQUEST_LINE_READABLE) {
         apr_status_t rc;
-        listener_poll_type *pt = (listener_poll_type *) cs->pfd.client_data;
 
         /* It greatly simplifies the logic to use a single timeout value here
          * because the new element can just be added to the end of the list and
@@ -812,10 +933,9 @@ read_request:
         cs->expiration_time = ap_server_conf->keep_alive_timeout +
                               apr_time_now();
         apr_thread_mutex_lock(timeout_mutex);
-        APR_RING_INSERT_TAIL(&keepalive_timeout_head, cs, conn_state_t, timeout_list);
+        TO_QUEUE_APPEND(keepalive_q, cs);
         apr_thread_mutex_unlock(timeout_mutex);
 
-        pt->bypass_push = 0;
         /* Add work to pollset. */
         cs->pfd.reqevents = APR_POLLIN;
         rc = apr_pollset_add(event_pollset, &cs->pfd);
@@ -843,22 +963,16 @@ static void check_infinite_requests(void)
 }
 
 static void close_listeners(int process_slot, int *closed) {
-    if (!*closed){
-        ap_listen_rec *lr;
+    if (!*closed) {
         int i;
-        for (lr = ap_listeners; lr != NULL; lr = lr->next) {
-            apr_pollfd_t *pfd = apr_pcalloc(pchild, sizeof(*pfd));
-            pfd->desc_type = APR_POLL_SOCKET;
-            pfd->desc.s = lr->sd;
-            apr_pollset_remove(event_pollset, pfd);
-        }
+        disable_listensocks(process_slot);
         ap_close_listeners();
         *closed = 1;
         dying = 1;
         ap_scoreboard_image->parent[process_slot].quiescing = 1;
-        for (i = 0; i < threads_per_child; ++i){
+        for (i = 0; i < threads_per_child; ++i) {
             ap_update_child_status_from_indexes(process_slot, i,
-                                                SERVER_DEAD, NULL);
+                                                SERVER_GRACEFUL, NULL);
         }
         /* wake up the main thread */
         kill(ap_my_pid, SIGTERM);
@@ -893,7 +1007,7 @@ static apr_status_t s_socket_add(void *user_baton,
 {
     s_baton_t *s = (s_baton_t*)user_baton;
     /* XXXXX: recycle listener_poll_types */
-    listener_poll_type *pt = malloc(sizeof(*pt));
+    listener_poll_type *pt = ap_malloc(sizeof(*pt));
     pt->type = PT_SERF;
     pt->baton = serf_baton;
     pfd->client_data = pt;
@@ -918,12 +1032,18 @@ static apr_status_t init_pollset(apr_pool_t *p)
 #endif
     ap_listen_rec *lr;
     listener_poll_type *pt;
-
-    APR_RING_INIT(&timeout_head, conn_state_t, timeout_list);
-    APR_RING_INIT(&keepalive_timeout_head, conn_state_t, timeout_list);
-
-    for (lr = ap_listeners; lr != NULL; lr = lr->next) {
-        apr_pollfd_t *pfd = apr_palloc(p, sizeof(*pfd));
+    int i = 0;
+
+    TO_QUEUE_INIT(write_completion_q);
+    TO_QUEUE_INIT(keepalive_q);
+    TO_QUEUE_INIT(linger_q);
+    TO_QUEUE_INIT(short_linger_q);
+
+    listener_pollfd = apr_palloc(p, sizeof(apr_pollfd_t) * num_listensocks);
+    for (lr = ap_listeners; lr != NULL; lr = lr->next, i++) {
+        apr_pollfd_t *pfd;
+        AP_DEBUG_ASSERT(i < num_listensocks);
+        pfd = &listener_pollfd[i];
         pt = apr_pcalloc(p, sizeof(*pt));
         pfd->desc_type = APR_POLL_SOCKET;
         pfd->desc.s = lr->sd;
@@ -970,12 +1090,6 @@ static apr_status_t push2worker(const apr_pollfd_t * pfd,
     conn_state_t *cs = (conn_state_t *) pt->baton;
     apr_status_t rc;
 
-    if (pt->bypass_push) {
-        return APR_SUCCESS;
-    }
-
-    pt->bypass_push = 1;
-
     rc = apr_pollset_remove(pollset, pfd);
 
     /*
@@ -985,7 +1099,10 @@ static apr_status_t push2worker(const apr_pollfd_t * pfd,
      * and we still want to keep going
      */
     if (rc != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rc)) {
-        cs->state = CONN_STATE_LINGER;
+        ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
+                     "pollset remove failed");
+        start_lingering_close(cs);
+        return rc;
     }
 
     rc = ap_queue_push(worker_queue, cs->pfd.desc.s, cs, cs->p);
@@ -1005,40 +1122,45 @@ static apr_status_t push2worker(const apr_pollfd_t * pfd,
 }
 
 /* get_worker:
- *     reserve a worker thread, block if all are currently busy.
- *     this prevents the worker queue from overflowing and lets
- *     other processes accept new connections in the mean time.
+ *     If *have_idle_worker_p == 0, reserve a worker thread, and set
+ *     *have_idle_worker_p = 1.
+ *     If *have_idle_worker_p is already 1, will do nothing.
+ *     If blocking == 1, block if all workers are currently busy.
+ *     If no worker was available immediately, will set *all_busy to 1.
+ *     XXX: If there are no workers, we should not block immediately but
+ *     XXX: close all keep-alive connections first.
  */
-static int get_worker(int *have_idle_worker_p)
+static void get_worker(int *have_idle_worker_p, int blocking, int *all_busy)
 {
     apr_status_t rc;
 
-    if (!*have_idle_worker_p) {
-        rc = ap_queue_info_wait_for_idler(worker_queue_info);
-
-        if (rc == APR_SUCCESS) {
-            *have_idle_worker_p = 1;
-            return 1;
-        }
-        else {
-            if (!APR_STATUS_IS_EOF(rc)) {
-                ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
-                             "ap_queue_info_wait_for_idler failed.  "
-                             "Attempting to shutdown process gracefully");
-                signal_threads(ST_GRACEFUL);
-            }
-            return 0;
-        }
-    }
-    else {
+    if (*have_idle_worker_p) {
         /* already reserved a worker thread - must have hit a
          * transient error on a previous pass
          */
-        return 1;
+        return;
+    }
+
+    if (blocking)
+        rc = ap_queue_info_wait_for_idler(worker_queue_info, all_busy);
+    else
+        rc = ap_queue_info_try_get_idler(worker_queue_info);
+
+    if (rc == APR_SUCCESS) {
+        *have_idle_worker_p = 1;
+    }
+    else if (!blocking && rc == APR_EAGAIN) {
+        *all_busy = 1;
+    }
+    else if (!APR_STATUS_IS_EOF(rc)) {
+        ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
+                     "ap_queue_info_wait_for_idler failed.  "
+                     "Attempting to shutdown process gracefully");
+        signal_threads(ST_GRACEFUL);
     }
 }
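The listener below uses the non-blocking mode for keep-alive events; condensed from the main loop later in this diff:

    get_worker(&have_idle_worker, 0 /* don't block */, &workers_were_busy);
    if (!have_idle_worker) {
        /* no worker free: close the keep-alive connection so the client
         * can reconnect to a less busy process */
        start_lingering_close(cs);
    }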
 
-/* XXXXXX: Convert to skiplist or other better data structure 
+/* XXXXXX: Convert to skiplist or other better data structure
  * (yes, this is VERY VERY VERY VERY BAD)
  */
 
@@ -1065,7 +1187,7 @@ static apr_status_t event_register_timed_callback(apr_time_t t,
     }
     else {
         /* XXXXX: lol, pool allocation without a context from any thread.Yeah. Right. MPMs Suck. */
-        te = malloc(sizeof(timer_event_t));
+        te = ap_malloc(sizeof(timer_event_t));
         APR_RING_ELEM_INIT(te, link);
     }
 
@@ -1086,7 +1208,7 @@ static apr_status_t event_register_timed_callback(apr_time_t t,
             break;
         }
     }
-    
+
     if (!inserted) {
         APR_RING_INSERT_TAIL(&timer_ring, te, timer_event_t, link);
     }
@@ -1096,6 +1218,79 @@ static apr_status_t event_register_timed_callback(apr_time_t t,
     return APR_SUCCESS;
 }
 
+static void process_lingering_close(conn_state_t *cs, const apr_pollfd_t *pfd)
+{
+    apr_socket_t *csd = ap_get_conn_socket(cs->c);
+    char dummybuf[2048];
+    apr_size_t nbytes;
+    apr_status_t rv;
+    struct timeout_queue *q;
+    q = (cs->state == CONN_STATE_LINGER_SHORT) ?  &short_linger_q : &linger_q;
+
+    /* socket is already in non-blocking state */
+    do {
+        nbytes = sizeof(dummybuf);
+        rv = apr_socket_recv(csd, dummybuf, &nbytes);
+    } while (rv == APR_SUCCESS);
+
+    if (!APR_STATUS_IS_EOF(rv)) {
+        return;
+    }
+
+    rv = apr_pollset_remove(event_pollset, pfd);
+    AP_DEBUG_ASSERT(rv == APR_SUCCESS);
+
+    rv = apr_socket_close(csd);
+    AP_DEBUG_ASSERT(rv == APR_SUCCESS);
+
+    apr_thread_mutex_lock(timeout_mutex);
+    TO_QUEUE_REMOVE(*q, cs);
+    apr_thread_mutex_unlock(timeout_mutex);
+    TO_QUEUE_ELEM_INIT(cs);
+
+    apr_pool_clear(cs->p);
+    ap_push_pool(worker_queue_info, cs->p);
+}
+
+/* call 'func' for all elements of 'q' with timeout less than 'timeout_time'.
+ * Pre-condition: timeout_mutex must already be locked
+ * Post-condition: timeout_mutex will be locked again
+ */
+static void process_timeout_queue(struct timeout_queue *q,
+                                  apr_time_t timeout_time,
+                                  int (*func)(conn_state_t *))
+{
+    int count = 0;
+    conn_state_t *first, *cs, *last;
+    if (!q->count) {
+        return;
+    }
+    AP_DEBUG_ASSERT(!APR_RING_EMPTY(&q->head, conn_state_t, timeout_list));
+
+    cs = first = APR_RING_FIRST(&q->head);
+    while (cs != APR_RING_SENTINEL(&q->head, conn_state_t, timeout_list)
+           && cs->expiration_time < timeout_time) {
+        last = cs;
+        cs = APR_RING_NEXT(cs, timeout_list);
+        count++;
+    }
+    if (!count)
+        return;
+
+    APR_RING_UNSPLICE(first, last, timeout_list);
+    AP_DEBUG_ASSERT(q->count >= count);
+    q->count -= count;
+    apr_thread_mutex_unlock(timeout_mutex);
+    while (count) {
+        cs = APR_RING_NEXT(first, timeout_list);
+        TO_QUEUE_ELEM_INIT(first);
+        func(first);
+        first = cs;
+        count--;
+    }
+    apr_thread_mutex_lock(timeout_mutex);
+}
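For reference, the listener's timeout handling below calls this with timeout_mutex held, e.g.:

    apr_thread_mutex_lock(timeout_mutex);
    process_timeout_queue(&keepalive_q, timeout_time, start_lingering_close);
    process_timeout_queue(&write_completion_q, timeout_time, start_lingering_close);
    process_timeout_queue(&linger_q, timeout_time, stop_lingering_close);
    apr_thread_mutex_unlock(timeout_mutex);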
+
 static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
 {
     timer_event_t *ep;
@@ -1111,12 +1306,12 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
     conn_state_t *cs;
     const apr_pollfd_t *out_pfd;
     apr_int32_t num = 0;
-    apr_time_t time_now = 0;
     apr_interval_time_t timeout_interval;
-    apr_time_t timeout_time;
+    apr_time_t timeout_time = 0, now, last_log;
     listener_poll_type *pt;
-    int closed = 0;
+    int closed = 0, listeners_disabled = 0;
 
+    last_log = apr_time_now();
     free(ti);
 
     /* the following times out events that are really close in the future
@@ -1143,7 +1338,8 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
     apr_signal(LISTENER_SIGNAL, dummy_signal_handler);
 
     for (;;) {
-        if (listener_may_exit){
+        int workers_were_busy = 0;
+        if (listener_may_exit) {
             close_listeners(process_slot, &closed);
             if (terminate_mode == ST_UNGRACEFUL
                 || apr_atomic_read32(&connection_count) == 0)
@@ -1154,173 +1350,223 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
             check_infinite_requests();
         }
 
+        now = apr_time_now();
+        if (APLOGtrace6(ap_server_conf)) {
+            /* trace log status every second */
+            if (now - last_log > apr_time_from_msec(1000)) {
+                last_log = now;
+                apr_thread_mutex_lock(timeout_mutex);
+                ap_log_error(APLOG_MARK, APLOG_TRACE6, 0, ap_server_conf,
+                             "connections: %d (write-completion: %d "
+                             "keep-alive: %d lingering: %d)",
+                             connection_count, write_completion_q.count,
+                             keepalive_q.count,
+                             linger_q.count + short_linger_q.count);
+                apr_thread_mutex_unlock(timeout_mutex);
+            }
+        }
 
-        {
-            apr_time_t now = apr_time_now();
-            apr_thread_mutex_lock(g_timer_ring_mtx);
-
-            if (!APR_RING_EMPTY(&timer_ring, timer_event_t, link)) {
-                te = APR_RING_FIRST(&timer_ring);
-                if (te->when > now) {
-                    timeout_interval = te->when - now;
-                }
-                else {
-                    timeout_interval = 1;
-                }
+        apr_thread_mutex_lock(g_timer_ring_mtx);
+        if (!APR_RING_EMPTY(&timer_ring, timer_event_t, link)) {
+            te = APR_RING_FIRST(&timer_ring);
+            if (te->when > now) {
+                timeout_interval = te->when - now;
             }
             else {
-                timeout_interval = apr_time_from_msec(100);
+                timeout_interval = 1;
             }
-            apr_thread_mutex_unlock(g_timer_ring_mtx);
         }
+        else {
+            timeout_interval = apr_time_from_msec(100);
+        }
+        apr_thread_mutex_unlock(g_timer_ring_mtx);
 
 #if HAVE_SERF
         rc = serf_context_prerun(g_serf);
         if (rc != APR_SUCCESS) {
             /* TOOD: what should do here? ugh. */
         }
-        
 #endif
-        rc = apr_pollset_poll(event_pollset, timeout_interval, &num,
-                              &out_pfd);
-
+        rc = apr_pollset_poll(event_pollset, timeout_interval, &num, &out_pfd);
         if (rc != APR_SUCCESS) {
             if (APR_STATUS_IS_EINTR(rc)) {
                 continue;
             }
             if (!APR_STATUS_IS_TIMEUP(rc)) {
-                ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
+                ap_log_error(APLOG_MARK, APLOG_CRIT, rc, ap_server_conf,
                              "apr_pollset_poll failed.  Attempting to "
                              "shutdown process gracefully");
                 signal_threads(ST_GRACEFUL);
             }
         }
 
-        if (listener_may_exit){
+        if (listener_may_exit) {
             close_listeners(process_slot, &closed);
-            if (terminate_mode == ST_UNGRACEFUL ||
-                apr_atomic_read32(&connection_count) == 0) {
+            if (terminate_mode == ST_UNGRACEFUL
+                || apr_atomic_read32(&connection_count) == 0)
                 break;
-            }
         }
 
+        now = apr_time_now();
+        apr_thread_mutex_lock(g_timer_ring_mtx);
+        for (ep = APR_RING_FIRST(&timer_ring);
+             ep != APR_RING_SENTINEL(&timer_ring,
+                                     timer_event_t, link);
+             ep = APR_RING_FIRST(&timer_ring))
         {
-            apr_time_t now = apr_time_now();
-            apr_thread_mutex_lock(g_timer_ring_mtx);
-            for (ep = APR_RING_FIRST(&timer_ring);
-                 ep != APR_RING_SENTINEL(&timer_ring,
-                                         timer_event_t, link);
-                 ep = APR_RING_FIRST(&timer_ring))
-            {
-                if (ep->when < now + EVENT_FUDGE_FACTOR) {
-                    APR_RING_REMOVE(ep, link);
-                    push_timer2worker(ep);
-                }
-                else {
-                    break;
-                }
+            if (ep->when < now + EVENT_FUDGE_FACTOR) {
+                APR_RING_REMOVE(ep, link);
+                push_timer2worker(ep);
+            }
+            else {
+                break;
             }
-            apr_thread_mutex_unlock(g_timer_ring_mtx);
         }
+        apr_thread_mutex_unlock(g_timer_ring_mtx);
 
-        while (num && get_worker(&have_idle_worker)) {
+        while (num) {
             pt = (listener_poll_type *) out_pfd->client_data;
             if (pt->type == PT_CSD) {
                 /* one of the sockets is readable */
+                struct timeout_queue *remove_from_q = &write_completion_q;
+                int blocking = 1;
                 cs = (conn_state_t *) pt->baton;
                 switch (cs->state) {
                 case CONN_STATE_CHECK_REQUEST_LINE_READABLE:
                     cs->state = CONN_STATE_READ_REQUEST_LINE;
-                    break;
+                    remove_from_q = &keepalive_q;
+                    /* don't wait for a worker for a keepalive request */
+                    blocking = 0;
+                    /* FALL THROUGH */
                 case CONN_STATE_WRITE_COMPLETION:
+                    get_worker(&have_idle_worker, blocking,
+                               &workers_were_busy);
+                    apr_thread_mutex_lock(timeout_mutex);
+                    TO_QUEUE_REMOVE(*remove_from_q, cs);
+                    apr_thread_mutex_unlock(timeout_mutex);
+                    TO_QUEUE_ELEM_INIT(cs);
+                    /* If we didn't get a worker immediately for a keep-alive
+                     * request, we close the connection, so that the client can
+                     * re-connect to a different process.
+                     */
+                    if (!have_idle_worker) {
+                        start_lingering_close(cs);
+                        break;
+                    }
+                    rc = push2worker(out_pfd, event_pollset);
+                    if (rc != APR_SUCCESS) {
+                        ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
+                                     ap_server_conf, "push2worker failed");
+                    }
+                    else {
+                        have_idle_worker = 0;
+                    }
+                    break;
+                case CONN_STATE_LINGER_NORMAL:
+                case CONN_STATE_LINGER_SHORT:
+                    process_lingering_close(cs, out_pfd);
                     break;
                 default:
-                    ap_log_error(APLOG_MARK, APLOG_ERR, rc,
+                    ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
                                  ap_server_conf,
                                  "event_loop: unexpected state %d",
                                  cs->state);
                     AP_DEBUG_ASSERT(0);
                 }
-
-                apr_thread_mutex_lock(timeout_mutex);
-                APR_RING_REMOVE(cs, timeout_list);
-                apr_thread_mutex_unlock(timeout_mutex);
-                APR_RING_ELEM_INIT(cs, timeout_list);
-
-                rc = push2worker(out_pfd, event_pollset);
-                if (rc != APR_SUCCESS) {
-                    ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                                 ap_server_conf, "push2worker failed");
-                }
-                else {
-                    have_idle_worker = 0;
-                }
             }
             else if (pt->type == PT_ACCEPT) {
                 /* A Listener Socket is ready for an accept() */
+                if (workers_were_busy) {
+                    if (!listeners_disabled)
+                        disable_listensocks(process_slot);
+                    listeners_disabled = 1;
+                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
+                                 "All workers busy, not accepting new conns "
+                                 "in this process");
+                }
+                else if (apr_atomic_read32(&connection_count) > threads_per_child
+                         + ap_queue_info_get_idlers(worker_queue_info) *
+                           worker_factor / WORKER_FACTOR_SCALE)
+                {
+                    if (!listeners_disabled)
+                        disable_listensocks(process_slot);
+                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
+                                 "Too many open connections (%u), "
+                                 "not accepting new conns in this process",
+                                 apr_atomic_read32(&connection_count));
+                    ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
+                                 "Idle workers: %u",
+                                 ap_queue_info_get_idlers(worker_queue_info));
+                    listeners_disabled = 1;
+                }
+                else if (listeners_disabled) {
+                    listeners_disabled = 0;
+                    enable_listensocks(process_slot);
+                }
+                if (!listeners_disabled) {
+                    lr = (ap_listen_rec *) pt->baton;
+                    ap_pop_pool(&ptrans, worker_queue_info);
 
-                lr = (ap_listen_rec *) pt->baton;
-
-                ap_pop_pool(&ptrans, worker_queue_info);
-
-                if (ptrans == NULL) {
-                    /* create a new transaction pool for each accepted socket */
-                    apr_allocator_t *allocator;
-
-                    apr_allocator_create(&allocator);
-                    apr_allocator_max_free_set(allocator,
-                                               ap_max_mem_free);
-                    apr_pool_create_ex(&ptrans, pconf, NULL, allocator);
-                    apr_allocator_owner_set(allocator, ptrans);
                     if (ptrans == NULL) {
-                        ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                                     ap_server_conf,
-                                     "Failed to create transaction pool");
-                        signal_threads(ST_GRACEFUL);
-                        return NULL;
+                        /* create a new transaction pool for each accepted socket */
+                        apr_allocator_t *allocator;
+
+                        apr_allocator_create(&allocator);
+                        apr_allocator_max_free_set(allocator,
+                                                   ap_max_mem_free);
+                        apr_pool_create_ex(&ptrans, pconf, NULL, allocator);
+                        apr_allocator_owner_set(allocator, ptrans);
+                        if (ptrans == NULL) {
+                            ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
+                                         ap_server_conf,
+                                         "Failed to create transaction pool");
+                            signal_threads(ST_GRACEFUL);
+                            return NULL;
+                        }
                     }
-                }
-                apr_pool_tag(ptrans, "transaction");
+                    apr_pool_tag(ptrans, "transaction");
 
-                rc = lr->accept_func(&csd, lr, ptrans);
+                    get_worker(&have_idle_worker, 1, &workers_were_busy);
+                    rc = lr->accept_func(&csd, lr, ptrans);
 
-                /* later we trash rv and rely on csd to indicate
-                 * success/failure
-                 */
-                AP_DEBUG_ASSERT(rc == APR_SUCCESS || !csd);
+                    /* later we trash rv and rely on csd to indicate
+                     * success/failure
+                     */
+                    AP_DEBUG_ASSERT(rc == APR_SUCCESS || !csd);
 
-                if (rc == APR_EGENERAL) {
-                    /* E[NM]FILE, ENOMEM, etc */
-                    resource_shortage = 1;
-                    signal_threads(ST_GRACEFUL);
-                }
+                    if (rc == APR_EGENERAL) {
+                        /* E[NM]FILE, ENOMEM, etc */
+                        resource_shortage = 1;
+                        signal_threads(ST_GRACEFUL);
+                    }
 
-                if (csd != NULL) {
-                    rc = ap_queue_push(worker_queue, csd, NULL, ptrans);
-                    if (rc != APR_SUCCESS) {
-                        /* trash the connection; we couldn't queue the connected
-                         * socket to a worker
-                         */
-                        apr_socket_close(csd);
-                        ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
-                                     ap_server_conf,
-                                     "ap_queue_push failed");
-                        apr_pool_clear(ptrans);
-                        ap_push_pool(worker_queue_info, ptrans);
+                    if (csd != NULL) {
+                        rc = ap_queue_push(worker_queue, csd, NULL, ptrans);
+                        if (rc != APR_SUCCESS) {
+                            /* trash the connection; we couldn't queue the connected
+                             * socket to a worker
+                             */
+                            apr_socket_close(csd);
+                            ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
+                                         ap_server_conf,
+                                         "ap_queue_push failed");
+                            apr_pool_clear(ptrans);
+                            ap_push_pool(worker_queue_info, ptrans);
+                        }
+                        else {
+                            have_idle_worker = 0;
+                        }
                     }
                     else {
-                        have_idle_worker = 0;
+                        apr_pool_clear(ptrans);
+                        ap_push_pool(worker_queue_info, ptrans);
                     }
                 }
-                else {
-                    apr_pool_clear(ptrans);
-                    ap_push_pool(worker_queue_info, ptrans);
-                }
             }               /* if:else on pt->type */
 #if HAVE_SERF
             else if (pt->type == PT_SERF) {
                 /* send socket to serf. */
-                /* XXXX: this doesn't require get_worker(&have_idle_worker) */
+                /* XXXX: this doesn't require get_worker() */
                 serf_event_trigger(g_serf, pt->baton, out_pfd);
             }
 #endif
@@ -1331,71 +1577,60 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
         /* XXX possible optimization: stash the current time for use as
          * r->request_time for new requests
          */
-        time_now = apr_time_now();
-
-        /* handle timed out sockets */
-        apr_thread_mutex_lock(timeout_mutex);
-
-        /* Step 1: keepalive timeouts */
-        cs = APR_RING_FIRST(&keepalive_timeout_head);
-        timeout_time = time_now + TIMEOUT_FUDGE_FACTOR;
-        while (!APR_RING_EMPTY(&keepalive_timeout_head, conn_state_t, timeout_list)
-               && cs->expiration_time < timeout_time) {
-
-            cs->state = CONN_STATE_LINGER;
+        now = apr_time_now();
+        /* we only do this once per 0.1s (TIMEOUT_FUDGE_FACTOR) */
+        if (now > timeout_time) {
+            struct process_score *ps;
+            timeout_time = now + TIMEOUT_FUDGE_FACTOR;
 
-            APR_RING_REMOVE(cs, timeout_list);
-            apr_thread_mutex_unlock(timeout_mutex);
+            /* handle timed out sockets */
+            apr_thread_mutex_lock(timeout_mutex);
 
-            if (!get_worker(&have_idle_worker)) {
-                apr_thread_mutex_lock(timeout_mutex);
-                APR_RING_INSERT_HEAD(&keepalive_timeout_head, cs,
-                                     conn_state_t, timeout_list);
-                break;
+            /* Step 1: keepalive timeouts */
+            /* If all workers are busy, we kill older keep-alive connections so that they
+             * may connect to another process.
+             */
+            if (workers_were_busy && keepalive_q.count) {
+                ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
+                             "All workers are busy, will close %d keep-alive "
+                             "connections",
+                             keepalive_q.count);
+                process_timeout_queue(&keepalive_q,
+                                      timeout_time + ap_server_conf->keep_alive_timeout,
+                                      start_lingering_close);
             }
-
-            rc = push2worker(&cs->pfd, event_pollset);
-
-            if (rc != APR_SUCCESS) {
-                return NULL;
-                /* XXX return NULL looks wrong - not an init failure
-                 * that bypasses all the cleanup outside the main loop
-                 * break seems more like it
-                 * need to evaluate seriousness of push2worker failures
-                 */
+            else {
+                process_timeout_queue(&keepalive_q, timeout_time,
+                                      start_lingering_close);
             }
-            have_idle_worker = 0;
-            apr_thread_mutex_lock(timeout_mutex);
-            cs = APR_RING_FIRST(&keepalive_timeout_head);
-        }
-
-        /* Step 2: write completion timeouts */
-        cs = APR_RING_FIRST(&timeout_head);
-        while (!APR_RING_EMPTY(&timeout_head, conn_state_t, timeout_list)
-               && cs->expiration_time < timeout_time) {
-
-            cs->state = CONN_STATE_LINGER;
-            APR_RING_REMOVE(cs, timeout_list);
+            /* Step 2: write completion timeouts */
+            process_timeout_queue(&write_completion_q, timeout_time, start_lingering_close);
+            /* Step 3: (normal) lingering close completion timeouts */
+            process_timeout_queue(&linger_q, timeout_time, stop_lingering_close);
+            /* Step 4: (short) lingering close completion timeouts */
+            process_timeout_queue(&short_linger_q, timeout_time, stop_lingering_close);
+
+            ps = ap_get_scoreboard_process(process_slot);
+            ps->write_completion = write_completion_q.count;
+            ps->lingering_close = linger_q.count + short_linger_q.count;
+            ps->keep_alive = keepalive_q.count;
             apr_thread_mutex_unlock(timeout_mutex);
 
-            if (!get_worker(&have_idle_worker)) {
-                apr_thread_mutex_lock(timeout_mutex);
-                APR_RING_INSERT_HEAD(&timeout_head, cs,
-                                     conn_state_t, timeout_list);
-                break;
-            }
-
-            rc = push2worker(&cs->pfd, event_pollset);
-            if (rc != APR_SUCCESS) {
-                return NULL;
-            }
-            have_idle_worker = 0;
-            apr_thread_mutex_lock(timeout_mutex);
-            cs = APR_RING_FIRST(&timeout_head);
+            ps->connections = apr_atomic_read32(&connection_count);
+            /* XXX: should count CONN_STATE_SUSPENDED and set ps->suspended */
         }
-
-        apr_thread_mutex_unlock(timeout_mutex);
-
+        if (listeners_disabled && !workers_were_busy &&
+            (int)apr_atomic_read32(&connection_count) <
+            ((int)ap_queue_info_get_idlers(worker_queue_info) - 1) *
+            worker_factor / WORKER_FACTOR_SCALE + threads_per_child)
+        {
+            listeners_disabled = 0;
+            enable_listensocks(process_slot);
+        }
+        /*
+         * XXX: do we need to set some timeout that re-enables the listensocks
+         * XXX: in case no other event occurs?
+         */
     }     /* listener main loop */
 
     close_listeners(process_slot, &closed);
@@ -1423,7 +1658,7 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
     apr_status_t rv;
     int is_idle = 0;
     timer_event_t *te = NULL;
-    
+
     free(ti);
 
     ap_scoreboard_image->servers[process_slot][thread_slot].pid = ap_my_pid;
@@ -1446,14 +1681,13 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
         }
 
         ap_update_child_status_from_indexes(process_slot, thread_slot,
-                                            dying ? SERVER_DEAD : SERVER_READY, NULL);
+                                            dying ? SERVER_GRACEFUL : SERVER_READY, NULL);
       worker_pop:
         if (workers_may_exit) {
             break;
         }
 
         te = NULL;
-        
         rv = ap_queue_pop_something(worker_queue, &csd, &cs, &ptrans, &te);
 
         if (rv != APR_SUCCESS) {
@@ -1485,7 +1719,6 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
             continue;
         }
         if (te != NULL) {
-            
             te->cbfunc(te->baton);
 
             {
@@ -1506,7 +1739,7 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
     }
 
     ap_update_child_status_from_indexes(process_slot, thread_slot,
-                                        (dying) ? SERVER_DEAD :
+                                        dying ? SERVER_DEAD :
                                         SERVER_GRACEFUL,
                                         (request_rec *) NULL);
 
@@ -1533,7 +1766,7 @@ static void create_listener_thread(thread_starter * ts)
     proc_info *my_info;
     apr_status_t rv;
 
-    my_info = (proc_info *) malloc(sizeof(proc_info));
+    my_info = (proc_info *) ap_malloc(sizeof(proc_info));
     my_info->pid = my_child_num;
     my_info->tid = -1;          /* listener thread doesn't have a thread slot */
     my_info->sd = 0;
@@ -1567,6 +1800,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
     int listener_started = 0;
     int loops;
     int prev_threads_created;
+    int max_recycled_pools = -1;
 
     /* We must create the fd queues before we start up the listener
      * and worker threads. */
@@ -1578,8 +1812,15 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
+    if (ap_max_mem_free != APR_ALLOCATOR_MAX_FREE_UNLIMITED) {
+        /* If we want to conserve memory, let's not keep an unlimited number of
+         * pools & allocators.
+         * XXX: This should probably be a separate config directive
+         */
+        max_recycled_pools = threads_per_child * 3 / 4 ;
+    }
     rv = ap_queue_info_create(&worker_queue_info, pchild,
-                              threads_per_child);
+                              threads_per_child, max_recycled_pools);
     if (rv != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
                      "ap_queue_info_create() failed");
@@ -1599,7 +1840,10 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
 
     /* Create the main pollset */
     rv = apr_pollset_create(&event_pollset,
-                            threads_per_child,
+                            threads_per_child, /* XXX don't we need more, to handle
+                                                * connections in K-A or lingering
+                                                * close?
+                                                */
                             pchild, APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
     if (rv != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
@@ -1621,12 +1865,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
                 continue;
             }
 
-            my_info = (proc_info *) malloc(sizeof(proc_info));
-            if (my_info == NULL) {
-                ap_log_error(APLOG_MARK, APLOG_ALERT, errno, ap_server_conf,
-                             "malloc: out of memory");
-                clean_child_exit(APEXIT_CHILDFATAL);
-            }
+            my_info = (proc_info *) ap_malloc(sizeof(proc_info));
             my_info->pid = my_child_num;
             my_info->tid = i;
             my_info->sd = 0;
@@ -1775,7 +2014,6 @@ static void child_main(int child_num_arg)
     apr_thread_mutex_create(&g_timer_ring_mtx, APR_THREAD_MUTEX_DEFAULT, pchild);
     APR_RING_INIT(&timer_free_ring, timer_event_t, link);
     APR_RING_INIT(&timer_ring, timer_event_t, link);
-    
     ap_run_child_init(pchild, ap_server_conf);
 
     /* done with init critical section */
@@ -1804,16 +2042,8 @@ static void child_main(int child_num_arg)
     /* clear the storage; we may not create all our threads immediately,
      * and we want a 0 entry to indicate a thread which was not created
      */
-    threads = (apr_thread_t **) calloc(1,
-                                       sizeof(apr_thread_t *) *
-                                       threads_per_child);
-    if (threads == NULL) {
-        ap_log_error(APLOG_MARK, APLOG_ALERT, errno, ap_server_conf,
-                     "malloc: out of memory");
-        clean_child_exit(APEXIT_CHILDFATAL);
-    }
-
-    ts = (thread_starter *) apr_palloc(pchild, sizeof(*ts));
+    threads = ap_calloc(threads_per_child, sizeof(apr_thread_t *));
+    ts = apr_palloc(pchild, sizeof(*ts));
 
     apr_threadattr_create(&thread_attr, pchild);
     /* 0 means PTHREAD_CREATE_JOINABLE */
@@ -1975,6 +2205,7 @@ static int make_child(server_rec * s, int slot)
         event_note_child_lost_slot(slot, pid);
     }
     ap_scoreboard_image->parent[slot].quiescing = 0;
+    ap_scoreboard_image->parent[slot].not_accepting = 0;
     event_note_child_started(slot, pid);
     return 0;
 }
@@ -2028,7 +2259,7 @@ static void perform_idle_server_maintenance(void)
             /* short cut if all active processes have been examined and
              * enough empty scoreboard slots have been found
              */
-        
+
             break;
         ps = &ap_scoreboard_image->parent[i];
         for (j = 0; j < threads_per_child; j++) {
@@ -2050,8 +2281,9 @@ static void perform_idle_server_maintenance(void)
              */
             if (ps->pid != 0) { /* XXX just set all_dead_threads in outer
                                    for loop if no pid?  not much else matters */
-                if (status <= SERVER_READY &&
-                        !ps->quiescing && ps->generation == retained->my_generation) {
+                if (status <= SERVER_READY && !ps->quiescing && !ps->not_accepting
+                    && ps->generation == retained->my_generation)
+                {
                     ++idle_thread_count;
                 }
                 if (status >= SERVER_READY && status < SERVER_GRACEFUL) {
@@ -2124,17 +2356,16 @@ static void perform_idle_server_maintenance(void)
             if (active_thread_count >= ap_daemons_limit * threads_per_child) {
                 if (!retained->maxclients_reported) {
                     /* only report this condition once */
-                    ap_log_error(APLOG_MARK, APLOG_ERR, 0,
-                                 ap_server_conf,
-                                 "server reached MaxClients setting, consider"
-                                 " raising the MaxClients setting");
+                    ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf,
+                                 "server reached MaxRequestWorkers setting, "
+                                 "consider raising the MaxRequestWorkers "
+                                 "setting");
                     retained->maxclients_reported = 1;
                 }
             }
             else {
-                ap_log_error(APLOG_MARK, APLOG_ERR, 0,
-                             ap_server_conf,
-                             "scoreboard is full, not at MaxClients");
+                ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf,
+                             "scoreboard is full, not at MaxRequestWorkers");
             }
             retained->idle_spawn_rate = 1;
         }
@@ -2143,8 +2374,7 @@ static void perform_idle_server_maintenance(void)
                 free_length = retained->idle_spawn_rate;
             }
             if (retained->idle_spawn_rate >= 8) {
-                ap_log_error(APLOG_MARK, APLOG_INFO, 0,
-                             ap_server_conf,
+                ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf,
                              "server seems busy, (you may need "
                              "to increase StartServers, ThreadsPerChild "
                              "or Min/MaxSpareThreads), "
@@ -2281,6 +2511,7 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
         ap_scoreboard_image->global->running_generation = retained->my_generation;
     }
 
+    restart_pending = shutdown_pending = 0;
     set_signals();
     /* Don't thrash... */
     if (max_spare_threads < min_spare_threads + threads_per_child)
@@ -2316,7 +2547,6 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
                  "Server built: %s", ap_get_server_built());
     ap_log_command_line(plog, s);
 
-    restart_pending = shutdown_pending = 0;
     mpm_state = AP_MPMQ_RUNNING;
 
     server_main_loop(remaining_children_to_start);
@@ -2515,6 +2745,8 @@ static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog,
         apr_pollset_destroy(event_pollset);
 
         if (!one_process && !foreground) {
+            /* before we detach, setup crash handlers to log to errorlog */
+            ap_fatal_signal_setup(ap_server_conf, pconf);
             rv = apr_proc_detach(no_detach ? APR_PROC_DETACH_FOREGROUND
                                  : APR_PROC_DETACH_DAEMONIZE);
             if (rv != APR_SUCCESS) {
@@ -2535,7 +2767,7 @@ static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog,
     thread_limit = DEFAULT_THREAD_LIMIT;
     ap_daemons_limit = server_limit;
     threads_per_child = DEFAULT_THREADS_PER_CHILD;
-    max_clients = ap_daemons_limit * threads_per_child;
+    max_workers = ap_daemons_limit * threads_per_child;
     ap_extended_status = 0;
 
     return OK;
@@ -2671,57 +2903,57 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
         threads_per_child = 1;
     }
 
-    if (max_clients < threads_per_child) {
+    if (max_workers < threads_per_child) {
         if (startup) {
             ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         "WARNING: MaxClients of %d is less than "
-                         "ThreadsPerChild of", max_clients);
+                         "WARNING: MaxRequestWorkers of %d is less than "
+                         "ThreadsPerChild of", max_workers);
             ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         " %d, increasing to %d.  MaxClients must be at "
+                         " %d, increasing to %d.  MaxRequestWorkers must be at "
                          "least as large",
                          threads_per_child, threads_per_child);
             ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                          " as the number of threads in a single server.");
         } else {
             ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
-                         "MaxClients of %d is less than ThreadsPerChild "
+                         "MaxRequestWorkers of %d is less than ThreadsPerChild "
                          "of %d, increasing to match",
-                         max_clients, threads_per_child);
+                         max_workers, threads_per_child);
         }
-        max_clients = threads_per_child;
+        max_workers = threads_per_child;
     }
 
-    ap_daemons_limit = max_clients / threads_per_child;
+    ap_daemons_limit = max_workers / threads_per_child;
 
-    if (max_clients % threads_per_child) {
-        int tmp_max_clients = ap_daemons_limit * threads_per_child;
+    if (max_workers % threads_per_child) {
+        int tmp_max_workers = ap_daemons_limit * threads_per_child;
 
         if (startup) {
             ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         "WARNING: MaxClients of %d is not an integer "
-                         "multiple of", max_clients);
+                         "WARNING: MaxRequestWorkers of %d is not an integer "
+                         "multiple of", max_workers);
             ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                          " ThreadsPerChild of %d, decreasing to nearest "
                          "multiple %d,", threads_per_child,
-                         tmp_max_clients);
+                         tmp_max_workers);
             ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                          " for a maximum of %d servers.",
                          ap_daemons_limit);
         } else {
             ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
-                         "MaxClients of %d is not an integer multiple of "
-                         "ThreadsPerChild of %d, decreasing to nearest "
-                         "multiple %d", max_clients, threads_per_child,
-                         tmp_max_clients);
+                         "MaxRequestWorkers of %d is not an integer multiple "
+                         "of ThreadsPerChild of %d, decreasing to nearest "
+                         "multiple %d", max_workers, threads_per_child,
+                         tmp_max_workers);
         }
-        max_clients = tmp_max_clients;
+        max_workers = tmp_max_workers;
     }
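Worked example of the rounding above: with ThreadsPerChild 25, MaxRequestWorkers 400 gives ap_daemons_limit 16 exactly, while MaxRequestWorkers 410 is not a multiple of 25 and is reduced, after the warning above, to the nearest multiple, 400 (16 servers).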
 
     if (ap_daemons_limit > server_limit) {
         if (startup) {
             ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
-                         "WARNING: MaxClients of %d would require %d "
-                         "servers and ", max_clients, ap_daemons_limit);
+                         "WARNING: MaxRequestWorkers of %d would require %d "
+                         "servers and ", max_workers, ap_daemons_limit);
             ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                          " would exceed ServerLimit of %d, decreasing to %d.",
                          server_limit, server_limit * threads_per_child);
@@ -2730,9 +2962,9 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
                          "directive.");
         } else {
             ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
-                         "MaxClients of %d would require %d servers and "
+                         "MaxRequestWorkers of %d would require %d servers and "
                          "exceed ServerLimit of %d, decreasing to %d",
-                         max_clients, ap_daemons_limit, server_limit,
+                         max_workers, ap_daemons_limit, server_limit,
                          server_limit * threads_per_child);
         }
         ap_daemons_limit = server_limit;
@@ -2834,15 +3066,19 @@ static const char *set_max_spare_threads(cmd_parms * cmd, void *dummy,
     return NULL;
 }
 
-static const char *set_max_clients(cmd_parms * cmd, void *dummy,
+static const char *set_max_workers(cmd_parms * cmd, void *dummy,
                                    const char *arg)
 {
     const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
     if (err != NULL) {
         return err;
     }
-
-    max_clients = atoi(arg);
+    if (!strcasecmp(cmd->cmd->name, "MaxClients")) {
+        ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL,
+                     "MaxClients is deprecated, use MaxRequestWorkers "
+                     "instead.");
+    }
+    max_workers = atoi(arg);
     return NULL;
 }
 
@@ -2880,6 +3116,27 @@ static const char *set_thread_limit(cmd_parms * cmd, void *dummy,
     return NULL;
 }
 
+static const char *set_worker_factor(cmd_parms * cmd, void *dummy,
+                                     const char *arg)
+{
+    double val;
+    char *endptr;
+    const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+    if (err != NULL) {
+        return err;
+    }
+
+    val = strtod(arg, &endptr);
+    if (*endptr)
+        return "error parsing value";
+
+    worker_factor = val * WORKER_FACTOR_SCALE;
+    if (worker_factor == 0)
+        worker_factor = 1;
+    return NULL;
+}
+
+
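Worked example of the fixed-point conversion in set_worker_factor() (not part of the patch): AsyncRequestWorkerFactor 1.5 stores worker_factor = 1.5 * WORKER_FACTOR_SCALE = 24, so each idle worker accounts for 24/16 = 1.5 additional connections in the accept-throttle check; any value small enough to truncate to 0 is clamped to 1 (i.e. 1/16).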
 static const command_rec event_cmds[] = {
     LISTEN_COMMANDS,
     AP_INIT_TAKE1("StartServers", set_daemons_to_start, NULL, RSRC_CONF,
@@ -2890,13 +3147,18 @@ static const command_rec event_cmds[] = {
                   "Minimum number of idle threads, to handle request spikes"),
     AP_INIT_TAKE1("MaxSpareThreads", set_max_spare_threads, NULL, RSRC_CONF,
                   "Maximum number of idle threads"),
-    AP_INIT_TAKE1("MaxClients", set_max_clients, NULL, RSRC_CONF,
+    AP_INIT_TAKE1("MaxClients", set_max_workers, NULL, RSRC_CONF,
+                  "Deprecated name of MaxRequestWorkers"),
+    AP_INIT_TAKE1("MaxRequestWorkers", set_max_workers, NULL, RSRC_CONF,
                   "Maximum number of threads alive at the same time"),
     AP_INIT_TAKE1("ThreadsPerChild", set_threads_per_child, NULL, RSRC_CONF,
                   "Number of threads each child creates"),
     AP_INIT_TAKE1("ThreadLimit", set_thread_limit, NULL, RSRC_CONF,
                   "Maximum number of worker threads per child process for this "
                   "run of Apache - Upper limit for ThreadsPerChild"),
+    AP_INIT_TAKE1("AsyncRequestWorkerFactor", set_worker_factor, NULL, RSRC_CONF,
+                  "How many additional connects will be accepted per idle "
+                  "worker thread"),
     AP_GRACEFUL_SHUTDOWN_TIMEOUT_COMMAND,
     {NULL}
 };