struct timeout_queue {
struct timeout_head_t head;
- int count, *total;
apr_interval_time_t timeout;
- struct timeout_queue *next;
+ apr_uint32_t count; /* elements in this queue only (not atomic; timeout_mutex held) */
+ apr_uint32_t *total; /* atomic counter shared by all chained/related queues */
+ struct timeout_queue *next; /* chaining (next queue sharing the same *total) */
};
/*
* Several timeout queues that use different timeouts, so that we always can
-* Macros for accessing struct timeout_queue.
+* Functions for accessing struct timeout_queue.
* For TO_QUEUE_APPEND and TO_QUEUE_REMOVE, timeout_mutex must be held.
*/
-#define TO_QUEUE_APPEND(q, el) \
- do { \
- APR_RING_INSERT_TAIL(&(q)->head, el, event_conn_state_t, \
- timeout_list); \
- ++*(q)->total; \
- ++(q)->count; \
- } while (0)
-
-#define TO_QUEUE_REMOVE(q, el) \
- do { \
- APR_RING_REMOVE(el, timeout_list); \
- --*(q)->total; \
- --(q)->count; \
- } while (0)
-
-#define TO_QUEUE_INIT(q, p, t, v) \
- do { \
- struct timeout_queue *b = (v); \
- (q) = apr_palloc((p), sizeof *(q)); \
- APR_RING_INIT(&(q)->head, event_conn_state_t, timeout_list); \
- (q)->total = (b) ? (b)->total : apr_pcalloc((p), sizeof *(q)->total); \
- (q)->count = 0; \
- (q)->timeout = (t); \
- (q)->next = NULL; \
- } while (0)
-
-#define TO_QUEUE_ELEM_INIT(el) APR_RING_ELEM_INIT(el, timeout_list)
+/* Append 'el' to queue 'q'. timeout_mutex must be held (q->count is not
+ * atomic); the shared *q->total is maintained atomically. */
+static void TO_QUEUE_APPEND(struct timeout_queue *q, event_conn_state_t *el)
+{
+ APR_RING_INSERT_TAIL(&q->head, el, event_conn_state_t, timeout_list);
+ apr_atomic_inc32(q->total);
+ ++q->count;
+}
+
+/* Remove 'el' from queue 'q'. timeout_mutex must be held (see above). */
+static void TO_QUEUE_REMOVE(struct timeout_queue *q, event_conn_state_t *el)
+{
+ APR_RING_REMOVE(el, timeout_list);
+ apr_atomic_dec32(q->total);
+ --q->count;
+}
+
+/* Allocate a queue with timeout 't' from pool 'p'. If 'ref' is non-NULL the
+ * new queue shares ref's total counter (chained/related queues); otherwise
+ * it gets its own zeroed counter. count and next are zeroed by apr_pcalloc. */
+static struct timeout_queue *TO_QUEUE_MAKE(apr_pool_t *p, apr_time_t t,
+ struct timeout_queue *ref)
+{
+ struct timeout_queue *q;
+
+ q = apr_pcalloc(p, sizeof *q);
+ APR_RING_INIT(&q->head, event_conn_state_t, timeout_list);
+ q->total = (ref) ? ref->total : apr_pcalloc(p, sizeof *q->total);
+ q->timeout = t;
+
+ return q;
+}
+
+#define TO_QUEUE_ELEM_INIT(el) \
+ APR_RING_ELEM_INIT((el), timeout_list)
/*
* The pollset for sockets that are in any of the timeout queues. Currently
apr_time_t timeout_time,
int (*func)(event_conn_state_t *))
{
- int total = 0, count;
+ apr_uint32_t total = 0, count; /* match the apr_atomic_*32 counter type */
event_conn_state_t *first, *cs, *last;
struct timeout_head_t trash;
struct timeout_queue *qp;
apr_status_t rv;
- if (!*q->total) {
+ if (!apr_atomic_read32(q->total)) { /* empty across all chained queues */
return;
}
APR_RING_UNSPLICE(first, last, timeout_list);
APR_RING_SPLICE_TAIL(&trash, first, last, event_conn_state_t,
timeout_list);
+ AP_DEBUG_ASSERT(apr_atomic_read32(q->total) >= count);
+ apr_atomic_sub32(q->total, count); /* qp shares q->total (chained queues) */
qp->count -= count;
total += count;
}
if (!total)
return;
- AP_DEBUG_ASSERT(*q->total >= total);
- *q->total -= total;
apr_thread_mutex_unlock(timeout_mutex);
first = APR_RING_FIRST(&trash);
do {
do {
apr_status_t rc;
proc_info *ti = dummy;
int process_slot = ti->pslot;
+ struct process_score *ps = ap_get_scoreboard_process(process_slot);
apr_pool_t *tpool = apr_thread_pool_get(thd);
apr_time_t timeout_time = 0, last_log;
int closed = 0, listeners_disabled = 0;
apr_interval_time_t timeout_interval;
apr_time_t now;
int workers_were_busy = 0;
+ int keepalives;
+
if (listener_may_exit) {
close_listeners(process_slot, &closed);
if (terminate_mode == ST_UNGRACEFUL
"keep-alive: %d lingering: %d suspended: %u)",
apr_atomic_read32(&connection_count),
apr_atomic_read32(&clogged_count),
- *write_completion_q->total,
- *keepalive_q->total,
+ apr_atomic_read32(write_completion_q->total),
+ apr_atomic_read32(keepalive_q->total),
apr_atomic_read32(&lingering_count),
apr_atomic_read32(&suspended_count));
if (dying) {
* will exceed now + TIMEOUT_FUDGE_FACTOR, can't happen otherwise).
*/
if (now > timeout_time || now + TIMEOUT_FUDGE_FACTOR < timeout_time ) {
- struct process_score *ps;
timeout_time = now + TIMEOUT_FUDGE_FACTOR;
/* handle timed out sockets */
/* If all workers are busy, we kill older keep-alive connections so that they
* may connect to another process.
*/
- if ((workers_were_busy || dying) && *keepalive_q->total) {
- if (!dying)
- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
- "All workers are busy, will close %d keep-alive "
- "connections",
- *keepalive_q->total);
+ if ((workers_were_busy || dying)
+ && (keepalives = apr_atomic_read32(keepalive_q->total))) {
+ ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
+ "All workers are %s, will close %d keep-alive "
+ "connections", dying ? "dying" : "busy",
+ keepalives);
process_timeout_queue(keepalive_q, 0,
start_lingering_close_nonblocking);
}
process_timeout_queue(write_completion_q, timeout_time,
start_lingering_close_nonblocking);
/* Step 3: (normal) lingering close completion timeouts */
process_timeout_queue(linger_q, timeout_time, stop_lingering_close);
/* Step 4: (short) lingering close completion timeouts */
process_timeout_queue(short_linger_q, timeout_time, stop_lingering_close);
- ps = ap_get_scoreboard_process(process_slot);
- ps->write_completion = *write_completion_q->total;
- ps->keep_alive = *keepalive_q->total;
apr_thread_mutex_unlock(timeout_mutex);
+ /* Atomic totals can now be read without timeout_mutex held */
+ ps->keep_alive = apr_atomic_read32(keepalive_q->total);
+ ps->write_completion = apr_atomic_read32(write_completion_q->total);
ps->connections = apr_atomic_read32(&connection_count);
ps->suspended = apr_atomic_read32(&suspended_count);
ps->lingering_close = apr_atomic_read32(&lingering_count);
wc.hash = apr_hash_make(ptemp);
ka.hash = apr_hash_make(ptemp);
- TO_QUEUE_INIT(linger_q, pconf,
- apr_time_from_sec(MAX_SECS_TO_LINGER), NULL);
- TO_QUEUE_INIT(short_linger_q, pconf,
- apr_time_from_sec(SECONDS_TO_LINGER), NULL);
+ /* Global lingering-close queues, each with its own total */
+ linger_q = TO_QUEUE_MAKE(pconf, apr_time_from_sec(MAX_SECS_TO_LINGER),
+ NULL);
+ short_linger_q = TO_QUEUE_MAKE(pconf, apr_time_from_sec(SECONDS_TO_LINGER),
+ NULL);
for (; s; s = s->next) {
event_srv_cfg *sc = apr_pcalloc(pconf, sizeof *sc);
ap_set_module_config(s->module_config, &mpm_event_module, sc);
if (!wc.tail) {
/* The main server uses the global queues */
- TO_QUEUE_INIT(wc.q, pconf, s->timeout, NULL);
+ wc.q = TO_QUEUE_MAKE(pconf, s->timeout, NULL); /* own total (head of chain) */
apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
wc.tail = write_completion_q = wc.q;
- TO_QUEUE_INIT(ka.q, pconf, s->keep_alive_timeout, NULL);
+ ka.q = TO_QUEUE_MAKE(pconf, s->keep_alive_timeout, NULL); /* own total (head of chain) */
apr_hash_set(ka.hash, &s->keep_alive_timeout,
sizeof s->keep_alive_timeout, ka.q);
ka.tail = keepalive_q = ka.q;
* or their own queue(s) if there isn't */
wc.q = apr_hash_get(wc.hash, &s->timeout, sizeof s->timeout);
if (!wc.q) {
- TO_QUEUE_INIT(wc.q, pconf, s->timeout, wc.tail);
+ wc.q = TO_QUEUE_MAKE(pconf, s->timeout, wc.tail); /* shares the chain's total */
apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
wc.tail = wc.tail->next = wc.q;
}
ka.q = apr_hash_get(ka.hash, &s->keep_alive_timeout,
sizeof s->keep_alive_timeout);
if (!ka.q) {
- TO_QUEUE_INIT(ka.q, pconf, s->keep_alive_timeout, ka.tail);
+ ka.q = TO_QUEUE_MAKE(pconf, s->keep_alive_timeout, ka.tail); /* shares the chain's total */
apr_hash_set(ka.hash, &s->keep_alive_timeout,
sizeof s->keep_alive_timeout, ka.q);
ka.tail = ka.tail->next = ka.q;