#include <assert.h>
#include <stddef.h>
+#include <stdlib.h>
#include <apr_atomic.h>
#include <apr_thread_mutex.h>
{
AP_DEBUG_ASSERT(m);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): destroy, refs=%d",
- m->id, m->refs);
+ "h2_mplx(%ld): destroy, ios=%d",
+ m->id, (int)h2_io_set_size(m->stream_ios));
m->aborted = 1;
if (m->ready_ios) {
h2_io_set_destroy(m->ready_ios);
if (m) {
m->id = c->id;
APR_RING_ELEM_INIT(m, link);
- m->refs = 1;
m->c = c;
apr_pool_create_ex(&m->pool, parent, NULL, allocator);
if (!m->pool) {
return m;
}
-static void release(h2_mplx *m, int lock)
-{
- if (lock) {
- apr_thread_mutex_lock(m->lock);
- --m->refs;
- if (m->join_wait) {
- apr_thread_cond_signal(m->join_wait);
- }
- apr_thread_mutex_unlock(m->lock);
- }
- else {
- --m->refs;
- }
-}
-
-void h2_mplx_reference(h2_mplx *m)
-{
- apr_thread_mutex_lock(m->lock);
- ++m->refs;
- apr_thread_mutex_unlock(m->lock);
-}
-
-void h2_mplx_release(h2_mplx *m)
-{
- release(m, 1);
-}
-
static void workers_register(h2_mplx *m)
{
/* Initially, there was ref count increase for this as well, but
{
/* Remove io from ready set, we will never submit it */
h2_io_set_remove(m->ready_ios, io);
- if (io->task_done || h2_tq_remove(m->q, io->id)) {
+ if (!io->worker_started || io->worker_done) {
/* already finished or not even started yet */
+ h2_tq_remove(m->q, io->id);
io_destroy(m, io, 1);
return 0;
}
workers_unregister(m);
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- int i;
+ int i, wait_secs = 5;
/* disable WINDOW_UPDATE callbacks */
h2_mplx_set_consumed_cb(m, NULL, NULL);
+
while (!h2_io_set_iter(m->stream_ios, stream_done_iter, m)) {
- /* iterator until all h2_io have been orphaned or destroyed */
+ /* iterate until all ios have been orphaned or destroyed */
}
- release(m, 0);
- for (i = 0; m->refs > 0; ++i) {
-
+ /* Any remaining ios have handed out their requests to workers that
+ * are not done yet. Anything a worker does on its assigned stream io
+ * will fail with ECONNRESET/ABORTED, so they should find out soon.
+ */
+ for (i = 0; h2_io_set_size(m->stream_ios) > 0; ++i) {
m->join_wait = wait;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): release_join, refs=%d, waiting...",
- m->id, m->refs);
+ "h2_mplx(%ld): release_join, waiting on %d worker to report back",
+ m->id, (int)h2_io_set_size(m->stream_ios));
- status = apr_thread_cond_timedwait(wait, m->lock, apr_time_from_sec(2));
+ status = apr_thread_cond_timedwait(wait, m->lock, apr_time_from_sec(wait_secs));
if (APR_STATUS_IS_TIMEUP(status)) {
- ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c,
- "h2_mplx(%ld): release timeup %d, refs=%d, waiting...",
- m->id, i, m->refs);
+ if (i > 0) {
+ /* Uh oh. We are still waiting for assigned workers to report
+ * that they are done. Unless we have a bug, a worker seems to be
+ * hanging. If we exit now, everything will be deallocated and the
+ * worker, once it does return, will walk all over freed memory...
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
+ "h2_mplx(%ld): release, waiting for %d seconds now for "
+ "all h2_workers to return, have still %d requests outstanding",
+ m->id, i*wait_secs, (int)h2_io_set_size(m->stream_ios));
+ }
}
}
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
- "h2_mplx(%ld): release_join -> destroy, (#ios=%ld)",
- m->id, (long)h2_io_set_size(m->stream_ios));
+ "h2_mplx(%ld): release_join -> destroy", m->id);
apr_thread_mutex_unlock(m->lock);
h2_mplx_destroy(m);
/* all gone */
return status;
}
-void h2_mplx_task_done(h2_mplx *m, int stream_id)
+static const h2_request *pop_request(h2_mplx *m)
{
+ const h2_request *req = NULL;
+ int sid;
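+ /* shift stream ids from the task queue until we find one whose
+ * io is still around; ios of already reset streams may be gone */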
+ while (!req && (sid = h2_tq_shift(m->q)) > 0) {
+ h2_io *io = h2_io_set_get(m->stream_ios, sid);
+ if (io) {
+ req = io->request;
+ io->worker_started = 1;
+ }
+ }
+ return req;
+}
+
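+/* A worker reports its request as done. If preq is not NULL, the worker
+ * is willing to process another request; when none can be handed out,
+ * *pm is set to NULL and the worker's hold on the mplx ends. */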
+void h2_mplx_request_done(h2_mplx **pm, int stream_id, const h2_request **preq)
+{
+ h2_mplx *m = *pm;
+
apr_status_t status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld): task(%d) done", m->id, stream_id);
+ "h2_mplx(%ld): request(%d) done", m->id, stream_id);
if (io) {
- io->task_done = 1;
+ io->worker_done = 1;
if (io->orphaned) {
io_destroy(m, io, 0);
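+ /* release_join() may be waiting for the last orphaned io
+ * to disappear; wake it up */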
+ if (m->join_wait) {
+ apr_thread_cond_signal(m->join_wait);
+ }
}
else {
/* hang around until the stream deregisters */
}
}
+
+ if (preq) {
+ /* someone wants another request, if we have one */
+ *preq = pop_request(m);
+ }
+ if (!preq || !*preq) {
+ /* No request to hand back to the worker; NULLify the reference
+ * so the worker lets go of the mplx */
+ *pm = NULL;
+ }
apr_thread_mutex_unlock(m->lock);
}
}
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
io_process_events(m, io);
}
else {
- status = APR_EOF;
+ status = APR_ECONNABORTED;
}
apr_thread_mutex_unlock(m->lock);
}
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
{
apr_status_t status;
h2_stream *stream = NULL;
+
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return NULL;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_pop_highest_prio(m->ready_ios);
- if (io) {
+ if (io && !m->aborted) {
stream = h2_stream_set_get(streams, io->id);
if (stream) {
if (io->rst_error) {
h2_stream_set_response(stream, io->response, io->bbout);
H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_next_submit_post");
}
-
}
else {
/* We have the io ready, but the stream has gone away, maybe
"resetting io to close request processing",
m->id, io->id);
h2_io_make_orphaned(io, H2_ERR_STREAM_CLOSED);
- if (io->task_done) {
+ if (!io->worker_started || io->worker_done) {
io_destroy(m, io, 1);
}
else {
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- status = out_open(m, stream_id, response, f, bb, iowait);
- if (APLOGctrace1(m->c)) {
- h2_util_bb_log(m->c, stream_id, APLOG_TRACE1, "h2_mplx_out_open", bb);
- }
if (m->aborted) {
- return APR_ECONNABORTED;
+ status = APR_ECONNABORTED;
+ }
+ else {
+ status = out_open(m, stream_id, response, f, bb, iowait);
+ if (APLOGctrace1(m->c)) {
+ h2_util_bb_log(m->c, stream_id, APLOG_TRACE1, "h2_mplx_out_open", bb);
+ }
}
apr_thread_mutex_unlock(m->lock);
}
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- if (!m->aborted) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->orphaned) {
- status = out_write(m, io, f, bb, trailers, iowait);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
- "h2_mplx(%ld-%d): write with trailers=%s",
- m->id, io->id, trailers? "yes" : "no");
- H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_write");
-
- have_out_data_for(m, stream_id);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
- }
- else {
- status = APR_ECONNABORTED;
+ h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
+ if (io && !io->orphaned) {
+ status = out_write(m, io, f, bb, trailers, iowait);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
+ "h2_mplx(%ld-%d): write with trailers=%s",
+ m->id, io->id, trailers? "yes" : "no");
+ H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_write");
+
+ have_out_data_for(m, stream_id);
+ if (m->aborted) {
+ /* do not return early here; we still hold the lock */
+ status = APR_ECONNABORTED;
}
}
-
- if (m->lock) {
- apr_thread_mutex_unlock(m->lock);
+ else {
+ status = APR_ECONNABORTED;
}
+ apr_thread_mutex_unlock(m->lock);
}
return status;
}
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- if (!m->aborted) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->orphaned) {
- if (!io->response && !io->rst_error) {
- /* In case a close comes before a response was created,
- * insert an error one so that our streams can properly
- * reset.
- */
- h2_response *r = h2_response_die(stream_id, APR_EGENERAL,
- io->request, m->pool);
- status = out_open(m, stream_id, r, NULL, NULL, NULL);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
- "h2_mplx(%ld-%d): close, no response, no rst",
- m->id, io->id);
- }
+ h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
+ if (io && !io->orphaned) {
+ if (!io->response && !io->rst_error) {
+ /* In case a close comes before a response was created,
+ * insert an error one so that our streams can properly
+ * reset.
+ */
+ h2_response *r = h2_response_die(stream_id, APR_EGENERAL,
+ io->request, m->pool);
+ status = out_open(m, stream_id, r, NULL, NULL, NULL);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
- "h2_mplx(%ld-%d): close with trailers=%s",
- m->id, io->id, trailers? "yes" : "no");
- status = h2_io_out_close(io, trailers);
- H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_close");
-
- have_out_data_for(m, stream_id);
- if (m->aborted) {
- /* if we were the last output, the whole session might
- * have gone down in the meantime.
- */
- return APR_SUCCESS;
- }
+ "h2_mplx(%ld-%d): close, no response, no rst",
+ m->id, io->id);
}
- else {
- status = APR_ECONNABORTED;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
+ "h2_mplx(%ld-%d): close with trailers=%s",
+ m->id, io->id, trailers? "yes" : "no");
+ status = h2_io_out_close(io, trailers);
+ H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_close");
+
+ have_out_data_for(m, stream_id);
+ if (m->aborted) {
+ /* if we were the last output, the whole session might
+ * have gone down in the meantime; do not return early,
+ * we still hold the lock */
+ status = APR_SUCCESS;
}
}
+ else {
+ status = APR_ECONNABORTED;
+ }
apr_thread_mutex_unlock(m->lock);
}
return status;
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- if (!m->aborted) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io && !io->rst_error && !io->orphaned) {
- h2_io_rst(io, error);
- if (!io->response) {
- h2_io_set_add(m->ready_ios, io);
- }
- H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_rst");
-
- have_out_data_for(m, stream_id);
- h2_io_signal(io, H2_IO_WRITE);
- }
- else {
- status = APR_ECONNABORTED;
+ h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
+ if (io && !io->rst_error && !io->orphaned) {
+ h2_io_rst(io, error);
+ if (!io->response) {
+ h2_io_set_add(m->ready_ios, io);
}
+ H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_rst");
+
+ have_out_data_for(m, stream_id);
+ h2_io_signal(io, H2_IO_WRITE);
+ }
+ else {
+ status = APR_ECONNABORTED;
}
apr_thread_mutex_unlock(m->lock);
}
int has_eos = 0;
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return 0;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io) {
- has_eos = io->orphaned || h2_io_in_has_eos_for(io);
+ if (io && !io->orphaned) {
+ has_eos = h2_io_in_has_eos_for(io);
+ }
+ else {
+ has_eos = 1;
}
apr_thread_mutex_unlock(m->lock);
}
apr_status_t status;
int has_data = 0;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return 0;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io) {
+ if (io && !io->orphaned) {
has_data = h2_io_out_has_data(io);
}
+ else {
+ has_data = 0;
+ }
apr_thread_mutex_unlock(m->lock);
}
return has_data;
{
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- m->added_output = iowait;
- status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
- if (APLOGctrace2(m->c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld): trywait on data for %f ms)",
- m->id, timeout/1000.0);
- }
- m->added_output = NULL;
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ m->added_output = iowait;
+ status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
+ if (APLOGctrace2(m->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): trywait on data for %f ms)",
+ m->id, timeout/1000.0);
+ }
+ m->added_output = NULL;
+ }
apr_thread_mutex_unlock(m->lock);
}
return status;
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- h2_tq_sort(m->q, cmp, ctx);
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): reprioritize tasks", m->id);
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ h2_tq_sort(m->q, cmp, ctx);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): reprioritize tasks", m->id);
+ }
apr_thread_mutex_unlock(m->lock);
}
workers_register(m);
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- h2_io *io = open_io(m, stream_id);
- io->request = req;
-
- if (!io->request->body) {
- status = h2_io_in_close(io);
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ h2_io *io = open_io(m, stream_id);
+ io->request = req;
+
+ if (!io->request->body) {
+ status = h2_io_in_close(io);
+ }
+
+ h2_tq_add(m->q, io->id, cmp, ctx);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
+ "h2_mplx(%ld-%d): process", m->c->id, stream_id);
+ H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_process");
}
-
- h2_tq_add(m->q, io->id, cmp, ctx);
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
- "h2_mplx(%ld-%d): process", m->c->id, stream_id);
- H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_process");
apr_thread_mutex_unlock(m->lock);
}
apr_status_t status;
AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- *has_more = 0;
- return NULL;
- }
status = apr_thread_mutex_lock(m->lock);
if (APR_SUCCESS == status) {
- int sid;
- while (!req && (sid = h2_tq_shift(m->q)) > 0) {
- h2_io *io = h2_io_set_get(m->stream_ios, sid);
- if (io) {
- req = io->request;
- }
+ if (m->aborted) {
+ req = NULL;
+ *has_more = 0;
+ }
+ else {
+ req = pop_request(m);
+ *has_more = !h2_tq_empty(m->q);
}
- *has_more = !h2_tq_empty(m->q);
apr_thread_mutex_unlock(m->lock);
}
return req;
nghttp2_option *options = NULL;
apr_pool_t *pool = NULL;
- apr_status_t status = apr_pool_create(&pool, r? r->pool : c->pool);
+ apr_status_t status = apr_pool_create(&pool, c->pool);
h2_session *session;
if (status != APR_SUCCESS) {
return NULL;
session->max_stream_count = h2_config_geti(session->config, H2_CONF_MAX_STREAMS);
session->max_stream_mem = h2_config_geti(session->config, H2_CONF_STREAM_MAX_MEM);
session->timeout_secs = h2_config_geti(session->config, H2_CONF_TIMEOUT_SECS);
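+ /* no h2 timeout configured, fall back to the server's timeout */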
+ if (session->timeout_secs <= 0) {
+ session->timeout_secs = apr_time_sec(session->s->timeout);
+ }
session->keepalive_secs = h2_config_geti(session->config, H2_CONF_KEEPALIVE_SECS);
if (session->keepalive_secs <= 0) {
- session->keepalive_secs = session->timeout_secs;
+ session->keepalive_secs = apr_time_sec(session->s->keep_alive_timeout);
}
status = apr_thread_cond_create(&session->iowait, session->pool);
h2_session_destroy(session);
return NULL;
}
-
+
+ if (APLOGcdebug(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c,
+ "session(%ld) created, timeout=%d, keepalive_timeout=%d, "
+ "max_streams=%d, stream_mem=%d",
+ session->id, session->timeout_secs, session->keepalive_secs,
+ (int)session->max_stream_count, (int)session->max_stream_mem);
+ }
}
return session;
}
session->max_stream_received,
reason, NULL, 0);
nghttp2_session_send(session->ngh2);
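+ /* remember that we, the server, announced the GOAWAY */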
+ session->server_goaway = 1;
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
"session(%ld): shutdown, no err", session->id);
}
reason, (const uint8_t *)err,
strlen(err));
nghttp2_session_send(session->ngh2);
+ session->server_goaway = 1;
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
"session(%ld): shutdown, err=%d '%s'",
session->id, reason, err);
/* common status for a client that has left */
ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
"h2_session(%ld): input gone", session->id);
- /* Stolen from mod_reqtimeout to speed up lingering when
- * a read timeout happened.
- */
- apr_table_setn(session->c->notes, "short-lingering-close", "1");
}
else {
/* uncommon status, log on INFO so that we see this */
if (!h2_is_acceptable_connection(c, 1)) {
nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, 0,
NGHTTP2_INADEQUATE_SECURITY, NULL, 0);
+ nghttp2_session_send(session->ngh2);
+ session->server_goaway = 1;
}
status = h2_session_start(session, &rv);
case H2_SESSION_ST_BUSY:
if (nghttp2_session_want_read(session->ngh2)) {
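+ /* have connection input honor the session timeout */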
+ h2_filter_cin_timeout_set(session->cin, session->timeout_secs);
status = h2_session_read(session, 0, 10);
if (status == APR_SUCCESS) {
/* got something, continue processing */
case H2_SESSION_ST_KEEPALIVE:
/* Our normal H2Timeout has passed and we are considering to
- * extend that with the H2KeepAliveTimeout. This works different
- * for async MPMs. */
+ * extend that with the H2KeepAliveTimeout. */
remain_secs = session->keepalive_secs - session->timeout_secs;
- if (!async && remain_secs <= 0) {
- /* not async, keepalive is smaller than normal timeout, close the session */
+ if (remain_secs <= 0) {
+ /* keepalive is <= normal timeout, close the session */
reason = "keepalive expired";
h2_session_shutdown(session, 0);
goto out;
}
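+ /* mark the connection as in keep-alive state */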
+ session->c->keepalive = AP_CONN_KEEPALIVE;
ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_KEEPALIVE, c);
- if (async && session->c->cs) {
+
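+ /* Only hand the connection back to the MPM when its own
+ * keep_alive_timeout covers the remainder of our keepalive
+ * window; the MPM enforces its timeout from here on. */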
+ if ((apr_time_sec(session->s->keep_alive_timeout) >= remain_secs)
+ && async && session->c->cs
+ && !session->r) {
/* Async MPMs are able to handle keep-alive connections without
* blocking a thread. For this to happen, we need to return from
* processing, indicating the IO event we are waiting for, and
* may be called again if the event happens.
- * For now, we let the MPM handle any timing on this, so we
- * cannot really enforce the remain_secs here.
+ * TODO: this does not properly send a GOAWAY on such connections...
+ * TODO: this currently does not work for upgraded requests...
*/
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c,
- "h2_session(%ld): async KEEPALIVE -> BUSY", session->id);
- session->state = H2_SESSION_ST_BUSY;
- session->c->cs->sense = CONN_SENSE_WANT_READ;
+ "h2_session(%ld): async KEEPALIVE -> IDLE_READ", session->id);
+ session->state = H2_SESSION_ST_IDLE_READ;
+ session->c->cs->state = CONN_STATE_WRITE_COMPLETION;
reason = "async keepalive";
status = APR_SUCCESS;
goto out;