From 28d50b0579d29e127bc29e7c268ecaa83956bb51 Mon Sep 17 00:00:00 2001
From: Stefan Eissing
Date: Wed, 9 Mar 2016 11:51:25 +0000
Subject: [PATCH] mod_proxy_http2: single engine per type per master
 connection, removing some race conditions on engine exit, mod_http2: better
 debug logging by tagging slave connection with task id

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1734221 13f79535-47bb-0310-9956-ffa450edef68
---
 modules/http2/h2.h               |  5 ++
 modules/http2/h2_conn.c          |  3 ++
 modules/http2/h2_io.c            |  7 ++-
 modules/http2/h2_io.h            |  1 +
 modules/http2/h2_mplx.c          | 30 ++++++------
 modules/http2/h2_mplx.h          |  1 +
 modules/http2/h2_ngn_shed.c      | 79 +++++++++++++++-----------------
 modules/http2/h2_ngn_shed.h      |  4 +-
 modules/http2/h2_proxy_session.c | 25 +++++-----
 modules/http2/h2_session.c       | 15 ++++--
 modules/http2/mod_http2.h        |  2 -
 modules/http2/mod_proxy_http2.c  | 15 ++++--
 12 files changed, 104 insertions(+), 83 deletions(-)

diff --git a/modules/http2/h2.h b/modules/http2/h2.h
index 5429444462..89d174e237 100644
--- a/modules/http2/h2.h
+++ b/modules/http2/h2.h
@@ -139,4 +139,9 @@ struct h2_response {
 };
 
+/* Note key to attach connection task id to conn_rec/request_rec instances */
+
+#define H2_TASK_ID_NOTE "http2-task-id"
+
+
 
 #endif /* defined(__mod_h2__h2__) */
diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c
index e7bb1dd1e1..a0cd54e6ac 100644
--- a/modules/http2/h2_conn.c
+++ b/modules/http2/h2_conn.c
@@ -310,6 +310,9 @@ conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *parent,
 void h2_slave_destroy(conn_rec *slave, apr_allocator_t **pallocator)
 {
     apr_allocator_t *allocator = apr_pool_allocator_get(slave->pool);
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, slave,
+                  "h2_slave_conn(%ld): destroy (task=%s)", slave->id,
+                  apr_table_get(slave->notes, H2_TASK_ID_NOTE));
     apr_pool_destroy(slave->pool);
     if (pallocator) {
         *pallocator = allocator;
diff --git a/modules/http2/h2_io.c b/modules/http2/h2_io.c
index a54e8763b7..3f82c60f10 100644
--- a/modules/http2/h2_io.c
+++ b/modules/http2/h2_io.c
@@ -357,6 +357,7 @@ apr_status_t h2_io_out_readx(h2_io *io,
         status = h2_util_bb_readx(io->bbout, cb, ctx, plen, peos);
         if (status == APR_SUCCESS) {
             io->eos_out_read = *peos;
+            io->output_consumed += *plen;
         }
     }
     
@@ -366,6 +367,8 @@ apr_status_t h2_io_out_readx(h2_io *io,
 apr_status_t h2_io_out_read_to(h2_io *io, apr_bucket_brigade *bb, 
                                apr_off_t *plen, int *peos)
 {
+    apr_status_t status;
+
     if (io->rst_error) {
         return APR_ECONNABORTED;
     }
@@ -382,7 +385,9 @@ apr_status_t h2_io_out_read_to(h2_io *io, apr_bucket_brigade *bb,
     }
     
     io->eos_out_read = *peos = h2_util_has_eos(io->bbout, *plen);
-    return h2_util_move(bb, io->bbout, *plen, NULL, "h2_io_read_to");
+    status = h2_util_move(bb, io->bbout, *plen, NULL, "h2_io_read_to");
+    io->output_consumed += *plen;
+    return status;
 }
 
 static void process_trailers(h2_io *io, apr_table_t *trailers)
diff --git a/modules/http2/h2_io.h b/modules/http2/h2_io.h
index bfe42a96b4..d92b7eb0d4 100644
--- a/modules/http2/h2_io.h
+++ b/modules/http2/h2_io.h
@@ -65,6 +65,7 @@ struct h2_io {
     apr_time_t started_at;      /* when processing started */
     apr_time_t done_at;         /* when processing was done */
     apr_size_t input_consumed;  /* how many bytes have been read */
+    apr_size_t output_consumed; /* how many bytes have been written out */
 
     int files_handles_owned;
 };
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
index 117becac81..4d7f63bb52 100644
--- a/modules/http2/h2_mplx.c
+++ b/modules/http2/h2_mplx.c
@@ -201,10 +201,11 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
         return NULL;
     }
     
-    m->q = h2_iq_create(m->pool, h2_config_geti(conf, H2_CONF_MAX_STREAMS));
+    m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS);
+    m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
+    m->q = h2_iq_create(m->pool, m->max_streams);
     m->stream_ios = h2_io_set_create(m->pool);
     m->ready_ios = h2_io_set_create(m->pool);
-    m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
     m->stream_timeout = stream_timeout;
     m->workers = workers;
     m->workers_max = workers->max_workers;
@@ -216,7 +217,8 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
         m->tx_handles_reserved = 0;
         m->tx_chunk_size = 4;
-        m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->stream_max_mem);
+        m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams, 
+                                         m->stream_max_mem);
         h2_ngn_shed_set_ctx(m->ngn_shed , m);
     }
     return m;
 }
@@ -1085,6 +1087,7 @@ static h2_task *pop_task(h2_mplx *m)
             conn_rec *slave = h2_slave_create(m->c, m->pool, m->spare_allocator);
             m->spare_allocator = NULL;
             task = h2_task_create(m->id, io->request, slave, m);
+            apr_table_setn(slave->notes, H2_TASK_ID_NOTE, task->id);
             io->worker_started = 1;
             io->started_at = apr_time_now();
             if (sid > m->max_stream_started) {
@@ -1124,16 +1127,20 @@ static void task_done(h2_mplx *m, h2_task *task)
 {
     if (task) {
         if (task->frozen) {
-            /* this task was handed over to an engine for processing */
+            /* this task was handed over to an engine for processing 
+             * and the original worker has finished. That means the 
+             * engine may start processing now. */
             h2_task_thaw(task);
-            /* TODO: not implemented yet... */
-            /*h2_task_set_io_blocking(task, 0);*/
+            /* we do not want the task to block on writing response
+             * bodies into the mplx. */
+            /* FIXME: this implementation is incomplete. */
+            h2_task_set_io_blocking(task, 0);
             apr_thread_cond_broadcast(m->req_added);
         }
         else {
             h2_io *io = h2_io_set_get(m->stream_ios, task->stream_id);
             
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
                           "h2_mplx(%ld): task(%s) done", m->id, task->id);
             /* clean our references and report request as done. Signal
              * that we want another unless we have been aborted */
@@ -1143,8 +1150,6 @@ static void task_done(h2_mplx *m, h2_task *task)
             h2_mplx_out_close(m, task->stream_id, NULL);
             
             if (task->engine) {
-                /* should already have been done by the task, but as
-                 * a last resort, we get rid of it here. */
                 if (!h2_req_engine_is_shutdown(task->engine)) {
                     ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
                                   "h2_mplx(%ld): task(%s) has not-shutdown "
@@ -1369,7 +1374,7 @@ apr_status_t h2_mplx_idle(h2_mplx *m)
  ******************************************************************************/
 
 apr_status_t h2_mplx_req_engine_push(const char *ngn_type, 
-                                    request_rec *r, h2_req_engine_init *einit)
+                                     request_rec *r, h2_req_engine_init *einit)
 {
     apr_status_t status;
     h2_mplx *m;
@@ -1381,7 +1386,6 @@ apr_status_t h2_mplx_req_engine_push(const char *ngn_type,
         return APR_ECONNABORTED;
     }
     m = task->mplx;
-    AP_DEBUG_ASSERT(m);
     
     if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
         h2_io *io = h2_io_set_get(m->stream_ios, task->stream_id);
@@ -1389,9 +1393,9 @@ apr_status_t h2_mplx_req_engine_push(const char *ngn_type,
             status = APR_ECONNABORTED;
         }
         else {
-            status = h2_ngn_shed_push_req(m->ngn_shed, ngn_type, task, r, einit);
+            status = h2_ngn_shed_push_req(m->ngn_shed, ngn_type, 
+                                          task, r, einit);
         }
-
         leave_mutex(m, acquired);
     }
     return status;
diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h
index 497cf99213..a61a63891a 100644
--- a/modules/http2/h2_mplx.h
+++ b/modules/http2/h2_mplx.h
@@ -76,6 +76,7 @@ struct h2_mplx {
     struct h2_io_set *ready_ios;
     struct h2_io_set *redo_ios;
     
+    apr_uint32_t max_streams;        /* max # of concurrent streams */
     apr_uint32_t max_stream_started; /* highest stream id that started processing */
     apr_uint32_t workers_busy;       /* # of workers processing on this mplx */
    apr_uint32_t workers_limit;      /* current # of workers limit, dynamic */
diff --git a/modules/http2/h2_ngn_shed.c b/modules/http2/h2_ngn_shed.c
index b23a5e95f3..4a5ea096fd 100644
--- a/modules/http2/h2_ngn_shed.c
+++ b/modules/http2/h2_ngn_shed.c
@@ -77,6 +77,7 @@ struct h2_req_engine {
     h2_ngn_shed *shed;
     
     unsigned int shutdown : 1; /* engine is being shut down */
+    unsigned int done : 1;     /* engine has finished */
     
     APR_RING_HEAD(h2_req_entries, h2_ngn_entry) entries;
     apr_uint32_t capacity;     /* maximum concurrent requests */
@@ -96,6 +97,7 @@ int h2_req_engine_is_shutdown(h2_req_engine *engine)
 }
 
 h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c,
+                                apr_uint32_t default_capacity, 
                                 apr_uint32_t req_buffer_size)
 {
     h2_ngn_shed *shed;
@@ -103,6 +105,7 @@ h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c,
     shed = apr_pcalloc(pool, sizeof(*shed));
     shed->c = c;
     shed->pool = pool;
+    shed->default_capacity = default_capacity;
     shed->req_buffer_size = req_buffer_size;
     shed->ngns = apr_hash_make(pool);
     
@@ -146,63 +149,53 @@ apr_status_t h2_ngn_shed_push_req(h2_ngn_shed *shed, const char *ngn_type,
     AP_DEBUG_ASSERT(shed);
     
-    apr_table_set(r->connection->notes, H2_TASK_ID_NOTE, task->id);
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
+                  "h2_ngn_shed(%ld): PUSHing request (task=%s)", shed->c->id,
+                  apr_table_get(r->connection->notes, H2_TASK_ID_NOTE));
     
     if (task->ser_headers) {
         /* Max compatibility, deny processing of this */
         return APR_EOF;
     }
     
     ngn = apr_hash_get(shed->ngns, ngn_type, APR_HASH_KEY_STRING);
-    if (ngn) {
-        if (ngn->shutdown) {
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
-                          "h2_ngn_shed(%ld): %s in shutdown", 
-                          shed->c->id, ngn->id);
-        }
-        else if (ngn->no_assigned >= ngn->capacity) {
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
-                          "h2_ngn_shed(%ld): %s over capacity %d/%d", 
-                          shed->c->id, ngn->id, ngn->no_assigned, 
-                          ngn->capacity);
-        }
-        else {
-            /* this task will be processed in another thread,
-             * freeze any I/O for the time being. */
-            h2_task_freeze(task, r);
-            ngn_add_req(ngn, task, r);
-            ngn->no_assigned++;
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
-                          "h2_ngn_shed(%ld): pushed request %s to %s", 
-                          shed->c->id, task->id, ngn->id);
-            return APR_SUCCESS;
-        }
+    if (ngn && !ngn->shutdown) {
+        /* this task will be processed in another thread,
+         * freeze any I/O for the time being. */
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+                      "h2_ngn_shed(%ld): pushing request %s to %s", 
+                      shed->c->id, task->id, ngn->id);
+        h2_task_freeze(task, r);
+        /* FIXME: sometimes ngn is garbage, probably already freed */
+        ngn_add_req(ngn, task, r);
+        ngn->no_assigned++;
+        return APR_SUCCESS;
     }
     
-    /* none of the existing engines has capacity */
+    /* no existing engine or being shut down, start a new one */
     if (einit) {
         apr_status_t status;
+        apr_pool_t *pool = task->c->pool;
         h2_req_engine *newngn;
         
-        newngn = apr_pcalloc(task->c->pool, sizeof(*ngn));
-        newngn->id = apr_psprintf(task->c->pool, "ngn-%ld-%d",
-                                  shed->c->id, shed->next_ngn_id++);
-        newngn->pool = task->c->pool;
-        newngn->type = apr_pstrdup(task->c->pool, ngn_type);
-        newngn->c = r->connection;
-        APR_RING_INIT(&newngn->entries, h2_ngn_entry, link);
+        newngn = apr_pcalloc(pool, sizeof(*ngn));
+        newngn->pool = pool;
+        newngn->id = apr_psprintf(pool, "ngn-%s", task->id);
+        newngn->type = apr_pstrdup(pool, ngn_type);
+        newngn->c = task->c;
         newngn->shed = shed;
-        newngn->capacity = 100;
+        newngn->capacity = shed->default_capacity;
         newngn->no_assigned = 1;
         newngn->no_live = 1;
+        APR_RING_INIT(&newngn->entries, h2_ngn_entry, link);
         
         status = einit(newngn, newngn->id, newngn->type, newngn->pool,
                        shed->req_buffer_size, r);
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c,
-                      "h2_ngn_shed(%ld): init engine %s (%s)",
+                      "h2_ngn_shed(%ld): create engine %s (%s)",
                       shed->c->id, newngn->id, newngn->type);
         if (status == APR_SUCCESS) {
-            newngn->task = task;
             AP_DEBUG_ASSERT(task->engine == NULL);
+            newngn->task = task;
             task->engine = newngn;
             apr_hash_set(shed->ngns, newngn->type, APR_HASH_KEY_STRING, newngn);
         }
@@ -290,7 +283,9 @@ apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed,
 
 void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn)
 {
-    h2_req_engine *existing;
+    if (ngn->done) {
+        return;
+    }
     
     if (!shed->aborted && !H2_REQ_ENTRIES_EMPTY(&ngn->entries)) {
         h2_ngn_entry *entry;
@@ -323,13 +318,11 @@ void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn)
                       (long)ngn->no_finished);
     }
     else {
-        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c,
-                      "h2_ngn_shed(%ld): exit engine %s (%s)", 
-                      shed->c->id, ngn->id, ngn->type);
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
+                      "h2_ngn_shed(%ld): exit engine %s", 
+                      shed->c->id, ngn->id);
     }
     
-    existing = apr_hash_get(shed->ngns, ngn->type, APR_HASH_KEY_STRING);
-    if (existing == ngn) {
-        apr_hash_set(shed->ngns, ngn->type, APR_HASH_KEY_STRING, NULL);
-    }
+    apr_hash_set(shed->ngns, ngn->type, APR_HASH_KEY_STRING, NULL);
+    ngn->done = 1;
 }
diff --git a/modules/http2/h2_ngn_shed.h b/modules/http2/h2_ngn_shed.h
index 887f750d13..3dc9e375ef 100644
--- a/modules/http2/h2_ngn_shed.h
+++ b/modules/http2/h2_ngn_shed.h
@@ -24,10 +24,11 @@ struct h2_ngn_shed {
     conn_rec *c;
     apr_pool_t *pool;
     apr_hash_t *ngns;
-    int next_ngn_id;
     
     void *user_ctx;
     
     unsigned int aborted : 1;
+    
+    apr_uint32_t default_capacity;
     apr_uint32_t req_buffer_size; /* preferred buffer size for responses */
 };
@@ -42,6 +43,7 @@ typedef apr_status_t h2_shed_ngn_init(h2_req_engine *engine,
                                       request_rec *r);
 
 h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c,
+                                apr_uint32_t default_capacity, 
                                 apr_uint32_t req_buffer_size);
 
 void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx);
diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c
index d2581becc5..217b36410a 100644
--- a/modules/http2/h2_proxy_session.c
+++ b/modules/http2/h2_proxy_session.c
@@ -130,6 +130,7 @@ static int on_frame_recv(nghttp2_session *ngh2, const nghttp2_frame *frame,
                          void *user_data)
 {
     h2_proxy_session *session = user_data;
+    int n;
     
     if (APLOGcdebug(session->c)) {
         char buffer[256];
@@ -147,7 +148,10 @@ static int on_frame_recv(nghttp2_session *ngh2, const nghttp2_frame *frame,
             break;
         case NGHTTP2_SETTINGS:
             if (frame->settings.niv > 0) {
-                session->remote_max_concurrent = nghttp2_session_get_remote_settings(ngh2, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS);
+                n = nghttp2_session_get_remote_settings(ngh2, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS);
+                if (n > 0) {
+                    session->remote_max_concurrent = n;
+                }
             }
             break;
         case NGHTTP2_GOAWAY:
@@ -156,7 +160,7 @@ static int on_frame_recv(nghttp2_session *ngh2, const nghttp2_frame *frame,
             char buffer[256];
             
             h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
-            ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c, APLOGNO(03342)
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03342)
                           "h2_proxy_session(%s): recv FRAME[%s]",
                           session->id, buffer);
         }
@@ -253,7 +257,6 @@ static apr_status_t h2_proxy_stream_add_header_out(h2_proxy_stream *stream,
 static int log_header(void *ctx, const char *key, const char *value)
 {
     h2_proxy_stream *stream = ctx;
-    
     ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r, 
                   "h2_proxy_stream(%s-%d), header_out %s: %s", 
                   stream->session->id, stream->id, key, value);
@@ -329,7 +332,6 @@ static int on_data_chunk_recv(nghttp2_session *ngh2, uint8_t flags,
     apr_bucket *b;
     apr_status_t status;
     
-    /*nghttp2_session_consume(ngh2, stream_id, len);*/
     stream = nghttp2_session_get_stream_user_data(ngh2, stream_id);
     if (!stream) {
         ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, stream->r,
@@ -352,8 +354,6 @@ static int on_data_chunk_recv(nghttp2_session *ngh2, uint8_t flags,
     if (flags & NGHTTP2_DATA_FLAG_EOF) {
         b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
         APR_BRIGADE_INSERT_TAIL(stream->output, b);
-        /*b = apr_bucket_eos_create(stream->r->connection->bucket_alloc);
-        APR_BRIGADE_INSERT_TAIL(stream->output, b);*/
     }
     
     ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, stream->r,
@@ -368,6 +368,7 @@ static int on_data_chunk_recv(nghttp2_session *ngh2, uint8_t flags,
                       stream_id, NGHTTP2_STREAM_CLOSED);
         return NGHTTP2_ERR_STREAM_CLOSING;
     }
     
+    nghttp2_session_consume(ngh2, stream_id, len);
     return 0;
 }
@@ -530,7 +531,7 @@ h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
     
     nghttp2_option_new(&option);
     nghttp2_option_set_peer_max_concurrent_streams(option, 100);
-    nghttp2_option_set_no_auto_window_update(option, 0);
+    nghttp2_option_set_no_auto_window_update(option, 1);
     
     nghttp2_session_client_new2(&session->ngh2, cbs, session, option);
     
@@ -638,12 +639,10 @@ static apr_status_t submit_stream(h2_proxy_session *session, h2_proxy_stream *st
                                  hd->nv, hd->nvlen, pp, stream);
     
     if (APLOGcdebug(session->c)) {
-        const char *task_id = apr_table_get(stream->r->connection->notes,
-                                            H2_TASK_ID_NOTE);
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, 
-                      "h2_proxy_session(%s): submit %s%s -> %d (task %s)", 
+                      "h2_proxy_session(%s): submit %s%s -> %d", 
                       session->id, stream->req->authority, stream->req->path,
-                      rv, task_id);
+                      rv);
     }
     else {
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, 
@@ -751,7 +750,7 @@ static apr_status_t h2_proxy_session_read(h2_proxy_session *session, int block,
             /* nop */
         }
         else if (!APR_STATUS_IS_EAGAIN(status)) {
-            ap_log_cerror(APLOG_MARK, APLOG_INFO, status, session->c,
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
                           "h2_proxy_session(%s): read error", session->id);
            dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL);
        }
@@ -1294,7 +1293,7 @@ void h2_proxy_session_cleanup(h2_proxy_session *session, 
        cleanup_iter_ctx ctx;
        ctx.session = session;
        ctx.done = done;
-        ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c, 
+        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, session->c, 
                      "h2_proxy_session(%s): terminated, %d streams unfinished",
                      session->id, (int)h2_ihash_count(session->streams));
        h2_ihash_iter(session->streams, cleanup_iter, &ctx);
diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
index ecd8ae0061..9475eb866f 100644
--- a/modules/http2/h2_session.c
+++ b/modules/http2/h2_session.c
@@ -1561,7 +1561,7 @@ static apr_status_t h2_session_read(h2_session *session, int block)
         }
         else {
             /* uncommon status, log on INFO so that we see this */
-            ap_log_cerror( APLOG_MARK, APLOG_INFO, status, c,
+            ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, c,
                           APLOGNO(02950) 
                           "h2_session(%ld): error reading, terminating",
                           session->id);
@@ -1745,7 +1745,7 @@ static void h2_session_ev_conn_error(h2_session *session, int arg, const char *m
             break;
         
         default:
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
                           "h2_session(%ld): conn error -> shutdown", session->id);
             h2_session_shutdown(session, arg, msg, 0);
             break;
@@ -1762,7 +1762,7 @@ static void h2_session_ev_proto_error(h2_session *session, int arg, const char *
             break;
         
         default:
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
                           "h2_session(%ld): proto error -> shutdown", session->id);
             h2_session_shutdown(session, arg, msg, 0);
             break;
@@ -2157,7 +2157,8 @@ apr_status_t h2_session_process(h2_session *session, int async)
                     /* waited long enough */
                     ap_log_cerror(APLOG_MARK, APLOG_TRACE1, APR_TIMEUP, c,
                                   "h2_session: wait for data");
-                    dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, NULL);
+                    dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
+                    break;
                 }
                 else {
                     /* repeating, increase timer for graceful backoff */
@@ -2182,7 +2183,11 @@ apr_status_t h2_session_process(h2_session *session, int async)
                 transit(session, "wait cycle", H2_SESSION_ST_BUSY);
             }
             else {
-                h2_session_shutdown(session, H2_ERR_INTERNAL_ERROR, "cond wait error", 0);
+                ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, c,
+                              "h2_session(%ld): waiting on conditional", 
+                              session->id);
+                h2_session_shutdown(session, H2_ERR_INTERNAL_ERROR, 
+                                    "cond wait error", 0);
             }
             break;
 
diff --git a/modules/http2/mod_http2.h b/modules/http2/mod_http2.h
index 3220700d87..c5cfe704e3 100644
--- a/modules/http2/mod_http2.h
+++ b/modules/http2/mod_http2.h
@@ -86,6 +86,4 @@ APR_DECLARE_OPTIONAL_FN(apr_status_t, 
 APR_DECLARE_OPTIONAL_FN(void, 
                         http2_req_engine_done, (h2_req_engine *engine, conn_rec *rconn));
 
-#define H2_TASK_ID_NOTE "http2-task-id"
-
 #endif
diff --git a/modules/http2/mod_proxy_http2.c b/modules/http2/mod_proxy_http2.c
index 0a752a8410..ab1bb09d2e 100644
--- a/modules/http2/mod_proxy_http2.c
+++ b/modules/http2/mod_proxy_http2.c
@@ -305,7 +305,10 @@ static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx) {
         if (status == APR_SUCCESS) {
             apr_status_t s2;
             /* ongoing processing, call again */
-            ctx->capacity = H2MAX(100, session->remote_max_concurrent);
+            if (session->remote_max_concurrent > 0
+                && session->remote_max_concurrent != ctx->capacity) {
+                ctx->capacity = session->remote_max_concurrent;
+            }
             s2 = next_request(ctx, 0);
             if (s2 == APR_ECONNABORTED) {
                 /* master connection gone */
@@ -528,10 +531,12 @@ run_session:
 
 cleanup:
     if (ctx->engine && next_request(ctx, 1) == APR_SUCCESS) {
         /* Still more to do, tear down old conn and start over */
-        ctx->p_conn->close = 1;
-        proxy_run_detach_backend(r, ctx->p_conn);
-        ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
-        ctx->p_conn = NULL;
+        if (ctx->p_conn) {
+            ctx->p_conn->close = 1;
+            proxy_run_detach_backend(r, ctx->p_conn);
+            ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
+            ctx->p_conn = NULL;
+        }
         goto run_connect;
     }

-- 
2.40.0
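
Illustration (not part of the patch above): with this change the task id travels on the slave conn_rec via the "http2-task-id" note, set in pop_task() and read back in h2_slave_destroy() and h2_ngn_shed_push_req() purely for logging. A minimal sketch of how other module code could read that note follows; it uses only public httpd/APR calls, and the helper name log_h2_task_id() is made up for the example, not something the patch provides.

    #include "httpd.h"
    #include "http_log.h"
    #include "apr_tables.h"

    /* Hypothetical helper (not in the patch): emit the task id that
     * mod_http2 attaches to a slave connection via the "http2-task-id"
     * note; falls back to "unknown" when the note is absent. */
    static void log_h2_task_id(conn_rec *c)
    {
        const char *task_id = apr_table_get(c->notes, "http2-task-id");
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                      "connection %ld belongs to h2 task %s",
                      c->id, task_id ? task_id : "unknown");
    }

This mirrors what the patch itself does when logging slave connection destruction and engine pushes, where the note is consulted only for trace output.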