From 97d4f59f55395c83a68e2ba33df2dd82da56095e Mon Sep 17 00:00:00 2001 From: Stefan Eissing Date: Tue, 14 Feb 2017 15:26:59 +0000 Subject: [PATCH] On the trunk: *) mod_http2: not counting file buckets against stream max buffer limits. Effectively transferring static files in one step from slave to master connection. [Stefan Eissing] git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1782975 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 6 ++++- modules/http2/h2_bucket_beam.c | 37 ++++++++++++++++------------ modules/http2/h2_bucket_beam.h | 1 + modules/http2/h2_conn_io.c | 44 +++------------------------------- modules/http2/h2_filter.c | 6 ++--- modules/http2/h2_session.h | 3 ++- modules/http2/h2_stream.c | 43 +++++++++++++-------------------- modules/http2/h2_stream.h | 1 + 8 files changed, 54 insertions(+), 87 deletions(-) diff --git a/CHANGES index 58711575e7..826184ea1a 100644 --- a/CHANGES +++ b/CHANGES @@ -1,7 +1,11 @@ -*- coding: utf-8 -*- Changes with Apache 2.5.0 - *) mod_http2: comforts ap_check_pipeline() on slave connections + *) mod_http2: not counting file buckets against stream max buffer limits. + Effectively transferring static files in one step from slave to master + connection. [Stefan Eissing] + + *) mod_http2: comforting ap_check_pipeline() on slave connections to facilitate reuse (see https://github.com/icing/mod_h2/issues/128). 
[Stefan Eissing, reported by Armin Abfalterer] diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c index ca57ee9590..f45fa470a4 100644 --- a/modules/http2/h2_bucket_beam.c +++ b/modules/http2/h2_bucket_beam.c @@ -216,6 +216,17 @@ static void leave_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl) } } +static apr_off_t bucket_mem_used(apr_bucket *b) +{ + if (APR_BUCKET_IS_FILE(b)) { + return 0; + } + else { + /* should all have determinate length */ + return b->length; + } +} + static int report_consumption(h2_bucket_beam *beam) { int rv = 0; @@ -545,6 +556,7 @@ apr_status_t h2_beam_create(h2_bucket_beam **pbeam, apr_pool_t *pool, H2_BLIST_INIT(&beam->hold_list); H2_BLIST_INIT(&beam->purge_list); H2_BPROXY_LIST_INIT(&beam->proxies); + beam->tx_mem_limits = 1; beam->max_buf_size = max_buf_size; apr_pool_pre_cleanup_register(pool, beam, beam_cleanup); @@ -999,14 +1011,15 @@ transfer: for (brecv = APR_BRIGADE_FIRST(bb); brecv != APR_BRIGADE_SENTINEL(bb); brecv = APR_BUCKET_NEXT(brecv)) { - remain -= brecv->length; - if (remain < 0) { - apr_bucket_split(brecv, brecv->length+remain); - beam->recv_buffer = apr_brigade_split_ex(bb, - APR_BUCKET_NEXT(brecv), - beam->recv_buffer); - break; - } + remain -= (beam->tx_mem_limits? 
bucket_mem_used(brecv) + : brecv->length); + if (remain < 0) { + apr_bucket_split(brecv, brecv->length+remain); + beam->recv_buffer = apr_brigade_split_ex(bb, + APR_BUCKET_NEXT(brecv), + beam->recv_buffer); + break; + } } } @@ -1123,13 +1136,7 @@ apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam) for (b = H2_BLIST_FIRST(&beam->send_list); b != H2_BLIST_SENTINEL(&beam->send_list); b = APR_BUCKET_NEXT(b)) { - if (APR_BUCKET_IS_FILE(b)) { - /* do not count */ - } - else { - /* should all have determinate length */ - l += b->length; - } + l += bucket_mem_used(b); } leave_yellow(beam, &bl); } diff --git a/modules/http2/h2_bucket_beam.h b/modules/http2/h2_bucket_beam.h index 265ee5d9dc..f5fe7755a6 100644 --- a/modules/http2/h2_bucket_beam.h +++ b/modules/http2/h2_bucket_beam.h @@ -188,6 +188,7 @@ struct h2_bucket_beam { unsigned int aborted : 1; unsigned int closed : 1; unsigned int close_sent : 1; + unsigned int tx_mem_limits : 1; /* only memory size counts on transfers */ void *m_ctx; h2_beam_mutex_enter *m_enter; diff --git a/modules/http2/h2_conn_io.c b/modules/http2/h2_conn_io.c index f1f0de49ac..81cd65a4b0 100644 --- a/modules/http2/h2_conn_io.c +++ b/modules/http2/h2_conn_io.c @@ -38,6 +38,7 @@ * - TLS overhead (60-100) * ~= 1300 bytes */ #define WRITE_SIZE_INITIAL 1300 + /* Calculated like this: max TLS record size 16*1024 * - 40 (IP) - 20 (TCP) - 40 (TCP options) * - TLS overhead (60-100) @@ -116,7 +117,7 @@ static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level, line = *buffer? 
buffer : "(empty)"; } /* Intentional no APLOGNO */ - ap_log_cerror(APLOG_MARK, level, 0, c, "bb_dump(%s)-%s: %s", + ap_log_cerror(APLOG_MARK, level, 0, c, "h2_session(%s)-%s: %s", c->log_id, tag, line); } @@ -157,8 +158,6 @@ apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, return APR_SUCCESS; } -#define LOG_SCRATCH 0 - static void append_scratch(h2_conn_io *io) { if (io->scratch && io->slen > 0) { @@ -166,11 +165,6 @@ static void append_scratch(h2_conn_io *io) apr_bucket_free, io->c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(io->output, b); -#if LOG_SCRATCH - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03386) - "h2_conn_io(%ld): append_scratch(%ld)", - io->c->id, (long)io->slen); -#endif io->scratch = NULL; io->slen = io->ssize = 0; } @@ -218,11 +212,6 @@ static apr_status_t read_to_scratch(h2_conn_io *io, apr_bucket *b) return status; } status = apr_file_read(fd, io->scratch + io->slen, &len); -#if LOG_SCRATCH - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, io->c, APLOGNO(03387) - "h2_conn_io(%ld): FILE_to_scratch(%ld)", - io->c->id, (long)len); -#endif if (status != APR_SUCCESS && status != APR_EOF) { return status; } @@ -231,11 +220,6 @@ static apr_status_t read_to_scratch(h2_conn_io *io, apr_bucket *b) else { status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); if (status == APR_SUCCESS) { -#if LOG_SCRATCH - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03388) - "h2_conn_io(%ld): read_to_scratch(%ld)", - io->c->id, (long)b->length); -#endif memcpy(io->scratch+io->slen, data, len); io->slen += len; } @@ -251,17 +235,11 @@ static void check_write_size(h2_conn_io *io) /* long time not written, reset write size */ io->write_size = WRITE_SIZE_INITIAL; io->bytes_written = 0; - ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c, - "h2_conn_io(%ld): timeout write size reset to %ld", - (long)io->c->id, (long)io->write_size); } else if (io->write_size < WRITE_SIZE_MAX && io->bytes_written >= io->warmup_size) { /* connection is hot, use 
max size */ io->write_size = WRITE_SIZE_MAX; - ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c, - "h2_conn_io(%ld): threshold reached, write size now %ld", - (long)io->c->id, (long)io->write_size); } } @@ -283,11 +261,10 @@ static apr_status_t pass_output(h2_conn_io *io, int flush) return APR_SUCCESS; } - ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, c, "h2_conn_io: pass_output"); ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, NULL); apr_brigade_length(bb, 0, &bblen); + h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "out", bb); - h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "master conn pass", bb); status = ap_pass_brigade(c->output_filters, bb); if (status == APR_SUCCESS) { io->bytes_written += (apr_size_t)bblen; @@ -342,21 +319,11 @@ apr_status_t h2_conn_io_write(h2_conn_io *io, const char *data, size_t length) while (length > 0) { remain = assure_scratch_space(io); if (remain >= length) { -#if LOG_SCRATCH - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03389) - "h2_conn_io(%ld): write_to_scratch(%ld)", - io->c->id, (long)length); -#endif memcpy(io->scratch + io->slen, data, length); io->slen += length; length = 0; } else { -#if LOG_SCRATCH - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03390) - "h2_conn_io(%ld): write_to_scratch(%ld)", - io->c->id, (long)remain); -#endif memcpy(io->scratch + io->slen, data, remain); io->slen += remain; data += remain; @@ -397,11 +364,6 @@ apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb) /* complete write_size bucket, append unchanged */ APR_BUCKET_REMOVE(b); APR_BRIGADE_INSERT_TAIL(io->output, b); -#if LOG_SCRATCH - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03391) - "h2_conn_io(%ld): pass bucket(%ld)", - io->c->id, (long)b->length); -#endif continue; } } diff --git a/modules/http2/h2_filter.c b/modules/http2/h2_filter.c index 8c6ffa32f0..617d350d59 100644 --- a/modules/http2/h2_filter.c +++ b/modules/http2/h2_filter.c @@ -112,7 +112,7 @@ apr_status_t h2_filter_core_input(ap_filter_t* f, 
apr_interval_time_t saved_timeout = UNSET; ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, - "core_input(%ld): read, %s, mode=%d, readbytes=%ld", + "h2_session(%ld): read, %s, mode=%d, readbytes=%ld", (long)f->c->id, (block == APR_BLOCK_READ)? "BLOCK_READ" : "NONBLOCK_READ", mode, (long)readbytes); @@ -161,11 +161,11 @@ apr_status_t h2_filter_core_input(ap_filter_t* f, case APR_EAGAIN: case APR_TIMEUP: ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, - "core_input(%ld): read", f->c->id); + "h2_session(%ld): read", f->c->id); break; default: ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, f->c, APLOGNO(03046) - "core_input(%ld): error reading", f->c->id); + "h2_session(%ld): error reading", f->c->id); break; } return status; diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h index e674e35035..76a03f3c41 100644 --- a/modules/http2/h2_session.h +++ b/modules/http2/h2_session.h @@ -200,7 +200,8 @@ apr_status_t h2_session_set_prio(h2_session *session, const struct h2_priority *prio); #define H2_SSSN_MSG(s, msg) \ - "h2_session(%ld,%s): "msg, s->id, h2_session_state_str(s->state) + "h2_session(%ld,%s,%d): "msg, s->id, h2_session_state_str(s->state), \ + s->open_streams #define H2_SSSN_LOG(aplogno, s, msg) aplogno H2_SSSN_MSG(s, msg) diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c index abc85fd4ac..0a893525ee 100644 --- a/modules/http2/h2_stream.c +++ b/modules/http2/h2_stream.c @@ -162,12 +162,11 @@ static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, const char *tag) if (APLOG_C_IS_LEVEL(s->session->c, lvl)) { conn_rec *c = s->session->c; char buffer[4 * 1024]; - const char *line = "(null)"; apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); len = h2_util_bb_print(buffer, bmax, tag, "", s->out_buffer); - ap_log_cerror(APLOG_MARK, lvl, 0, c, "bb_dump(%s): %s", - c->log_id, len? buffer : line); + ap_log_cerror(APLOG_MARK, lvl, 0, c, + H2_STRM_MSG(s, "out-buffer(%s)"), len? 
buffer : "empty"); } } @@ -477,6 +476,7 @@ h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session, stream->pool = pool; stream->session = session; stream->monitor = monitor; + stream->max_mem = session->max_stream_mem; h2_beam_create(&stream->input, pool, id, "input", H2_BEAM_OWNER_SEND, 0); h2_beam_send_from(stream->input, stream->pool); @@ -675,20 +675,6 @@ apr_status_t h2_stream_add_header(h2_stream *stream, return status; } -static apr_status_t fill_buffer(h2_stream *stream, apr_size_t amount) -{ - apr_status_t status; - - if (!stream->output) { - return APR_EOF; - } - status = h2_beam_receive(stream->output, stream->out_buffer, - APR_NONBLOCK_READ, amount); - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c, - H2_STRM_MSG(stream, "beam_received")); - return status; -} - static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb) { if (bb) { @@ -723,31 +709,36 @@ apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen, c = stream->session->c; prep_output(stream); - + + /* determine how much we'd like to send. We cannot send more than + * is requested. But we can reduce the size in case the master + * connection operates in smaller chunks. (TLS warmup) */ if (stream->session->io.write_size > 0) { max_chunk = stream->session->io.write_size - 9; /* header bits */ } *plen = requested = (*plen > 0)? 
H2MIN(*plen, max_chunk) : max_chunk; - H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "h2_stream_out_prepare_pre"); h2_util_bb_avail(stream->out_buffer, plen, peos); - if (!*peos && *plen < requested) { - /* try to get more data */ - status = fill_buffer(stream, (requested - *plen) + H2_DATA_CHUNK_SIZE); + if (!*peos && *plen < requested && *plen < stream->max_mem) { + H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "pre"); + status = h2_beam_receive(stream->output, stream->out_buffer, + APR_NONBLOCK_READ, stream->max_mem - *plen); if (APR_STATUS_IS_EOF(status)) { apr_bucket *eos = apr_bucket_eos_create(c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(stream->out_buffer, eos); status = APR_SUCCESS; } else if (status == APR_EAGAIN) { - /* did not receive more, it's ok */ status = APR_SUCCESS; } *plen = requested; h2_util_bb_avail(stream->out_buffer, plen, peos); + H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "post"); } - H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "h2_stream_out_prepare_post"); - + else { + H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "ok"); + } + b = APR_BRIGADE_FIRST(stream->out_buffer); while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) { e = APR_BUCKET_NEXT(b); @@ -761,7 +752,7 @@ apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen, } b = e; } - + b = get_first_headers_bucket(stream->out_buffer); if (b) { /* there are HEADERS to submit */ diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h index c9466f29e1..86b6da898f 100644 --- a/modules/http2/h2_stream.h +++ b/modules/http2/h2_stream.h @@ -70,6 +70,7 @@ struct h2_stream { struct h2_bucket_beam *input; struct h2_bucket_beam *output; + apr_size_t max_mem; /* maximum amount of data buffered */ apr_bucket_brigade *out_buffer; int rst_error; /* stream error for RST_STREAM */ -- 2.50.1