-*- coding: utf-8 -*-
Changes with Apache 2.5.0
- *) mod_http2: comforts ap_check_pipeline() on slave connections
+ *) mod_http2: not counting file buckets against stream max buffer limits.
+    Effectively transferring static files in one step from slave to master
+    connection. [Stefan Eissing]
+
+ *) mod_http2: satisfying ap_check_pipeline() on slave connections
to facilitate reuse (see https://github.com/icing/mod_h2/issues/128).
[Stefan Eissing, reported by Armin Abfalterer]
}
}
+static apr_off_t bucket_mem_used(apr_bucket *b)
+{
+ if (APR_BUCKET_IS_FILE(b)) {
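+        /* file bucket data is passed via the file handle and occupies no beam memory */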
+ return 0;
+ }
+ else {
+ /* should all have determinate length */
+ return b->length;
+ }
+}
+
static int report_consumption(h2_bucket_beam *beam)
{
int rv = 0;
H2_BLIST_INIT(&beam->hold_list);
H2_BLIST_INIT(&beam->purge_list);
H2_BPROXY_LIST_INIT(&beam->proxies);
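+    /* only the memory size of buckets counts against transfer limits */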
+ beam->tx_mem_limits = 1;
beam->max_buf_size = max_buf_size;
apr_pool_pre_cleanup_register(pool, beam, beam_cleanup);
for (brecv = APR_BRIGADE_FIRST(bb);
brecv != APR_BRIGADE_SENTINEL(bb);
brecv = APR_BUCKET_NEXT(brecv)) {
- remain -= brecv->length;
- if (remain < 0) {
- apr_bucket_split(brecv, brecv->length+remain);
- beam->recv_buffer = apr_brigade_split_ex(bb,
- APR_BUCKET_NEXT(brecv),
- beam->recv_buffer);
- break;
- }
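+        /* with tx_mem_limits set, file buckets do not count against the limit */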
+ remain -= (beam->tx_mem_limits? bucket_mem_used(brecv)
+ : brecv->length);
+ if (remain < 0) {
+ apr_bucket_split(brecv, brecv->length+remain);
+ beam->recv_buffer = apr_brigade_split_ex(bb,
+ APR_BUCKET_NEXT(brecv),
+ beam->recv_buffer);
+ break;
+ }
}
}
for (b = H2_BLIST_FIRST(&beam->send_list);
b != H2_BLIST_SENTINEL(&beam->send_list);
b = APR_BUCKET_NEXT(b)) {
- if (APR_BUCKET_IS_FILE(b)) {
- /* do not count */
- }
- else {
- /* should all have determinate length */
- l += b->length;
- }
+ l += bucket_mem_used(b);
}
leave_yellow(beam, &bl);
}
unsigned int aborted : 1;
unsigned int closed : 1;
unsigned int close_sent : 1;
+ unsigned int tx_mem_limits : 1; /* only memory size counts on transfers */
void *m_ctx;
h2_beam_mutex_enter *m_enter;
* - TLS overhead (60-100)
* ~= 1300 bytes */
#define WRITE_SIZE_INITIAL 1300
+
/* Calculated like this: max TLS record size 16*1024
* - 40 (IP) - 20 (TCP) - 40 (TCP options)
* - TLS overhead (60-100)
line = *buffer? buffer : "(empty)";
}
/* Intentional no APLOGNO */
- ap_log_cerror(APLOG_MARK, level, 0, c, "bb_dump(%s)-%s: %s",
+ ap_log_cerror(APLOG_MARK, level, 0, c, "h2_session(%s)-%s: %s",
c->log_id, tag, line);
}
return APR_SUCCESS;
}
-#define LOG_SCRATCH 0
-
static void append_scratch(h2_conn_io *io)
{
if (io->scratch && io->slen > 0) {
apr_bucket_free,
io->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(io->output, b);
-#if LOG_SCRATCH
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03386)
- "h2_conn_io(%ld): append_scratch(%ld)",
- io->c->id, (long)io->slen);
-#endif
io->scratch = NULL;
io->slen = io->ssize = 0;
}
return status;
}
status = apr_file_read(fd, io->scratch + io->slen, &len);
-#if LOG_SCRATCH
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, io->c, APLOGNO(03387)
- "h2_conn_io(%ld): FILE_to_scratch(%ld)",
- io->c->id, (long)len);
-#endif
if (status != APR_SUCCESS && status != APR_EOF) {
return status;
}
else {
status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
if (status == APR_SUCCESS) {
-#if LOG_SCRATCH
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03388)
- "h2_conn_io(%ld): read_to_scratch(%ld)",
- io->c->id, (long)b->length);
-#endif
memcpy(io->scratch+io->slen, data, len);
io->slen += len;
}
/* long time not written, reset write size */
io->write_size = WRITE_SIZE_INITIAL;
io->bytes_written = 0;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c,
- "h2_conn_io(%ld): timeout write size reset to %ld",
- (long)io->c->id, (long)io->write_size);
}
else if (io->write_size < WRITE_SIZE_MAX
&& io->bytes_written >= io->warmup_size) {
/* connection is hot, use max size */
io->write_size = WRITE_SIZE_MAX;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c,
- "h2_conn_io(%ld): threshold reached, write size now %ld",
- (long)io->c->id, (long)io->write_size);
}
}
return APR_SUCCESS;
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, c, "h2_conn_io: pass_output");
ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, NULL);
apr_brigade_length(bb, 0, &bblen);
-    h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "master conn pass", bb);
+    h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "out", bb);
status = ap_pass_brigade(c->output_filters, bb);
if (status == APR_SUCCESS) {
io->bytes_written += (apr_size_t)bblen;
while (length > 0) {
remain = assure_scratch_space(io);
if (remain >= length) {
-#if LOG_SCRATCH
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03389)
- "h2_conn_io(%ld): write_to_scratch(%ld)",
- io->c->id, (long)length);
-#endif
memcpy(io->scratch + io->slen, data, length);
io->slen += length;
length = 0;
}
else {
-#if LOG_SCRATCH
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03390)
- "h2_conn_io(%ld): write_to_scratch(%ld)",
- io->c->id, (long)remain);
-#endif
memcpy(io->scratch + io->slen, data, remain);
io->slen += remain;
data += remain;
/* complete write_size bucket, append unchanged */
APR_BUCKET_REMOVE(b);
APR_BRIGADE_INSERT_TAIL(io->output, b);
-#if LOG_SCRATCH
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03391)
- "h2_conn_io(%ld): pass bucket(%ld)",
- io->c->id, (long)b->length);
-#endif
continue;
}
}
apr_interval_time_t saved_timeout = UNSET;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "core_input(%ld): read, %s, mode=%d, readbytes=%ld",
+ "h2_session(%ld): read, %s, mode=%d, readbytes=%ld",
(long)f->c->id, (block == APR_BLOCK_READ)?
"BLOCK_READ" : "NONBLOCK_READ", mode, (long)readbytes);
case APR_EAGAIN:
case APR_TIMEUP:
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "core_input(%ld): read", f->c->id);
+ "h2_session(%ld): read", f->c->id);
break;
default:
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, f->c, APLOGNO(03046)
- "core_input(%ld): error reading", f->c->id);
+ "h2_session(%ld): error reading", f->c->id);
break;
}
return status;
const struct h2_priority *prio);
#define H2_SSSN_MSG(s, msg) \
- "h2_session(%ld,%s): "msg, s->id, h2_session_state_str(s->state)
+ "h2_session(%ld,%s,%d): "msg, s->id, h2_session_state_str(s->state), \
+ s->open_streams
#define H2_SSSN_LOG(aplogno, s, msg) aplogno H2_SSSN_MSG(s, msg)
if (APLOG_C_IS_LEVEL(s->session->c, lvl)) {
conn_rec *c = s->session->c;
char buffer[4 * 1024];
- const char *line = "(null)";
apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]);
len = h2_util_bb_print(buffer, bmax, tag, "", s->out_buffer);
- ap_log_cerror(APLOG_MARK, lvl, 0, c, "bb_dump(%s): %s",
- c->log_id, len? buffer : line);
+ ap_log_cerror(APLOG_MARK, lvl, 0, c,
+ H2_STRM_MSG(s, "out-buffer(%s)"), len? buffer : "empty");
}
}
stream->pool = pool;
stream->session = session;
stream->monitor = monitor;
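+    /* maximum amount of output data buffered in memory for this stream */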
+ stream->max_mem = session->max_stream_mem;
h2_beam_create(&stream->input, pool, id, "input", H2_BEAM_OWNER_SEND, 0);
h2_beam_send_from(stream->input, stream->pool);
return status;
}
-static apr_status_t fill_buffer(h2_stream *stream, apr_size_t amount)
-{
- apr_status_t status;
-
- if (!stream->output) {
- return APR_EOF;
- }
- status = h2_beam_receive(stream->output, stream->out_buffer,
- APR_NONBLOCK_READ, amount);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c,
- H2_STRM_MSG(stream, "beam_received"));
- return status;
-}
-
static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb)
{
if (bb) {
c = stream->session->c;
prep_output(stream);
-
+
+    /* Determine how much we would like to send. We cannot send more than
+     * is requested. But we can reduce the size in case the master
+     * connection operates in smaller chunks. (TLS warmup) */
if (stream->session->io.write_size > 0) {
max_chunk = stream->session->io.write_size - 9; /* header bits */
}
*plen = requested = (*plen > 0)? H2MIN(*plen, max_chunk) : max_chunk;
- H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "h2_stream_out_prepare_pre");
h2_util_bb_avail(stream->out_buffer, plen, peos);
- if (!*peos && *plen < requested) {
- /* try to get more data */
- status = fill_buffer(stream, (requested - *plen) + H2_DATA_CHUNK_SIZE);
+ if (!*peos && *plen < requested && *plen < stream->max_mem) {
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "pre");
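+        /* try to receive more output from the beam, but never buffer more than max_mem */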
+ status = h2_beam_receive(stream->output, stream->out_buffer,
+ APR_NONBLOCK_READ, stream->max_mem - *plen);
if (APR_STATUS_IS_EOF(status)) {
apr_bucket *eos = apr_bucket_eos_create(c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->out_buffer, eos);
status = APR_SUCCESS;
}
else if (status == APR_EAGAIN) {
- /* did not receive more, it's ok */
status = APR_SUCCESS;
}
*plen = requested;
h2_util_bb_avail(stream->out_buffer, plen, peos);
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "post");
}
- H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "h2_stream_out_prepare_post");
-
+ else {
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "ok");
+ }
+
b = APR_BRIGADE_FIRST(stream->out_buffer);
while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
e = APR_BUCKET_NEXT(b);
}
b = e;
}
-
+
b = get_first_headers_bucket(stream->out_buffer);
if (b) {
/* there are HEADERS to submit */
struct h2_bucket_beam *input;
struct h2_bucket_beam *output;
+ apr_size_t max_mem; /* maximum amount of data buffered */
apr_bucket_brigade *out_buffer;
int rst_error; /* stream error for RST_STREAM */