-*- coding: utf-8 -*-
Changes with Apache 2.5.0
+ *) mod_http2: fix for possible page fault when stream is resumed during
+ session shutdown. [sidney-j-r-m (github)]
+
+ *) mod_http2: fix for h2 session ignoring new responses while already
+ open streams continue to have data available. [Stefan Eissing]
+
*) mod_remoteip: Add support for PROXY protocol (code donated by Cloudzilla).
Add ability to make PROXY protocol processing optional (added to the donated code).
See also: http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
const h2_config *cfg)
{
- io->c = c;
- io->output = apr_brigade_create(c->pool, c->bucket_alloc);
- io->is_tls = h2_h2_is_tls(c);
- io->buffer_output = io->is_tls;
- io->pass_threshold = (apr_size_t)h2_config_geti64(cfg, H2_CONF_STREAM_MAX_MEM) / 2;
- io->flush_factor = h2_config_geti(cfg, H2_CONF_TLS_FLUSH_COUNT);
-
+ io->c = c;
+ io->output = apr_brigade_create(c->pool, c->bucket_alloc);
+ io->is_tls = h2_h2_is_tls(c);
+ io->buffer_output = io->is_tls;
+ io->pass_threshold = (apr_size_t)h2_config_geti64(cfg, H2_CONF_STREAM_MAX_MEM);
+ io->flush_factor = h2_config_geti(cfg, H2_CONF_TLS_FLUSH_COUNT);
+ io->speed_factor = 1.0;
+
if (io->is_tls) {
/* This is what we start with,
* see https://issues.apache.org/jira/browse/TS-2503
*/
/* long time not written, reset write size */
io->write_size = WRITE_SIZE_INITIAL;
io->bytes_written = 0;
+ io->speed_factor = 1.0;
ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c,
"h2_conn_io(%ld): timeout write size reset to %ld",
(long)io->c->id, (long)io->write_size);
apr_status_t h2_conn_io_flush(h2_conn_io *io)
{
- return pass_output(io, 1, NULL);
+ apr_time_t start = 0;
+ apr_status_t status;
+
+ if (io->needs_flush > 0) {
+ /* this is a buffer size triggered flush, let's measure how
+ * long it takes and try to adjust our speed factor accordingly */
+ start = apr_time_now();
+ }
+ status = pass_output(io, 1, NULL);
+ check_write_size(io);
+ if (start && status == APR_SUCCESS) {
+ apr_time_t duration = apr_time_now() - start;
+ if (duration < apr_time_from_msec(100)) {
+ io->speed_factor *= 1.0 + ((apr_time_from_msec(100) - duration)
+ / (float)apr_time_from_msec(100));
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, io->c,
+ "h2_conn_io(%ld): incr speed_factor to %f",
+ io->c->id, io->speed_factor);
+ }
+ else if (duration > apr_time_from_msec(200)) {
+ io->speed_factor *= 0.5;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, io->c,
+ "h2_conn_io(%ld): decr speed_factor to %f",
+ io->c->id, io->speed_factor);
+ }
+ }
+ io->needs_flush = -1;
+ return status;
}
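
The timing check above only runs for flushes that were triggered by the buffer-size check (io->needs_flush > 0) and turns speed_factor into a simple multiplicative-increase/multiplicative-decrease control: a flush that completes in under 100ms grows the factor (by at most 2x per flush), one that takes longer than 200ms halves it, and anything in between leaves it unchanged. A minimal standalone sketch of that rule, with apr_time_t replaced by plain millisecond values and invented sample durations (not part of the patch):

    /* Standalone sketch of the speed_factor adjustment in h2_conn_io_flush().
     * Durations are in milliseconds instead of apr_time_t; the sample values
     * are invented for illustration. */
    #include <stdio.h>

    static double adjust_speed_factor(double speed_factor, long duration_ms)
    {
        if (duration_ms < 100) {
            /* flush finished quickly: grow the factor, up to 2x per flush */
            speed_factor *= 1.0 + ((100 - duration_ms) / 100.0);
        }
        else if (duration_ms > 200) {
            /* flush was slow: halve the factor */
            speed_factor *= 0.5;
        }
        return speed_factor;
    }

    int main(void)
    {
        /* invented flush durations (ms): fast at first, then a slow client */
        long samples[] = { 10, 20, 50, 250, 300, 80 };
        double factor = 1.0; /* matches the init value in h2_conn_io_init() */
        size_t i;

        for (i = 0; i < sizeof(samples)/sizeof(samples[0]); i++) {
            factor = adjust_speed_factor(factor, samples[i]);
            printf("flush took %3ld ms -> speed_factor %.3f\n",
                   samples[i], factor);
        }
        return 0;
    }

The factor changes nothing in the flush itself; it only widens or narrows the threshold used by h2_conn_io_needs_flush() below.
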
apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, h2_session *session)
else {
status = apr_brigade_write(io->output, NULL, NULL, data, length);
}
+ io->needs_flush = -1;
return status;
}
apr_bucket *b;
apr_status_t status = APR_SUCCESS;
- check_write_size(io);
while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) {
b = APR_BRIGADE_FIRST(bb);
APR_BRIGADE_INSERT_TAIL(io->output, b);
}
}
-
- if (status == APR_SUCCESS) {
- if (!APR_BRIGADE_EMPTY(io->output)) {
- apr_off_t len;
- if (io->buffer_output) {
- apr_brigade_length(io->output, 0, &len);
- if (len >= (io->flush_factor * io->write_size)) {
- return pass_output(io, 1, NULL);
- }
- }
- else {
- len = h2_brigade_mem_size(io->output);
- if (len >= io->pass_threshold) {
- return pass_output(io, 0, NULL);
- }
- }
- }
- }
+ io->needs_flush = -1;
return status;
}
+int h2_conn_io_needs_flush(h2_conn_io *io)
+{
+ if (io->needs_flush < 0) {
+ apr_off_t len;
+ apr_brigade_length(io->output, 0, &len);
+ if (len > (io->pass_threshold * io->flush_factor * io->speed_factor)) {
+ /* don't want to keep too much around */
+ io->needs_flush = 1;
+ return 1;
+ }
+ io->needs_flush = 0;
+ }
+ return 0;
+}
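
needs_flush acts as a small cache: any write or flush resets it to -1 ("unknown"), and the first caller afterwards recomputes it from the amount of buffered output. The effective threshold is pass_threshold scaled by the configured flush_factor and the measured speed_factor, so a connection that flushes quickly is allowed to buffer more before the session blocks in the send callbacks. A short sketch of that decision; the configuration values below are assumptions for illustration, not module defaults:

    /* Sketch of the flush decision in h2_conn_io_needs_flush(): buffered
     * output is flushed once it exceeds pass_threshold scaled by the
     * configured flush_factor and the measured speed_factor. */
    #include <stdio.h>

    int main(void)
    {
        long  pass_threshold = 64 * 1024;  /* assumed H2StreamMaxMemSize */
        float flush_factor   = 2.0f;       /* assumed config value */
        long  buffered       = 200 * 1024; /* bytes currently in io->output */
        float speed_factor;

        for (speed_factor = 0.5f; speed_factor <= 4.0f; speed_factor *= 2.0f) {
            int needs_flush =
                buffered > pass_threshold * flush_factor * speed_factor;
            printf("speed_factor %.1f -> threshold %.0f bytes -> %s\n",
                   speed_factor,
                   pass_threshold * flush_factor * speed_factor,
                   needs_flush ? "flush" : "keep buffering");
        }
        return 0;
    }
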
apr_int64_t bytes_written;
int buffer_output;
+ int needs_flush;
apr_size_t pass_threshold;
- int flush_factor;
+ float flush_factor;
+ float speed_factor;
char *scratch;
apr_size_t ssize;
*/
apr_status_t h2_conn_io_flush(h2_conn_io *io);
+int h2_conn_io_needs_flush(h2_conn_io *io);
+
#endif /* defined(__mod_h2__h2_conn_io__) */
#include "h2_workers.h"
+static apr_status_t dispatch_master(h2_session *session);
+
static int h2_session_status_from_apr_status(apr_status_t rv)
{
if (rv == APR_SUCCESS) {
(void)ngh2;
(void)flags;
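+ /* If buffered output has crossed the flush threshold, flush now and tell
+ * nghttp2 to stop serializing further frame bytes: returning
+ * NGHTTP2_ERR_WOULDBLOCK from the send callback makes nghttp2_session_send()
+ * stop without error, and sending resumes on the next pass once the flush
+ * has drained the buffer. */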
+ if (h2_conn_io_needs_flush(&session->io)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_session(%ld): blocking due to io flush",
+ session->id);
+ status = h2_conn_io_flush(&session->io);
+ if (status != APR_SUCCESS) {
+ return h2_session_status_from_apr_status(status);
+ }
+ return NGHTTP2_ERR_WOULDBLOCK;
+ }
+
status = h2_conn_io_write(&session->io, (const char *)data, length);
if (status == APR_SUCCESS) {
return length;
(void)ngh2;
(void)source;
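+ /* Same flush gate as in the plain send callback above: flush first and
+ * return NGHTTP2_ERR_WOULDBLOCK so that nghttp2 calls this callback again
+ * with the same DATA frame once output has drained. */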
+ if (h2_conn_io_needs_flush(&session->io)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_stream(%ld-%d): blocking due to io flush",
+ session->id, stream_id);
+ status = h2_conn_io_flush(&session->io);
+ if (status != APR_SUCCESS) {
+ return h2_session_status_from_apr_status(status);
+ }
+ return NGHTTP2_ERR_WOULDBLOCK;
+ }
+
if (frame->data.padlen > H2_MAX_PADLEN) {
return NGHTTP2_ERR_PROTO;
}
apr_socket_timeout_set(socket, saved_timeout);
}
session->have_written = 1;
- if (rv != 0) {
+ if (rv != 0 && rv != NGHTTP2_ERR_WOULDBLOCK) {
if (nghttp2_is_fatal(rv)) {
dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
return APR_EGENERAL;
}
}
+/* trigger window updates, stream resumes and submits */
+static apr_status_t dispatch_master(h2_session *session)
+{
+ apr_status_t status;
+
+ status = h2_mplx_dispatch_master_events(session->mplx,
+ on_stream_resume, session);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
+ "h2_session(%ld): dispatch error", session->id);
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_INTERNAL_ERROR, "dispatch error");
+ }
+ return status;
+}
+
static const int MAX_WAIT_MICROS = 200 * 1000;
apr_status_t h2_session_process(h2_session *session, int async)
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
}
}
-
- /* trigger window updates, stream resumes and submits */
- status = h2_mplx_dispatch_master_events(session->mplx,
- on_stream_resume,
- session);
+
+ status = dispatch_master(session);
if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
- "h2_session(%ld): dispatch error",
- session->id);
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
- H2_ERR_INTERNAL_ERROR,
- "dispatch error");
break;
}
struct h2_push_diary *push_diary; /* remember pushes, avoid duplicates */
- int open_streams; /* number of streams open */
+ int open_streams; /* number of client streams open */
int unsent_submits; /* number of submitted, but not yet written responses. */
- int unsent_promises; /* number of submitted, but not yet written push promised */
+ int unsent_promises; /* number of submitted, but not yet written push promises */
int responses_submitted; /* number of http/2 responses submitted */
int streams_reset; /* number of http/2 streams reset by client */
apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen,
int *peos, h2_headers **presponse)
{
- conn_rec *c = stream->session->c;
apr_status_t status = APR_SUCCESS;
apr_off_t requested, max_chunk = H2_DATA_CHUNK_SIZE;
apr_bucket *b, *e;
+ conn_rec *c;
if (presponse) {
*presponse = NULL;
*peos = 1;
return APR_ECONNRESET;
}
-
- if (!output_open(stream)) {
+ else if (!output_open(stream)) {
return APR_ECONNRESET;
}
+
+ c = stream->session->c;
prep_output(stream);
if (stream->session->io.write_size > 0) {