-*- coding: utf-8 -*-
Changes with Apache 2.5.1
+ *) mod_http2: accurate reporting of h2 data input/output per request via mod_logio. Fixes
+ an issue where output sizes were counted n times on reused slave connections. See
+ github issue: https://github.com/icing/mod_h2/issues/158
+ [Stefan Eissing]
+
*) mod_proxy: Do not restrict the maximum pool size for backend connections
any longer by the maximum number of threads per process and use a better
default if mod_http2 is loaded.
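For context on the mod_logio accounting referenced in the mod_http2 entry above: the module does not call mod_logio directly but through httpd's optional functions. Below is a minimal sketch of that pattern, assuming only ap_logio_add_bytes_in/ap_logio_add_bytes_out from mod_logio.h; the names retrieve_logio_fns() and account_bytes() are illustrative and not part of this patch.

#include "httpd.h"
#include "apr_optional.h"
#include "mod_logio.h"   /* declares the optional ap_logio_add_bytes_* functions */

static APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *logio_add_bytes_in;
static APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *logio_add_bytes_out;

/* typically called from an optional_fn_retrieve or post_config hook */
static void retrieve_logio_fns(void)
{
    /* both pointers stay NULL when mod_logio is not loaded */
    logio_add_bytes_in  = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_in);
    logio_add_bytes_out = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_out);
}

/* charge raw octets to the connection that the %I/%O log formats report on */
static void account_bytes(conn_rec *c, apr_off_t in, apr_off_t out)
{
    if (logio_add_bytes_in && in > 0) {
        logio_add_bytes_in(c, in);
    }
    if (logio_add_bytes_out && out > 0) {
        logio_add_bytes_out(c, out);
    }
}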
/* Max data size to write so it fits inside a TLS record */
#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - 9)
+/* Size of the frame header itself in HTTP/2, RFC 7540 ch. 4.1 */
+#define H2_FRAME_HDR_LEN 9
+
/* Maximum number of padding bytes in a frame, rfc7540 */
#define H2_MAX_PADLEN 256
/* Initial default window size, RFC 7540 ch. 6.5.2 */
apr_time_t request_time;
unsigned int chunked : 1; /* iff request body needs to be forwarded as chunked */
unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
+ apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */
};
typedef struct h2_headers h2_headers;
int status;
apr_table_t *headers;
apr_table_t *notes;
+ apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */
};
typedef apr_status_t h2_io_data_cb(void *ctx, const char *data, apr_off_t len);
c->log = NULL;
c->log_id = apr_psprintf(pool, "%ld-%d",
master->id, slave_id);
- /* Simulate that we had already a request on this connection. */
- c->keepalives = 1;
c->aborted = 0;
/* We cannot install the master connection socket on the slaves, as
* modules mess with timeouts/blocking of the socket, with
apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd)
{
- return ap_run_pre_connection(slave, csd);
+ if (slave->keepalives == 0) {
+ /* Simulate that we already had a request on this connection. Some
+ * hooks trigger special behaviour when keepalives is 0.
+ * (Not necessarily in pre_connection, but later. Set it here, so it
+ * is in place.) */
+ slave->keepalives = 1;
+ return ap_run_pre_connection(slave, csd);
+ }
+ return APR_SUCCESS;
}
h2_headers *response = h2_headers_create(parser->http_status,
make_table(parser),
- NULL, task->pool);
+ NULL, 0, task->pool);
apr_brigade_cleanup(parser->tmp);
b = h2_bucket_headers_create(task->c->bucket_alloc, response);
APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
APR_BUCKET_REMOVE(b);
apr_bucket_destroy(b);
ap_remove_input_filter(f);
+
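+ /* charge any raw network bytes recorded for these headers (e.g. the
+ * octets of a trailer frame) to mod_logio as input on this connection */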
+ if (headers->raw_bytes && h2_task_logio_add_bytes_in) {
+ h2_task_logio_add_bytes_in(task->c, headers->raw_bytes);
+ }
break;
}
}
h2_headers *h2_headers_create(int status, apr_table_t *headers_in,
- apr_table_t *notes, apr_pool_t *pool)
+ apr_table_t *notes, apr_off_t raw_bytes,
+ apr_pool_t *pool)
{
h2_headers *headers = apr_pcalloc(pool, sizeof(h2_headers));
headers->status = status;
h2_headers *h2_headers_rcreate(request_rec *r, int status,
apr_table_t *header, apr_pool_t *pool)
{
- h2_headers *headers = h2_headers_create(status, header, r->notes, pool);
+ h2_headers *headers = h2_headers_create(status, header, r->notes, 0, pool);
if (headers->status == HTTP_FORBIDDEN) {
const char *cause = apr_table_get(r->notes, "ssl-renegotiate-forbidden");
if (cause) {
h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h)
{
return h2_headers_create(h->status, apr_table_copy(pool, h->headers),
- apr_table_copy(pool, h->notes), pool);
+ apr_table_copy(pool, h->notes), h->raw_bytes, pool);
}
h2_headers *h2_headers_die(apr_status_t type,
* @param status the headers status
* @param header the header fields to place in the headers
* @param notes the notes carried by the headers
+ * @param raw_bytes the raw network bytes (if known) used to transmit these
* @param pool the memory pool to use
*/
h2_headers *h2_headers_create(int status, apr_table_t *header,
- apr_table_t *notes, apr_pool_t *pool);
+ apr_table_t *notes, apr_off_t raw_bytes,
+ apr_pool_t *pool);
/**
* Create the headers from the given request_rec.
return 0;
}
-static void task_destroy(h2_mplx *m, h2_task *task)
-{
- conn_rec *slave = NULL;
- int reuse_slave = 0;
-
- slave = task->c;
-
- if (slave) {
- if (m->s->keep_alive_max == 0 || slave->keepalives < m->s->keep_alive_max) {
- reuse_slave = ((m->spare_slaves->nelts < (m->limit_active * 3 / 2))
- && !task->rst_error);
- }
-
- if (reuse_slave && slave->keepalive == AP_CONN_KEEPALIVE) {
- h2_beam_log(task->output.beam, m->c, APLOG_DEBUG,
- APLOGNO(03385) "h2_task_destroy, reuse slave");
- h2_task_destroy(task);
- APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave;
- }
- else {
- h2_beam_log(task->output.beam, m->c, APLOG_TRACE1,
- "h2_task_destroy, destroy slave");
- h2_slave_destroy(slave);
- }
- }
-}
-
static int stream_destroy_iter(void *ctx, void *val)
{
h2_mplx *m = ctx;
}
if (stream->task) {
- task_destroy(m, stream->task);
+ h2_task *task = stream->task;
+ conn_rec *slave;
+ int reuse_slave = 0;
+
stream->task = NULL;
+ slave = task->c;
+ if (slave) {
+ /* On non-serialized requests, the IO logging has not accounted for any
+ * meta data sent over the network: response headers and h2 frame headers. We
+ * counted this on the stream and need to add it now.
+ * This is supposed to happen before the EOR bucket triggers the
+ * logging of the transaction. *fingers crossed* */
+ if (task->request && !task->request->serialize && h2_task_logio_add_bytes_out) {
+ apr_off_t unaccounted = stream->out_frame_octets - stream->out_data_octets;
+ if (unaccounted > 0) {
+ h2_task_logio_add_bytes_out(slave, unaccounted);
+ }
+ }
+
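+ /* Decide whether the slave connection may be kept for reuse: keepalive
+ * limits must allow it, the spare pool must not be overfull and the
+ * stream must not have been reset. */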
+ if (m->s->keep_alive_max == 0 || slave->keepalives < m->s->keep_alive_max) {
+ reuse_slave = ((m->spare_slaves->nelts < (m->limit_active * 3 / 2))
+ && !task->rst_error);
+ }
+
+ if (reuse_slave && slave->keepalive == AP_CONN_KEEPALIVE) {
+ h2_beam_log(task->output.beam, m->c, APLOG_DEBUG,
+ APLOGNO(03385) "h2_task_destroy, reuse slave");
+ h2_task_destroy(task);
+ APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave;
+ }
+ else {
+ h2_beam_log(task->output.beam, m->c, APLOG_TRACE1,
+ "h2_task_destroy, destroy slave");
+ h2_slave_destroy(slave);
+ }
+ }
}
h2_stream_destroy(stream);
return 0;
ctx->req->authority, path, headers,
ctx->req->serialize);
/* atm, we do not push on pushes */
- h2_request_end_headers(req, ctx->pool, 1);
+ h2_request_end_headers(req, ctx->pool, 1, 0);
push->req = req;
if (has_param(ctx, "critical")) {
h2_priority *prio = apr_pcalloc(ctx->pool, sizeof(*prio));
return status;
}
-apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos)
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos, size_t raw_bytes)
{
const char *s;
apr_table_setn(req->headers, "Content-Length", "0");
}
}
-
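+ /* remember the raw frame octets that carried these header fields; they
+ * are charged to mod_logio when the request is processed */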
+ req->raw_bytes += raw_bytes;
+
return APR_SUCCESS;
}
const char *name, size_t nlen,
const char *value, size_t vlen);
-apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos);
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos, size_t raw_bytes);
h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src);
* trailers */
stream = get_stream(session, frame->hd.stream_id);
if (stream) {
- rv = h2_stream_recv_frame(stream, NGHTTP2_HEADERS, frame->hd.flags);
+ rv = h2_stream_recv_frame(stream, NGHTTP2_HEADERS, frame->hd.flags,
+ frame->hd.length + H2_FRAME_HDR_LEN);
}
break;
case NGHTTP2_DATA:
H2_STRM_LOG(APLOGNO(02923), stream,
"DATA, len=%ld, flags=%d"),
(long)frame->hd.length, frame->hd.flags);
- rv = h2_stream_recv_frame(stream, NGHTTP2_DATA, frame->hd.flags);
+ rv = h2_stream_recv_frame(stream, NGHTTP2_DATA, frame->hd.flags,
+ frame->hd.length + H2_FRAME_HDR_LEN);
}
break;
case NGHTTP2_PRIORITY:
stream = get_stream(session, stream_id);
if (stream) {
- h2_stream_send_frame(stream, frame->hd.type, frame->hd.flags);
+ h2_stream_send_frame(stream, frame->hd.type, frame->hd.flags,
+ frame->hd.length + H2_FRAME_HDR_LEN);
}
return 0;
}
stream->in_buffer = apr_brigade_create(stream->pool, c->bucket_alloc);
}
- r = h2_headers_create(HTTP_OK, stream->trailers, NULL, stream->pool);
+ r = h2_headers_create(HTTP_OK, stream->trailers, NULL,
+ stream->in_trailer_octets, stream->pool);
stream->trailers = NULL;
b = h2_bucket_headers_create(c->bucket_alloc, r);
APR_BRIGADE_INSERT_TAIL(stream->in_buffer, b);
r->serialize = h2_config_geti(stream->session->config, H2_CONF_SER_HEADERS);
}
-apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags)
+apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_t frame_len)
{
apr_status_t status = APR_SUCCESS;
int new_state, eos = 0;
AP_DEBUG_ASSERT(new_state > S_XXX);
return transit(stream, new_state);
}
-
+
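+ /* count every frame and its full length, including the 9 byte frame
+ * header; the difference to out_data_octets is the meta data that
+ * mod_logio has not yet seen on the slave connection */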
+ ++stream->out_frames;
+ stream->out_frame_octets += frame_len;
switch (ftype) {
case NGHTTP2_DATA:
eos = (flags & NGHTTP2_FLAG_END_STREAM);
/* start pushed stream */
ap_assert(stream->request == NULL);
ap_assert(stream->rtmp != NULL);
- status = h2_request_end_headers(stream->rtmp, stream->pool, 1);
+ status = h2_request_end_headers(stream->rtmp, stream->pool, 1, 0);
if (status != APR_SUCCESS) {
return status;
}
return status;
}
-apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags)
+apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_t frame_len)
{
apr_status_t status = APR_SUCCESS;
int new_state, eos = 0;
if (!eos) {
h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR);
}
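+ /* remember the raw octets of the trailer frame; they are attached to
+ * the trailer h2_headers and charged to logio input when read */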
+ stream->in_trailer_octets += frame_len;
}
else {
/* request HEADER */
* to abort the connection here, since this is clearly a protocol error */
return APR_EINVAL;
}
- status = h2_request_end_headers(stream->rtmp, stream->pool, eos);
+ status = h2_request_end_headers(stream->rtmp, stream->pool, eos, frame_len);
if (status != APR_SUCCESS) {
return status;
}
stream->rtmp = req;
/* simulate the frames that led to this */
return h2_stream_recv_frame(stream, NGHTTP2_HEADERS,
- NGHTTP2_FLAG_END_STREAM);
+ NGHTTP2_FLAG_END_STREAM, 0);
}
return status;
}
struct h2_task *task; /* assigned task to fulfill request */
const h2_priority *pref_priority; /* preferred priority for this stream */
+ apr_off_t out_frames; /* # of frames sent out */
+ apr_off_t out_frame_octets; /* # of RAW frame octets sent out */
apr_off_t out_data_frames; /* # of DATA frames sent */
apr_off_t out_data_octets; /* # of DATA octets (payload) sent */
apr_off_t in_data_frames; /* # of DATA frames received */
apr_off_t in_data_octets; /* # of DATA octets (payload) received */
+ apr_off_t in_trailer_octets; /* # of raw HEADER frame octets received in trailers */
h2_stream_monitor *monitor; /* optional monitor for stream states */
};
const char *name, size_t nlen,
const char *value, size_t vlen);
-apr_status_t h2_stream_send_frame(h2_stream *stream, int frame_type, int flags);
-apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags);
+apr_status_t h2_stream_send_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
+apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
/*
* Process a frame of received DATA.
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_task(%s): start process_request", task->id);
+ /* Add the raw bytes of the request (e.g. header frame lengths) to
+ * the logio accounting for this request. */
+ if (req->raw_bytes && h2_task_logio_add_bytes_in) {
+ h2_task_logio_add_bytes_in(c, req->raw_bytes);
+ }
+
ap_process_request(r);
+
if (task->frozen) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_task(%s): process_request frozen", task->id);
* @macro
* Version number of the http2 module as c string
*/
-#define MOD_HTTP2_VERSION "1.10.16-DEV"
+#define MOD_HTTP2_VERSION "1.10.17-DEV"
/**
* @macro
* release. This is a 24 bit number with 8 bits for major number, 8 bits
* for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
*/
-#define MOD_HTTP2_VERSION_NUM 0x010a10
+#define MOD_HTTP2_VERSION_NUM 0x010a11
#endif /* mod_h2_h2_version_h */