-*- coding: utf-8 -*-
Changes with Apache 2.5.0
+ *) mod_http2: slave connections are now reused. [Stefan Eissing]
+
*) mod_proxy_http2: using HTTP/2 flow control for backend streams by
observing the data actually sent out on the frontend h2 connection.
[Stefan Eissing]
return status;
}
-
conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *parent,
apr_allocator_t *allocator)
{
apr_pool_create_ex(&pool, parent, NULL, allocator);
apr_pool_tag(pool, "h2_slave_conn");
apr_allocator_owner_set(allocator, pool);
-
+
c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
if (c == NULL) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master,
}
apr_pool_destroy(slave->pool);
}
+
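+/* Run the connection's pre_connection hooks; called once when a new
+ * slave connection has been set up. */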
+apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd)
+{
+ return ap_run_pre_connection(slave, csd);
+}
+
apr_allocator_t *allocator);
void h2_slave_destroy(conn_rec *slave, apr_allocator_t **pallocator);
+apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd);
+void h2_slave_run_connection(conn_rec *slave);
+
#endif /* defined(__mod_h2__h2_conn__) */
m->tx_handles_reserved = 0;
m->tx_chunk_size = 4;
+ m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
+
m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams,
m->stream_max_mem);
h2_ngn_shed_set_ctx(m->ngn_shed, m);
static void io_destroy(h2_mplx *m, h2_io *io, int events)
{
- apr_pool_t *pool;
-
/* cleanup any buffered input */
h2_io_in_shutdown(io);
if (events) {
}
if (io->task) {
- if (m->spare_allocator) {
- apr_allocator_destroy(m->spare_allocator);
- m->spare_allocator = NULL;
- }
-
- h2_slave_destroy(io->task->c, &m->spare_allocator);
+ conn_rec *slave = io->task->c;
+ h2_task_destroy(io->task);
io->task = NULL;
+
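+ /* Keep the slave connection for reuse by a later stream while there
+ * is room in the spare list, otherwise tear it down. */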
+ if (m->spare_slaves->nelts < m->spare_slaves->nalloc) {
+ APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave;
+ }
+ else {
+ h2_slave_destroy(slave, NULL);
+ }
}
- pool = io->pool;
- io->pool = NULL;
- if (0 && pool) {
- apr_pool_clear(pool);
- if (m->spare_pool) {
- apr_pool_destroy(m->spare_pool);
- }
- m->spare_pool = pool;
+ if (io->pool) {
+ apr_pool_destroy(io->pool);
}
check_tx_free(m);
static h2_io *open_io(h2_mplx *m, int stream_id, const h2_request *request)
{
- apr_pool_t *io_pool = m->spare_pool;
+ apr_pool_t *io_pool;
h2_io *io;
- if (!io_pool) {
- apr_pool_create(&io_pool, m->pool);
- apr_pool_tag(io_pool, "h2_io");
- }
- else {
- m->spare_pool = NULL;
- }
-
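+ /* Each stream io gets its own subpool, destroyed again in io_destroy(). */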
+ apr_pool_create(&io_pool, m->pool);
+ apr_pool_tag(io_pool, "h2_io");
io = h2_io_create(stream_id, io_pool, m->bucket_alloc, request);
h2_io_set_add(m->stream_ios, io);
}
}
else if (io) {
- conn_rec *slave = h2_slave_create(m->c, m->pool, m->spare_allocator);
- m->spare_allocator = NULL;
+ conn_rec *slave, **pslave;
+
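+ /* Reuse a spare slave connection if one is available, otherwise
+ * create a new one and run its pre_connection hooks once. */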
+ pslave = (conn_rec **)apr_array_pop(m->spare_slaves);
+ if (pslave) {
+ slave = *pslave;
+ }
+ else {
+ slave = h2_slave_create(m->c, m->pool, NULL);
+ h2_slave_run_pre_connection(slave, ap_get_conn_socket(slave));
+ }
+
io->task = task = h2_task_create(m->id, io->request, slave, m);
apr_table_setn(slave->notes, H2_TASK_ID_NOTE, task->id);
+
io->worker_started = 1;
io->started_at = apr_time_now();
if (sid > m->max_stream_started) {
apr_size_t stream_max_mem;
apr_interval_time_t stream_timeout;
- apr_pool_t *spare_pool; /* spare pool, ready for next io */
- apr_allocator_t *spare_allocator;
+ apr_array_header_t *spare_slaves; /* spare slave connections */
struct h2_workers *workers;
apr_size_t tx_handles_reserved;
static void ngn_add_task(h2_req_engine *ngn, h2_task *task)
{
- h2_ngn_entry *entry = apr_pcalloc(task->c->pool, sizeof(*entry));
+ h2_ngn_entry *entry = apr_pcalloc(task->pool, sizeof(*entry));
APR_RING_ELEM_INIT(entry, link);
entry->task = task;
H2_REQ_ENTRIES_INSERT_TAIL(&ngn->entries, entry);
/* no existing engine or being shut down, start a new one */
if (einit) {
apr_status_t status;
- apr_pool_t *pool = task->c->pool;
+ apr_pool_t *pool = task->pool;
h2_req_engine *newngn;
newngn = apr_pcalloc(pool, sizeof(*ngn));
/*******************************************************************************
* Register various hooks
*/
-static const char *const mod_ssl[] = { "mod_ssl.c", NULL};
-static int h2_task_pre_conn(conn_rec* c, void *arg);
static int h2_task_process_conn(conn_rec* c);
APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in;
void h2_task_register_hooks(void)
{
- /* This hook runs on new connections before mod_ssl has a say.
- * Its purpose is to prevent mod_ssl from touching our pseudo-connections
- * for streams.
- */
- ap_hook_pre_connection(h2_task_pre_conn,
- NULL, mod_ssl, APR_HOOK_FIRST);
/* When the connection processing actually starts, we might
* take over, if the connection is for a task.
*/
return APR_SUCCESS;
}
-static int h2_task_pre_conn(conn_rec* c, void *arg)
-{
- h2_ctx *ctx;
-
- if (!c->master) {
- return OK;
- }
-
- ctx = h2_ctx_get(c, 0);
- (void)arg;
- if (h2_ctx_is_task(ctx)) {
- h2_task *task = h2_ctx_get_task(ctx);
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
- "h2_h2, pre_connection, found stream task");
-
- /* Add our own, network level in- and output filters.
- */
- ap_add_input_filter("H2_TO_H1", task, NULL, c);
- ap_add_output_filter("H1_TO_H2", task, NULL, c);
- }
- return OK;
-}
-
h2_task *h2_task_create(long session_id, const h2_request *req,
conn_rec *c, h2_mplx *mplx)
{
- h2_task *task = apr_pcalloc(c->pool, sizeof(h2_task));
+ apr_pool_t *pool;
+ h2_task *task;
+
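+ /* The task gets its own pool, a subpool of the slave connection,
+ * destroyed again in h2_task_destroy(). */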
+ apr_pool_create(&pool, c->pool);
+ task = apr_pcalloc(pool, sizeof(h2_task));
if (task == NULL) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, c,
APLOGNO(02941) "h2_task(%ld-%d): create stream task",
return NULL;
}
- task->id = apr_psprintf(c->pool, "%ld-%d", session_id, req->id);
+ task->id = apr_psprintf(pool, "%ld-%d", session_id, req->id);
task->stream_id = req->id;
task->c = c;
task->mplx = mplx;
+ task->pool = pool;
task->request = req;
task->input_eos = !req->body;
task->ser_headers = req->serialize;
task->blocking = 1;
h2_ctx_create_for(c, task);
+ /* Add our own network level input and output filters. */
+ ap_add_input_filter("H2_TO_H1", task, NULL, c);
+ ap_add_output_filter("H1_TO_H2", task, NULL, c);
return task;
}
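+/* Counterpart to h2_task_create(): remove the connection filters added
+ * there and destroy the task pool. */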
+void h2_task_destroy(h2_task *task)
+{
+ ap_remove_input_filter_byhandle(task->c->input_filters, "H2_TO_H1");
+ ap_remove_output_filter_byhandle(task->c->output_filters, "H1_TO_H2");
+ if (task->pool) {
+ apr_pool_destroy(task->pool);
+ }
+}
+
void h2_task_set_io_blocking(h2_task *task, int blocking)
{
task->blocking = blocking;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
"h2_task(%s): process connection", task->id);
- ap_process_connection(task->c, ap_get_conn_socket(task->c));
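+ /* pre_connection hooks already ran when the slave connection was set
+ * up, so only the process_connection hooks need to run here. */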
+ ap_run_process_connection(task->c);
if (task->frozen) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
const char *id;
int stream_id;
conn_rec *c;
- struct h2_mplx *mplx;
+ struct h2_mplx *mplx;
+ apr_pool_t *pool;
const struct h2_request *request;
unsigned int filters_set : 1;
h2_task *h2_task_create(long session_id, const struct h2_request *req,
conn_rec *c, struct h2_mplx *mplx);
+void h2_task_destroy(h2_task *task);
+
apr_status_t h2_task_do(h2_task *task, struct apr_thread_cond_t *cond);
void h2_task_register_hooks(void);
h2_task_input *h2_task_input_create(h2_task *task, conn_rec *c)
{
- h2_task_input *input = apr_pcalloc(c->pool, sizeof(h2_task_input));
+ h2_task_input *input = apr_pcalloc(task->pool, sizeof(h2_task_input));
if (input) {
- input->c = c;
input->task = task;
input->bb = NULL;
input->block = APR_BLOCK_READ;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_task_input(%s): serialize request %s %s",
task->id, task->request->method, task->request->path);
- input->bb = apr_brigade_create(c->pool, c->bucket_alloc);
+ input->bb = apr_brigade_create(task->pool, c->bucket_alloc);
apr_brigade_printf(input->bb, NULL, NULL, "%s %s HTTP/1.1\r\n",
task->request->method, task->request->path);
apr_table_do(ser_header, input, task->request->headers, NULL);
}
}
else if (!input->task->input_eos) {
- input->bb = apr_brigade_create(c->pool, c->bucket_alloc);
+ input->bb = apr_brigade_create(task->pool, c->bucket_alloc);
}
else {
/* We do not serialize and have eos already, no need to
typedef struct h2_task_input h2_task_input;
struct h2_task_input {
- conn_rec *c;
struct h2_task *task;
apr_bucket_brigade *bb;
apr_read_type_e block;
h2_task_output *h2_task_output_create(h2_task *task, conn_rec *c)
{
- h2_task_output *output = apr_pcalloc(c->pool, sizeof(h2_task_output));
+ h2_task_output *output = apr_pcalloc(task->pool, sizeof(h2_task_output));
if (output) {
- output->c = c;
output->task = task;
- output->state = H2_TASK_OUT_INIT;
- output->from_h1 = h2_from_h1_create(task->stream_id, c->pool);
+ output->from_h1 = h2_from_h1_create(task->stream_id, task->pool);
}
return output;
}
output->trailers_passed = 1;
if (h2_task_logio_add_bytes_out) {
/* count trailers as if we'd do a HTTP/1.1 serialization */
- h2_task_logio_add_bytes_out(output->c,
+ h2_task_logio_add_bytes_out(output->task->c,
h2_util_table_bytes(response->trailers, 3)+1);
}
return response->trailers;
if (f) {
/* This happens currently when ap_die(status, r) is invoked
* by a read request filter. */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, output->c, APLOGNO(03204)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, output->task->c, APLOGNO(03204)
"h2_task_output(%s): write without response by %s "
"for %s %s %s",
output->task->id, caller,
output->task->request->method,
output->task->request->authority,
output->task->request->path);
- output->c->aborted = 1;
+ output->task->c->aborted = 1;
}
if (output->task->io) {
apr_thread_cond_broadcast(output->task->io);
if (h2_task_logio_add_bytes_out) {
/* count headers as if we'd do a HTTP/1.1 serialization */
output->written = h2_util_table_bytes(response->headers, 3)+1;
- h2_task_logio_add_bytes_out(output->c, output->written);
+ h2_task_logio_add_bytes_out(output->task->c, output->written);
}
get_trailers(output);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, output->c, APLOGNO(03348)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, output->task->c, APLOGNO(03348)
"h2_task(%s): open response to %s %s %s",
output->task->id, output->task->request->method,
output->task->request->authority,
apr_status_t status;
apr_brigade_length(bb, 0, &written);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->task->c,
"h2_task(%s): write response body (%ld bytes)",
output->task->id, (long)written);
if (status == APR_SUCCESS) {
output->written += written;
if (h2_task_logio_add_bytes_out) {
- h2_task_logio_add_bytes_out(output->c, written);
+ h2_task_logio_add_bytes_out(output->task->c, written);
}
}
return status;
apr_status_t status = APR_SUCCESS;
if (APR_BRIGADE_EMPTY(bb)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->task->c,
"h2_task(%s): empty write", output->task->id);
return APR_SUCCESS;
}
if (output->task->frozen) {
- h2_util_bb_log(output->c, output->task->stream_id, APLOG_TRACE2,
+ h2_util_bb_log(output->task->c, output->task->stream_id, APLOG_TRACE2,
"frozen task output write, ignored", bb);
return APR_SUCCESS;
}
- if (output->state == H2_TASK_OUT_INIT) {
+ if (!output->response_open) {
status = open_response(output, f, bb, "write");
- output->state = H2_TASK_OUT_STARTED;
+ output->response_open = 1;
}
/* Attempt to write saved brigade first */
/* If the passed brigade is not empty, save it before return */
if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, output->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, output->task->c,
"h2_task(%s): could not write all, saving brigade",
output->task->id);
if (!output->bb) {
- output->bb = apr_brigade_create(output->c->pool, output->c->bucket_alloc);
+ output->bb = apr_brigade_create(output->task->pool, output->task->c->bucket_alloc);
}
- return ap_save_brigade(f, &output->bb, &bb, output->c->pool);
+ return ap_save_brigade(f, &output->bb, &bb, output->task->pool);
}
return status;
struct h2_task;
struct h2_from_h1;
-typedef enum {
- H2_TASK_OUT_INIT,
- H2_TASK_OUT_STARTED,
- H2_TASK_OUT_DONE,
-} h2_task_out_state_t;
-
typedef struct h2_task_output h2_task_output;
struct h2_task_output {
- conn_rec *c;
struct h2_task *task;
- h2_task_out_state_t state;
struct h2_from_h1 *from_h1;
+ unsigned int response_open : 1;
unsigned int trailers_passed : 1;
apr_off_t written;
if (APR_BUCKET_IS_EOS(b)) {
APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc));
}
- else if (APR_BUCKET_IS_FLUSH(b)) {
- APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to->bucket_alloc));
- }
else {
/* ignore */
}
typedef struct h2_proxy_ctx {
conn_rec *owner;
+ apr_pool_t *pool;
request_rec *rbase;
server_rec *server;
const char *proxy_func;
h2_proxy_ctx *ctx = ap_get_module_config(r->connection->conn_config,
&proxy_http2_module);
if (ctx) {
+ conn_rec *c = ctx->owner;
+ h2_proxy_ctx *nctx;
+
+ /* We need a longer lifetime for this: if we do not host an engine,
+ * the context lives in r->pool, but since we expect to serve more
+ * requests than just r, it has to live longer. */
+ nctx = apr_pcalloc(pool, sizeof(*nctx));
+ if (nctx == NULL) {
+ return APR_ENOMEM;
+ }
+ memcpy(nctx, ctx, sizeof(*nctx));
+ ctx = nctx;
+ ctx->pool = pool;
ctx->engine = engine;
ctx->engine_id = id;
ctx->engine_type = type;
ctx->engine_pool = pool;
ctx->req_buffer_size = req_buffer_size;
ctx->capacity = 100;
+
+ ap_set_module_config(c->conn_config, &proxy_http2_module, ctx);
+
*pconsumed = out_consumed;
*pctx = ctx;
return APR_SUCCESS;
return status;
}
-static apr_status_t push_request_somewhere(h2_proxy_ctx *ctx)
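+/* Push the request to an engine that already serves this backend or
+ * prepare to host an engine ourselves. The ctx registered on the
+ * connection may be replaced with one of longer lifetime, so the
+ * current ctx is returned. */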
+static h2_proxy_ctx *push_request_somewhere(h2_proxy_ctx *ctx)
{
conn_rec *c = ctx->owner;
const char *engine_type, *hostname;
hostname = (ctx->p_conn->ssl_hostname?
ctx->p_conn->ssl_hostname : ctx->p_conn->hostname);
- engine_type = apr_psprintf(c->pool, "proxy_http2 %s%s", hostname,
+ engine_type = apr_psprintf(ctx->pool, "proxy_http2 %s%s", hostname,
ctx->server_portstr);
if (c->master && req_engine_push && ctx->next && is_h2 && is_h2(c)) {
* uses the same backend. We may be called to create an engine
* ourselves. */
if (req_engine_push(engine_type, ctx->next, proxy_engine_init)
- == APR_SUCCESS && ctx->engine == NULL) {
- /* Another engine instance has taken over processing of this
- * request. */
- ctx->r_status = SUSPENDED;
- ctx->next = NULL;
-
- return APR_SUCCESS;
+ == APR_SUCCESS) {
+ /* the engine init may have replaced the ctx to renew its lifetime */
+ ctx = ap_get_module_config(c->conn_config, &proxy_http2_module);
+ if (ctx->engine == NULL) {
+ /* Another engine instance has taken over processing of this
+ * request. */
+ ctx->r_status = SUSPENDED;
+ ctx->next = NULL;
+ return ctx;
+ }
}
}
if (!ctx->engine) {
/* No engine was available or has been initialized, handle this
* request just by ourselves. */
- ctx->engine_id = apr_psprintf(c->pool, "eng-proxy-%ld", c->id);
+ ctx->engine_id = apr_psprintf(ctx->pool, "eng-proxy-%ld", c->id);
ctx->engine_type = engine_type;
- ctx->engine_pool = c->pool;
+ ctx->engine_pool = ctx->pool;
ctx->req_buffer_size = (32*1024);
ctx->standalone = 1;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"H2: hosting engine %s", ctx->engine_id);
}
- return APR_SUCCESS;
+ return ctx;
}
static int proxy_http2_handler(request_rec *r,
apr_size_t slen;
int is_ssl = 0;
apr_status_t status;
- conn_rec *c = r->connection;
- server_rec *s = r->server;
- apr_pool_t *p = c->pool;
- apr_uri_t *uri = apr_palloc(p, sizeof(*uri));
h2_proxy_ctx *ctx;
+ apr_uri_t uri;
int reconnected = 0;
/* find the scheme */
default:
return DECLINED;
}
-
- ctx = apr_pcalloc(p, sizeof(*ctx));
- ctx->owner = c;
+ ctx = apr_pcalloc(r->pool, sizeof(*ctx));
+ ctx->owner = r->connection;
+ ctx->pool = r->pool;
ctx->rbase = r;
- ctx->server = s;
+ ctx->server = r->server;
ctx->proxy_func = proxy_func;
ctx->is_ssl = is_ssl;
ctx->worker = worker;
ctx->r_status = HTTP_SERVICE_UNAVAILABLE;
ctx->next = r;
r = NULL;
- ap_set_module_config(c->conn_config, &proxy_http2_module, ctx);
+ ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx);
/* scheme says, this is for us. */
apr_table_setn(ctx->rbase->notes, H2_PROXY_REQ_URL_NOTE, url);
* be one still open from another request, or it might fail if the
* worker is stopped or in error. */
if ((status = ap_proxy_acquire_connection(ctx->proxy_func, &ctx->p_conn,
- ctx->worker, s)) != OK) {
+ ctx->worker, ctx->server)) != OK) {
goto cleanup;
}
/* Step One: Determine the URL to connect to (might be a proxy),
* initialize the backend accordingly and determine the server
* port string we can expect in responses. */
- if ((status = ap_proxy_determine_connection(p, ctx->rbase, conf, worker,
- ctx->p_conn, uri, &locurl,
+ if ((status = ap_proxy_determine_connection(ctx->pool, ctx->rbase, conf, worker,
+ ctx->p_conn, &uri, &locurl,
proxyname, proxyport,
ctx->server_portstr,
sizeof(ctx->server_portstr))) != OK) {
/* If we are not already hosting an engine, try to push the request
* to an already existing engine or host a new engine here. */
if (!ctx->engine) {
- push_request_somewhere(ctx);
+ ctx = push_request_somewhere(ctx);
if (ctx->r_status == SUSPENDED) {
/* request was pushed to another engine */
goto cleanup;
ctx->p_conn = NULL;
}
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, "leaving handler");
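+ /* The ctx is allocated from r->pool or the engine pool; unregister it
+ * from the connection before leaving the handler. */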
+ ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, NULL);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, "leaving handler");
return ctx->r_status;
}