Changes with Apache 2.4.19
+  *) mod_http2: slave connections are reused for several requests, improving
+     performance and memory use. [Stefan Eissing]
+
*) mod_rewrite: Don't implicitly URL-escape the original query string
when no substitution has changed it (like PR50447 but server context)
[Evgeny Kotkov <evgeny.kotkov visualsvn.com>]
return status;
}
-
conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *parent,
apr_allocator_t *allocator)
{
apr_pool_create_ex(&pool, parent, NULL, allocator);
apr_pool_tag(pool, "h2_slave_conn");
apr_allocator_owner_set(allocator, pool);
-
+
c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
if (c == NULL) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master,
}
apr_pool_destroy(slave->pool);
}
+
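+/* Run the pre_connection hooks for a slave connection. Since slave
+ * connections are now reused for several requests, this only needs to
+ * be done once, right after the slave has been created. */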
+apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd)
+{
+ return ap_run_pre_connection(slave, csd);
+}
+
apr_allocator_t *allocator);
void h2_slave_destroy(conn_rec *slave, apr_allocator_t **pallocator);
+apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd);
+void h2_slave_run_connection(conn_rec *slave);
+
#endif /* defined(__mod_h2__h2_conn__) */
m->tx_handles_reserved = 0;
m->tx_chunk_size = 4;
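+    /* keep up to 10 finished slave connections around for reuse */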
+ m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
+
m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams,
m->stream_max_mem);
    h2_ngn_shed_set_ctx(m->ngn_shed, m);
static void io_destroy(h2_mplx *m, h2_io *io, int events)
{
- apr_pool_t *pool;
-
/* cleanup any buffered input */
h2_io_in_shutdown(io);
if (events) {
}
if (io->task) {
- if (m->spare_allocator) {
- apr_allocator_destroy(m->spare_allocator);
- m->spare_allocator = NULL;
- }
-
- h2_slave_destroy(io->task->c, &m->spare_allocator);
+ conn_rec *slave = io->task->c;
+ h2_task_destroy(io->task);
io->task = NULL;
+
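+        /* keep the finished slave connection for reuse by a later
+         * stream, as long as the spare list has room */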
+ if (m->spare_slaves->nelts < m->spare_slaves->nalloc) {
+ APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave;
+ }
+ else {
+ h2_slave_destroy(slave, NULL);
+ }
}
- pool = io->pool;
- io->pool = NULL;
- if (0 && pool) {
- apr_pool_clear(pool);
- if (m->spare_pool) {
- apr_pool_destroy(m->spare_pool);
- }
- m->spare_pool = pool;
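+    /* io pools are no longer kept as spares; release the memory right away */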
+ if (io->pool) {
+ apr_pool_destroy(io->pool);
}
check_tx_free(m);
static h2_io *open_io(h2_mplx *m, int stream_id, const h2_request *request)
{
- apr_pool_t *io_pool = m->spare_pool;
+ apr_pool_t *io_pool;
h2_io *io;
- if (!io_pool) {
- apr_pool_create(&io_pool, m->pool);
- apr_pool_tag(io_pool, "h2_io");
- }
- else {
- m->spare_pool = NULL;
- }
-
+ apr_pool_create(&io_pool, m->pool);
+ apr_pool_tag(io_pool, "h2_io");
io = h2_io_create(stream_id, io_pool, m->bucket_alloc, request);
h2_io_set_add(m->stream_ios, io);
}
}
else if (io) {
- conn_rec *slave = h2_slave_create(m->c, m->pool, m->spare_allocator);
- m->spare_allocator = NULL;
+ conn_rec *slave, **pslave;
+ int new_conn = 0;
+
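+        /* reuse a spare slave connection if one is available,
+         * otherwise create a new one */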
+ pslave = (conn_rec **)apr_array_pop(m->spare_slaves);
+ if (pslave) {
+ slave = *pslave;
+ }
+ else {
+ slave = h2_slave_create(m->c, m->pool, NULL);
+ new_conn = 1;
+ }
+
io->task = task = h2_task_create(m->id, io->request, slave, m);
apr_table_setn(slave->notes, H2_TASK_ID_NOTE, task->id);
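+        /* pre_connection hooks have already run on a reused slave;
+         * only run them when the connection was freshly created */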
+ if (new_conn) {
+ h2_slave_run_pre_connection(slave, ap_get_conn_socket(slave));
+ }
io->worker_started = 1;
io->started_at = apr_time_now();
if (sid > m->max_stream_started) {
apr_size_t stream_max_mem;
apr_interval_time_t stream_timeout;
- apr_pool_t *spare_pool; /* spare pool, ready for next io */
- apr_allocator_t *spare_allocator;
+ apr_array_header_t *spare_slaves; /* spare slave connections */
struct h2_workers *workers;
apr_size_t tx_handles_reserved;
static void ngn_add_task(h2_req_engine *ngn, h2_task *task)
{
- h2_ngn_entry *entry = apr_pcalloc(task->c->pool, sizeof(*entry));
+ h2_ngn_entry *entry = apr_pcalloc(task->pool, sizeof(*entry));
APR_RING_ELEM_INIT(entry, link);
entry->task = task;
H2_REQ_ENTRIES_INSERT_TAIL(&ngn->entries, entry);
/* no existing engine or being shut down, start a new one */
if (einit) {
apr_status_t status;
- apr_pool_t *pool = task->c->pool;
+ apr_pool_t *pool = task->pool;
h2_req_engine *newngn;
newngn = apr_pcalloc(pool, sizeof(*ngn));
apr_read_type_e block,
apr_off_t readbytes)
{
- h2_task *task = filter->ctx;
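+    /* the filters are registered without a ctx; look up the task via
+     * the connection's h2_ctx instead */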
+ h2_task *task = h2_ctx_cget_task(filter->c);
AP_DEBUG_ASSERT(task);
if (!task->input) {
return APR_ECONNABORTED;
static apr_status_t h2_filter_stream_output(ap_filter_t* filter,
apr_bucket_brigade* brigade)
{
- h2_task *task = filter->ctx;
+ h2_task *task = h2_ctx_cget_task(filter->c);
AP_DEBUG_ASSERT(task);
if (!task->output) {
return APR_ECONNABORTED;
return h2_task_output_write(task->output, filter, brigade);
}
-static apr_status_t h2_filter_read_response(ap_filter_t* f,
+static apr_status_t h2_filter_read_response(ap_filter_t* filter,
apr_bucket_brigade* bb)
{
- h2_task *task = f->ctx;
+ h2_task *task = h2_ctx_cget_task(filter->c);
AP_DEBUG_ASSERT(task);
if (!task->output || !task->output->from_h1) {
return APR_ECONNABORTED;
}
- return h2_from_h1_read_response(task->output->from_h1, f, bb);
+ return h2_from_h1_read_response(task->output->from_h1, filter, bb);
}
/*******************************************************************************
ctx = h2_ctx_get(c, 0);
(void)arg;
if (h2_ctx_is_task(ctx)) {
- h2_task *task = h2_ctx_get_task(ctx);
-
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
"h2_h2, pre_connection, found stream task");
/* Add our own, network level in- and output filters.
*/
- ap_add_input_filter("H2_TO_H1", task, NULL, c);
- ap_add_output_filter("H1_TO_H2", task, NULL, c);
+ ap_add_input_filter("H2_TO_H1", NULL, NULL, c);
+ ap_add_output_filter("H1_TO_H2", NULL, NULL, c);
}
return OK;
}
h2_task *h2_task_create(long session_id, const h2_request *req,
conn_rec *c, h2_mplx *mplx)
{
- h2_task *task = apr_pcalloc(c->pool, sizeof(h2_task));
+ apr_pool_t *pool;
+ h2_task *task;
+
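+    /* give the task its own subpool of the slave connection's pool, so
+     * its memory can be released in h2_task_destroy while the
+     * connection stays around for reuse */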
+ apr_pool_create(&pool, c->pool);
+ task = apr_pcalloc(pool, sizeof(h2_task));
if (task == NULL) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, c,
APLOGNO(02941) "h2_task(%ld-%d): create stream task",
return NULL;
}
- task->id = apr_psprintf(c->pool, "%ld-%d", session_id, req->id);
+ task->id = apr_psprintf(pool, "%ld-%d", session_id, req->id);
task->stream_id = req->id;
task->c = c;
task->mplx = mplx;
+ task->pool = pool;
task->request = req;
task->input_eos = !req->body;
task->ser_headers = req->serialize;
task->blocking = 1;
h2_ctx_create_for(c, task);
-
return task;
}
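+/* Free all memory allocated for the task. The slave connection it ran
+ * on is left untouched; the mplx decides whether to keep it as a spare
+ * or destroy it. */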
+void h2_task_destroy(h2_task *task)
+{
+ if (task->pool) {
+ apr_pool_destroy(task->pool);
+ }
+}
+
void h2_task_set_io_blocking(h2_task *task, int blocking)
{
task->blocking = blocking;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
"h2_task(%s): process connection", task->id);
- ap_process_connection(task->c, ap_get_conn_socket(task->c));
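+    /* run the process_connection hooks directly; the socket is already
+     * attached to the slave connection */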
+ ap_run_process_connection(task->c);
if (task->frozen) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
const char *id;
int stream_id;
conn_rec *c;
- struct h2_mplx *mplx;
+ struct h2_mplx *mplx;
+ apr_pool_t *pool;
const struct h2_request *request;
unsigned int filters_set : 1;
h2_task *h2_task_create(long session_id, const struct h2_request *req,
conn_rec *c, struct h2_mplx *mplx);
+void h2_task_destroy(h2_task *task);
+
apr_status_t h2_task_do(h2_task *task, struct apr_thread_cond_t *cond);
void h2_task_register_hooks(void);
h2_task_input *h2_task_input_create(h2_task *task, conn_rec *c)
{
- h2_task_input *input = apr_pcalloc(c->pool, sizeof(h2_task_input));
+ h2_task_input *input = apr_pcalloc(task->pool, sizeof(h2_task_input));
if (input) {
- input->c = c;
input->task = task;
input->bb = NULL;
input->block = APR_BLOCK_READ;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_task_input(%s): serialize request %s %s",
task->id, task->request->method, task->request->path);
- input->bb = apr_brigade_create(c->pool, c->bucket_alloc);
+ input->bb = apr_brigade_create(task->pool, c->bucket_alloc);
apr_brigade_printf(input->bb, NULL, NULL, "%s %s HTTP/1.1\r\n",
task->request->method, task->request->path);
apr_table_do(ser_header, input, task->request->headers, NULL);
}
}
else if (!input->task->input_eos) {
- input->bb = apr_brigade_create(c->pool, c->bucket_alloc);
+ input->bb = apr_brigade_create(task->pool, c->bucket_alloc);
}
else {
/* We do not serialize and have eos already, no need to
typedef struct h2_task_input h2_task_input;
struct h2_task_input {
- conn_rec *c;
struct h2_task *task;
apr_bucket_brigade *bb;
apr_read_type_e block;
h2_task_output *h2_task_output_create(h2_task *task, conn_rec *c)
{
- h2_task_output *output = apr_pcalloc(c->pool, sizeof(h2_task_output));
+ h2_task_output *output = apr_pcalloc(task->pool, sizeof(h2_task_output));
if (output) {
- output->c = c;
output->task = task;
- output->state = H2_TASK_OUT_INIT;
- output->from_h1 = h2_from_h1_create(task->stream_id, c->pool);
+ output->from_h1 = h2_from_h1_create(task->stream_id, task->pool);
}
return output;
}
output->trailers_passed = 1;
if (h2_task_logio_add_bytes_out) {
        /* count trailers as if we'd do a HTTP/1.1 serialization */
- h2_task_logio_add_bytes_out(output->c,
+ h2_task_logio_add_bytes_out(output->task->c,
h2_util_table_bytes(response->trailers, 3)+1);
}
return response->trailers;
if (f) {
/* This happens currently when ap_die(status, r) is invoked
* by a read request filter. */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, output->c, APLOGNO(03204)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, output->task->c, APLOGNO(03204)
"h2_task_output(%s): write without response by %s "
"for %s %s %s",
output->task->id, caller,
output->task->request->method,
output->task->request->authority,
output->task->request->path);
- output->c->aborted = 1;
+ output->task->c->aborted = 1;
}
if (output->task->io) {
apr_thread_cond_broadcast(output->task->io);
if (h2_task_logio_add_bytes_out) {
/* count headers as if we'd do a HTTP/1.1 serialization */
output->written = h2_util_table_bytes(response->headers, 3)+1;
- h2_task_logio_add_bytes_out(output->c, output->written);
+ h2_task_logio_add_bytes_out(output->task->c, output->written);
}
get_trailers(output);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, output->c, APLOGNO(03348)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, output->task->c, APLOGNO(03348)
"h2_task(%s): open response to %s %s %s",
output->task->id, output->task->request->method,
output->task->request->authority,
apr_status_t status;
apr_brigade_length(bb, 0, &written);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->task->c,
"h2_task(%s): write response body (%ld bytes)",
output->task->id, (long)written);
if (status == APR_SUCCESS) {
output->written += written;
if (h2_task_logio_add_bytes_out) {
- h2_task_logio_add_bytes_out(output->c, written);
+ h2_task_logio_add_bytes_out(output->task->c, written);
}
}
return status;
apr_status_t status = APR_SUCCESS;
if (APR_BRIGADE_EMPTY(bb)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->task->c,
"h2_task(%s): empty write", output->task->id);
return APR_SUCCESS;
}
if (output->task->frozen) {
- h2_util_bb_log(output->c, output->task->stream_id, APLOG_TRACE2,
+ h2_util_bb_log(output->task->c, output->task->stream_id, APLOG_TRACE2,
"frozen task output write, ignored", bb);
return APR_SUCCESS;
}
- if (output->state == H2_TASK_OUT_INIT) {
+ if (!output->response_open) {
status = open_response(output, f, bb, "write");
- output->state = H2_TASK_OUT_STARTED;
+ output->response_open = 1;
}
/* Attempt to write saved brigade first */
/* If the passed brigade is not empty, save it before return */
if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, output->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, output->task->c,
"h2_task(%s): could not write all, saving brigade",
output->task->id);
if (!output->bb) {
- output->bb = apr_brigade_create(output->c->pool, output->c->bucket_alloc);
+ output->bb = apr_brigade_create(output->task->pool, output->task->c->bucket_alloc);
}
- return ap_save_brigade(f, &output->bb, &bb, output->c->pool);
+ return ap_save_brigade(f, &output->bb, &bb, output->task->pool);
}
return status;
struct h2_task;
struct h2_from_h1;
-typedef enum {
- H2_TASK_OUT_INIT,
- H2_TASK_OUT_STARTED,
- H2_TASK_OUT_DONE,
-} h2_task_out_state_t;
-
typedef struct h2_task_output h2_task_output;
struct h2_task_output {
- conn_rec *c;
struct h2_task *task;
- h2_task_out_state_t state;
struct h2_from_h1 *from_h1;
+ unsigned int response_open : 1;
unsigned int trailers_passed : 1;
apr_off_t written;
if (APR_BUCKET_IS_EOS(b)) {
APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc));
}
- else if (APR_BUCKET_IS_FLUSH(b)) {
- APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to->bucket_alloc));
- }
else {
/* ignore */
}
* @macro
* Version number of the http2 module as c string
*/
-#define MOD_HTTP2_VERSION "1.4.2"
+#define MOD_HTTP2_VERSION "1.4.3"
/**
* @macro
* release. This is a 24 bit number with 8 bits for major number, 8 bits
* for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
*/
-#define MOD_HTTP2_VERSION_NUM 0x010402
+#define MOD_HTTP2_VERSION_NUM 0x010403
#endif /* mod_h2_h2_version_h */