-*- coding: utf-8 -*-
Changes with Apache 2.5.0
+ *) mod_http2: checking the configured values of LimitRequestLine,
+ LimitRequestFields and LimitRequestFieldSize for incoming streams.
+ [Stefan Eissing]
+
*) mod_http2: tracking conn_rec->current_thread on slave connections, so
that mod_lua finds the correct one. Fixes PR 59542. [Stefan Eissing]
}
}
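+/* Set basic Date/Server headers on the given table. Works without a
+ * request_rec (r may be NULL), allocating from the supplied pool. */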
-static void set_basic_http_header(request_rec *r, apr_table_t *headers)
+void h2_from_h1_set_basic_http_header(apr_table_t *headers, request_rec *r,
+ apr_pool_t *pool)
{
char *date = NULL;
const char *proxy_date = NULL;
* keep the set-by-proxy server and date headers, otherwise
* generate a new server header / date header
*/
- if (r->proxyreq != PROXYREQ_NONE) {
+ if (r && r->proxyreq != PROXYREQ_NONE) {
proxy_date = apr_table_get(r->headers_out, "Date");
if (!proxy_date) {
/*
* our own Date header and pass it over to proxy_date later to
* avoid a compiler warning.
*/
- date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
ap_recent_rfc822_date(date, r->request_time);
}
server = apr_table_get(r->headers_out, "Server");
}
else {
- date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
- ap_recent_rfc822_date(date, r->request_time);
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r? r->request_time : apr_time_now());
}
apr_table_setn(headers, "Date", proxy_date ? proxy_date : date );
- apr_table_unset(r->headers_out, "Date");
+ if (r) {
+ apr_table_unset(r->headers_out, "Date");
+ }
if (!server && *us) {
server = us;
}
if (server) {
apr_table_setn(headers, "Server", server);
- apr_table_unset(r->headers_out, "Server");
+ if (r) {
+ apr_table_unset(r->headers_out, "Server");
+ }
}
}
headers = apr_table_make(r->pool, 10);
- set_basic_http_header(r, headers);
+ h2_from_h1_set_basic_http_header(headers, r, r->pool);
if (r->status == HTTP_NOT_MODIFIED) {
apr_table_do((int (*)(void *, const char *, const char *)) copy_header,
(void *) headers, r->headers_out,
apr_status_t h2_response_trailers_filter(ap_filter_t *f, apr_bucket_brigade *bb);
+void h2_from_h1_set_basic_http_header(apr_table_t *headers, request_rec *r,
+ apr_pool_t *pool);
+
#endif /* defined(__mod_h2__h2_from_h1__) */
m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
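+ /* sready: streams with a response ready for submission; replaces
+ the former per-task ready_tasks hash */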
+ m->sready = h2_ihash_create(m->pool, offsetof(h2_stream,id));
m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id));
m->q = h2_iq_create(m->pool, m->max_streams);
m->tasks = h2_ihash_create(m->pool, offsetof(h2_task,stream_id));
- m->ready_tasks = h2_ihash_create(m->pool, offsetof(h2_task,stream_id));
m->stream_timeout = stream_timeout;
m->workers = workers;
&& !task->rst_error);
h2_ihash_remove(m->tasks, task->stream_id);
- h2_ihash_remove(m->ready_tasks, task->stream_id);
if (m->redo_tasks) {
h2_ihash_remove(m->redo_tasks, task->stream_id);
}
* stream destruction until the task is done.
*/
h2_iq_remove(m->q, stream->id);
- h2_ihash_remove(m->ready_tasks, stream->id);
+ h2_ihash_remove(m->sready, stream->id);
h2_ihash_remove(m->streams, stream->id);
if (stream->input) {
m->tx_handles_reserved += h2_beam_get_files_beamed(stream->input);
return status;
}
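+/* h2_ihash iterator callback: remember the first stream seen and
+ * return 0 to stop the iteration */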
-static int task_iter_first(void *ctx, void *val)
+static int stream_iter_first(void *ctx, void *val)
{
- task_iter_ctx *tctx = ctx;
- h2_task *task = val;
- tctx->task = task;
+ h2_stream **pstream = ctx;
+ *pstream = val;
return 0;
}
AP_DEBUG_ASSERT(m);
if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- task_iter_ctx ctx;
- ctx.m = m;
- ctx.task = NULL;
- h2_ihash_iter(m->ready_tasks, task_iter_first, &ctx);
-
- if (ctx.task && !m->aborted) {
- h2_task *task = ctx.task;
-
- h2_ihash_remove(m->ready_tasks, task->stream_id);
- stream = h2_ihash_get(m->streams, task->stream_id);
- if (stream && task) {
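+ /* pop the first stream that has a response ready for submission */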
+ h2_ihash_iter(m->sready, stream_iter_first, &stream);
+ if (stream) {
+ h2_task *task = h2_ihash_get(m->tasks, stream->id);
+ h2_ihash_remove(m->sready, stream->id);
+ if (task) {
task->submitted = 1;
if (task->rst_error) {
h2_stream_rst(stream, task->rst_error);
task->output.beam);
}
}
- else if (task) {
- /* We have the io ready, but the stream has gone away, maybe
- * reset by the client. Should no longer happen since such
- * streams should clear io's from the ready queue.
- */
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03347)
- "h2_mplx(%s): stream for response closed, "
- "resetting io to close request processing",
- task->id);
- h2_task_rst(task, H2_ERR_STREAM_CLOSED);
- if (!task->worker_started || task->worker_done) {
- task_destroy(m, task, 1);
- }
- else {
- /* hang around until the h2_task is done, but
- * shutdown output */
- h2_task_shutdown(task, 0);
- }
+ else {
+ /* We have the stream ready without a task. This happens
+ * when we fail streams early. A response should already
+ * be present. */
+ AP_DEBUG_ASSERT(stream->response || stream->rst_error);
}
}
leave_mutex(m, acquired);
h2_beam_mutex_set(task->output.beam, beam_enter, task->cond, m);
}
- h2_ihash_add(m->ready_tasks, task);
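+ /* mark the stream itself as ready; the submit iteration picks
+ it up from m->sready */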
+ h2_ihash_add(m->sready, stream);
if (response && response->http_status < 300) {
/* we might see some file buckets in the output, see
* if we have enough handles reserved. */
if (!task->response && !task->rst_error) {
/* In case a close comes before a response was created,
- * insert an error one so that our streams can properly
- * reset.
+ * insert an error one so that our streams can properly reset.
*/
- h2_response *r = h2_response_die(task->stream_id, APR_EGENERAL,
+ h2_response *r = h2_response_die(task->stream_id, 500,
task->request, m->pool);
status = out_open(m, task->stream_id, r);
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c,
if (m->aborted) {
status = APR_ECONNABORTED;
}
+ else if (stream->response) {
+ /* already have a response, schedule it for submission */
+ h2_ihash_add(m->sready, stream);
+ }
else {
h2_beam_create(&stream->input, stream->pool, stream->id,
"input", 0);
unsigned int need_registration : 1;
struct h2_ihash_t *streams; /* all streams currently processing */
+ struct h2_ihash_t *sready; /* all streams with a response ready for submission */
struct h2_ihash_t *shold; /* all streams done with task ongoing */
struct h2_ihash_t *spurge; /* all streams done, ready for destroy */
struct h2_iqueue *q; /* all stream ids that need to be started */
struct h2_ihash_t *tasks; /* all tasks started and not destroyed */
- struct h2_ihash_t *ready_tasks; /* all tasks ready for submit */
struct h2_ihash_t *redo_tasks; /* all tasks that need to be redone */
apr_uint32_t max_streams; /* max # of concurrent streams */
{
apr_table_t *headers = apr_table_make(pool, 5);
char *date = NULL;
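+ /* accept 'type' as an HTTP status when it is in the valid range,
+ fall back to 500 otherwise */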
+ int status = (type >= 200 && type < 600)? type : 500;
date = apr_palloc(pool, APR_RFC822_DATE_LEN);
ap_recent_rfc822_date(date, req->request_time);
apr_table_setn(headers, "Date", date);
apr_table_setn(headers, "Server", ap_get_server_banner());
- return h2_response_create_int(stream_id, 0, 500, headers, NULL, pool);
+ return h2_response_create_int(stream_id, 0, status, headers, NULL, pool);
}
h2_response *h2_response_clone(apr_pool_t *pool, h2_response *from)
else {
++session->responses_submitted;
}
-
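+ /* remember that the response was submitted; h2_stream_set_error
+ refuses to replace it afterwards */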
+ stream->submitted = 1;
+
if (nghttp2_is_fatal(rv)) {
status = APR_EGENERAL;
dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
const char *value, size_t vlen)
{
AP_DEBUG_ASSERT(stream);
+ if (stream->response) {
+ /* already have the response, ignore any more request headers
+ (hint: the response might already be a failure due to previous errors) */
+ return APR_SUCCESS;
+ }
+
+ if ((nlen > 0) && name[0] == ':') {
+ if (vlen > stream->session->s->limit_req_line) {
+ /* pseudo header: approximation of request line size check */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): pseudo header %s too long",
+ stream->session->id, stream->id, name);
+ return h2_stream_set_error(stream,
+ HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
+ }
+ }
+ else if ((nlen + 2 + vlen) > stream->session->s->limit_req_fieldsize) {
+ /* single header line too long: nlen + 2 + vlen approximates
+ the serialized "name: value" length */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): header %s too long",
+ stream->session->id, stream->id, name);
+ return h2_stream_set_error(stream,
+ HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
+ }
+
+ ++stream->request_headers_added;
+ if (stream->request_headers_added > stream->session->s->limit_req_fields) {
+ /* too many header lines */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ "h2_stream(%ld-%d): too many header lines",
+ stream->session->id, stream->id);
+ return h2_stream_set_error(stream,
+ HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
+ }
+
if (h2_stream_is_scheduled(stream)) {
return h2_request_add_trailer(stream->request, stream->pool,
name, nlen, value, vlen);
close_input(stream);
}
+ if (stream->response) {
+ /* already have a response, probably an HTTP error code */
+ return h2_mplx_process(stream->session->mplx, stream, cmp, ctx);
+ }
+
/* Seeing the end-of-headers, we have everything we need to
* start processing it.
*/
return status;
}
+apr_status_t h2_stream_set_error(h2_stream *stream, int http_status)
+{
+ h2_response *response;
+
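+ /* too late, the response has already been submitted to the client */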
+ if (stream->submitted) {
+ return APR_EINVAL;
+ }
+ response = h2_response_die(stream->id, http_status, stream->request,
+ stream->pool);
+ return h2_stream_set_response(stream, response, NULL);
+}
+
static const apr_size_t DATA_CHUNK_SIZE = ((16*1024) - 100 - 9);
apr_status_t h2_stream_out_prepare(h2_stream *stream,
apr_pool_t *pool; /* the memory pool for this stream */
struct h2_request *request; /* the request made in this stream */
struct h2_bucket_beam *input;
-
+ int request_headers_added; /* request headers seen, checked against LimitRequestFields */
+
struct h2_response *response;
struct h2_bucket_beam *output;
apr_bucket_brigade *buffer;
struct h2_response *response,
struct h2_bucket_beam *output);
+/**
+ * Set an error response with the given HTTP status on the stream,
+ * unless a response has already been submitted.
+ */
+apr_status_t h2_stream_set_error(h2_stream *stream, int http_status);
+
/**
* Do a speculative read on the stream output to determine the
* amount of data that can be read.