From b9eb3fa8b39723fd81ef555a6840a3f684ee0bbc Mon Sep 17 00:00:00 2001 From: Stefan Eissing Date: Tue, 17 May 2016 14:33:15 +0000 Subject: [PATCH] mod_http2: checking configured Limit* values against header fields git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1744283 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 4 +++ modules/http2/h2_from_h1.c | 21 ++++++++----- modules/http2/h2_from_h1.h | 3 ++ modules/http2/h2_mplx.c | 62 +++++++++++++------------------------ modules/http2/h2_mplx.h | 2 +- modules/http2/h2_response.c | 3 +- modules/http2/h2_session.c | 3 +- modules/http2/h2_stream.c | 52 +++++++++++++++++++++++++++++++ modules/http2/h2_stream.h | 8 ++++- 9 files changed, 106 insertions(+), 52 deletions(-) diff --git a/CHANGES b/CHANGES index d2cf7279fd..45c15063a1 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,10 @@ -*- coding: utf-8 -*- Changes with Apache 2.5.0 + *) mod_http2: checking LimitRequestLine, LimitRequestFields and + LimitRequestFieldSize configured values for incoming streams. + [Stefan Eissing] + *) mod_http2: tracking conn_rec->current_thread on slave connections, so that mod_lua finds the correct one. Fixes PR 59542. 
[Stefan Eissing] diff --git a/modules/http2/h2_from_h1.c b/modules/http2/h2_from_h1.c index eb866a7911..0f893ec139 100644 --- a/modules/http2/h2_from_h1.c +++ b/modules/http2/h2_from_h1.c @@ -291,7 +291,8 @@ static void fix_vary(request_rec *r) } } -static void set_basic_http_header(request_rec *r, apr_table_t *headers) +void h2_from_h1_set_basic_http_header(apr_table_t *headers, request_rec *r, + apr_pool_t *pool) { char *date = NULL; const char *proxy_date = NULL; @@ -302,7 +303,7 @@ static void set_basic_http_header(request_rec *r, apr_table_t *headers) * keep the set-by-proxy server and date headers, otherwise * generate a new server header / date header */ - if (r->proxyreq != PROXYREQ_NONE) { + if (r && r->proxyreq != PROXYREQ_NONE) { proxy_date = apr_table_get(r->headers_out, "Date"); if (!proxy_date) { /* @@ -310,25 +311,29 @@ static void set_basic_http_header(request_rec *r, apr_table_t *headers) * our own Date header and pass it over to proxy_date later to * avoid a compiler warning. */ - date = apr_palloc(r->pool, APR_RFC822_DATE_LEN); + date = apr_palloc(pool, APR_RFC822_DATE_LEN); ap_recent_rfc822_date(date, r->request_time); } server = apr_table_get(r->headers_out, "Server"); } else { - date = apr_palloc(r->pool, APR_RFC822_DATE_LEN); - ap_recent_rfc822_date(date, r->request_time); + date = apr_palloc(pool, APR_RFC822_DATE_LEN); + ap_recent_rfc822_date(date, r? r->request_time : apr_time_now()); } apr_table_setn(headers, "Date", proxy_date ? 
proxy_date : date ); - apr_table_unset(r->headers_out, "Date"); + if (r) { + apr_table_unset(r->headers_out, "Date"); + } if (!server && *us) { server = us; } if (server) { apr_table_setn(headers, "Server", server); - apr_table_unset(r->headers_out, "Server"); + if (r) { + apr_table_unset(r->headers_out, "Server"); + } } } @@ -445,7 +450,7 @@ static h2_response *create_response(h2_from_h1 *from_h1, request_rec *r) headers = apr_table_make(r->pool, 10); - set_basic_http_header(r, headers); + h2_from_h1_set_basic_http_header(headers, r, r->pool); if (r->status == HTTP_NOT_MODIFIED) { apr_table_do((int (*)(void *, const char *, const char *)) copy_header, (void *) headers, r->headers_out, diff --git a/modules/http2/h2_from_h1.h b/modules/http2/h2_from_h1.h index af5dea24c1..71cc35faa9 100644 --- a/modules/http2/h2_from_h1.h +++ b/modules/http2/h2_from_h1.h @@ -69,4 +69,7 @@ apr_status_t h2_response_output_filter(ap_filter_t *f, apr_bucket_brigade *bb); apr_status_t h2_response_trailers_filter(ap_filter_t *f, apr_bucket_brigade *bb); +void h2_from_h1_set_basic_http_header(apr_table_t *headers, request_rec *r, + apr_pool_t *pool); + #endif /* defined(__mod_h2__h2_from_h1__) */ diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c index da5829f3a9..a88b071855 100644 --- a/modules/http2/h2_mplx.c +++ b/modules/http2/h2_mplx.c @@ -282,11 +282,11 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM); m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id)); + m->sready = h2_ihash_create(m->pool, offsetof(h2_stream,id)); m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id)); m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id)); m->q = h2_iq_create(m->pool, m->max_streams); m->tasks = h2_ihash_create(m->pool, offsetof(h2_task,stream_id)); - m->ready_tasks = h2_ihash_create(m->pool, offsetof(h2_task,stream_id)); m->stream_timeout = stream_timeout; m->workers = workers; @@ -373,7 
+373,6 @@ static void task_destroy(h2_mplx *m, h2_task *task, int called_from_master) && !task->rst_error); h2_ihash_remove(m->tasks, task->stream_id); - h2_ihash_remove(m->ready_tasks, task->stream_id); if (m->redo_tasks) { h2_ihash_remove(m->redo_tasks, task->stream_id); } @@ -428,7 +427,7 @@ static void stream_done(h2_mplx *m, h2_stream *stream, int rst_error) * stream destruction until the task is done. */ h2_iq_remove(m->q, stream->id); - h2_ihash_remove(m->ready_tasks, stream->id); + h2_ihash_remove(m->sready, stream->id); h2_ihash_remove(m->streams, stream->id); if (stream->input) { m->tx_handles_reserved += h2_beam_get_files_beamed(stream->input); @@ -657,11 +656,10 @@ apr_status_t h2_mplx_in_update_windows(h2_mplx *m) return status; } -static int task_iter_first(void *ctx, void *val) +static int stream_iter_first(void *ctx, void *val) { - task_iter_ctx *tctx = ctx; - h2_task *task = val; - tctx->task = task; + h2_stream **pstream = ctx; + *pstream = val; return 0; } @@ -673,17 +671,11 @@ h2_stream *h2_mplx_next_submit(h2_mplx *m) AP_DEBUG_ASSERT(m); if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) { - task_iter_ctx ctx; - ctx.m = m; - ctx.task = NULL; - h2_ihash_iter(m->ready_tasks, task_iter_first, &ctx); - - if (ctx.task && !m->aborted) { - h2_task *task = ctx.task; - - h2_ihash_remove(m->ready_tasks, task->stream_id); - stream = h2_ihash_get(m->streams, task->stream_id); - if (stream && task) { + h2_ihash_iter(m->sready, stream_iter_first, &stream); + if (stream) { + h2_task *task = h2_ihash_get(m->tasks, stream->id); + h2_ihash_remove(m->sready, stream->id); + if (task) { task->submitted = 1; if (task->rst_error) { h2_stream_rst(stream, task->rst_error); @@ -694,24 +686,11 @@ h2_stream *h2_mplx_next_submit(h2_mplx *m) task->output.beam); } } - else if (task) { - /* We have the io ready, but the stream has gone away, maybe - * reset by the client. Should no longer happen since such - * streams should clear io's from the ready queue. 
- */ - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03347) - "h2_mplx(%s): stream for response closed, " - "resetting io to close request processing", - task->id); - h2_task_rst(task, H2_ERR_STREAM_CLOSED); - if (!task->worker_started || task->worker_done) { - task_destroy(m, task, 1); - } - else { - /* hang around until the h2_task is done, but - * shutdown output */ - h2_task_shutdown(task, 0); - } + else { + /* We have the stream ready without a task. This happens + * when we fail streams early. A response should already + * be present. */ + AP_DEBUG_ASSERT(stream->response || stream->rst_error); } } leave_mutex(m, acquired); @@ -744,7 +723,7 @@ static apr_status_t out_open(h2_mplx *m, int stream_id, h2_response *response) h2_beam_mutex_set(task->output.beam, beam_enter, task->cond, m); } - h2_ihash_add(m->ready_tasks, task); + h2_ihash_add(m->sready, stream); if (response && response->http_status < 300) { /* we might see some file buckets in the output, see * if we have enough handles reserved. */ @@ -788,10 +767,9 @@ static apr_status_t out_close(h2_mplx *m, h2_task *task) if (!task->response && !task->rst_error) { /* In case a close comes before a response was created, - * insert an error one so that our streams can properly - * reset. + * insert an error one so that our streams can properly reset. 
*/ - h2_response *r = h2_response_die(task->stream_id, APR_EGENERAL, + h2_response *r = h2_response_die(task->stream_id, 500, task->request, m->pool); status = out_open(m, task->stream_id, r); ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c, @@ -876,6 +854,10 @@ apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream, if (m->aborted) { status = APR_ECONNABORTED; } + else if (stream->response) { + /* already have a response, schedule for submit */ + h2_ihash_add(m->sready, stream); + } else { h2_beam_create(&stream->input, stream->pool, stream->id, "input", 0); diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h index 9b316b0b3f..17cc75f1ee 100644 --- a/modules/http2/h2_mplx.h +++ b/modules/http2/h2_mplx.h @@ -73,12 +73,12 @@ struct h2_mplx { unsigned int need_registration : 1; struct h2_ihash_t *streams; /* all streams currently processing */ + struct h2_ihash_t *sready; /* all streams ready for response */ struct h2_ihash_t *shold; /* all streams done with task ongoing */ struct h2_ihash_t *spurge; /* all streams done, ready for destroy */ struct h2_iqueue *q; /* all stream ids that need to be started */ struct h2_ihash_t *tasks; /* all tasks started and not destroyed */ - struct h2_ihash_t *ready_tasks; /* all tasks ready for submit */ struct h2_ihash_t *redo_tasks; /* all tasks that need to be redone */ apr_uint32_t max_streams; /* max # of concurrent streams */ diff --git a/modules/http2/h2_response.c b/modules/http2/h2_response.c index eb9043d0db..4cafd3550e 100644 --- a/modules/http2/h2_response.c +++ b/modules/http2/h2_response.c @@ -171,13 +171,14 @@ h2_response *h2_response_die(int stream_id, apr_status_t type, { apr_table_t *headers = apr_table_make(pool, 5); char *date = NULL; + int status = (type >= 200 && type < 600)? 
type : 500; date = apr_palloc(pool, APR_RFC822_DATE_LEN); ap_recent_rfc822_date(date, req->request_time); apr_table_setn(headers, "Date", date); apr_table_setn(headers, "Server", ap_get_server_banner()); - return h2_response_create_int(stream_id, 0, 500, headers, NULL, pool); + return h2_response_create_int(stream_id, 0, status, headers, NULL, pool); } h2_response *h2_response_clone(apr_pool_t *pool, h2_response *from) diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c index ce5c5c771b..999a360f28 100644 --- a/modules/http2/h2_session.c +++ b/modules/http2/h2_session.c @@ -1301,7 +1301,8 @@ static apr_status_t submit_response(h2_session *session, h2_stream *stream) else { ++session->responses_submitted; } - + stream->submitted = 1; + if (nghttp2_is_fatal(rv)) { status = APR_EGENERAL; dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv)); diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c index dcc25da424..9f55d0276f 100644 --- a/modules/http2/h2_stream.c +++ b/modules/http2/h2_stream.c @@ -290,6 +290,41 @@ apr_status_t h2_stream_add_header(h2_stream *stream, const char *value, size_t vlen) { AP_DEBUG_ASSERT(stream); + if (stream->response) { + /* already have the response, ignore any more request headers + (hint: response might already be a failure due to previous errors) */ + return APR_SUCCESS; + } + + if ((nlen > 0) && name[0] == ':') { + if ((vlen) > stream->session->s->limit_req_line) { + /* pseudo header: approximation of request line size check */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + "h2_stream(%ld-%d): pseudo header %s too long", + stream->session->id, stream->id, name); + return h2_stream_set_error(stream, + HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE); + } + } + else if ((nlen + 2 + vlen) > stream->session->s->limit_req_fieldsize) { + /* header too long */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + "h2_stream(%ld-%d): header %s too long", + 
stream->session->id, stream->id, name); + return h2_stream_set_error(stream, + HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE); + } + + ++stream->request_headers_added; + if (stream->request_headers_added > stream->session->s->limit_req_fields) { + /* too many header lines */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, + "h2_stream(%ld-%d): too many header lines", + stream->session->id, stream->id); + return h2_stream_set_error(stream, + HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE); + } + if (h2_stream_is_scheduled(stream)) { return h2_request_add_trailer(stream->request, stream->pool, name, nlen, value, vlen); @@ -321,6 +356,11 @@ apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled, close_input(stream); } + if (stream->response) { + /* already have a response, probably an HTTP error code */ + return h2_mplx_process(stream->session->mplx, stream, cmp, ctx); + } + /* Seeing the end-of-headers, we have everything we need to * start processing it. */ @@ -510,6 +550,18 @@ apr_status_t h2_stream_set_response(h2_stream *stream, h2_response *response, return status; } +apr_status_t h2_stream_set_error(h2_stream *stream, int http_status) +{ + h2_response *response; + + if (stream->submitted) { + return APR_EINVAL; + } + response = h2_response_die(stream->id, http_status, stream->request, + stream->pool); + return h2_stream_set_response(stream, response, NULL); +} + static const apr_size_t DATA_CHUNK_SIZE = ((16*1024) - 100 - 9); apr_status_t h2_stream_out_prepare(h2_stream *stream, diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h index 33f28f6eab..84f9717140 100644 --- a/modules/http2/h2_stream.h +++ b/modules/http2/h2_stream.h @@ -49,7 +49,8 @@ struct h2_stream { apr_pool_t *pool; /* the memory pool for this stream */ struct h2_request *request; /* the request made in this stream */ struct h2_bucket_beam *input; - + int request_headers_added; /* number of request headers added */ + struct h2_response *response; struct 
h2_bucket_beam *output; apr_bucket_brigade *buffer; @@ -188,6 +189,11 @@ apr_status_t h2_stream_set_response(h2_stream *stream, struct h2_response *response, struct h2_bucket_beam *output); +/** + * Set the HTTP error status as response. + */ +apr_status_t h2_stream_set_error(h2_stream *stream, int http_status); + /** * Do a speculative read on the stream output to determine the * amount of data that can be read. -- 2.50.1