granicus.if.org Git - apache/commitdiff
fixed window update on chunked uploads, moved handling of chunking to later stage
author    Stefan Eissing <icing@apache.org>
          Fri, 11 Dec 2015 12:57:32 +0000 (12:57 +0000)
committer Stefan Eissing <icing@apache.org>
          Fri, 11 Dec 2015 12:57:32 +0000 (12:57 +0000)
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1719403 13f79535-47bb-0310-9956-ffa450edef68
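For orientation: after this change, the HTTP/1.1 chunked transfer-coding for request bodies without a Content-Length is no longer applied when DATA frames arrive on the stream (h2_stream.c) but only when a worker task actually reads the body (h2_io_in_read() in h2_io.c). The sketch below is not part of the commit and uses plain C string formatting instead of APR brigades; it only illustrates the framing the new code emits: each block of payload is prefixed with its length in hex plus CRLF and followed by CRLF, and end-of-stream is a zero-length chunk, optionally carrying trailer headers.

#include <stdio.h>

/* Illustrative only: frame one block of payload the way the new
 * h2_io_in_read() does with apr_brigade_printf()/apr_brigade_puts(). */
static int chunk_frame(char *out, size_t outlen, const char *data, size_t len)
{
    /* "<hex-len>\r\n" + payload + "\r\n" */
    return snprintf(out, outlen, "%lx\r\n%.*s\r\n",
                    (unsigned long)len, (int)len, data);
}

/* Terminating frame, as produced by the new append_eos(): a zero-length
 * chunk, optionally followed by trailer header lines, then a blank line. */
static int chunk_eos(char *out, size_t outlen, const char *trailer_lines)
{
    if (trailer_lines && *trailer_lines) {
        return snprintf(out, outlen, "0\r\n%s\r\n", trailer_lines);
    }
    return snprintf(out, outlen, "0\r\n\r\n");
}

int main(void)
{
    char buf[256];

    chunk_frame(buf, sizeof(buf), "hello", 5);
    printf("%s", buf);   /* 5<CRLF>hello<CRLF> */

    chunk_eos(buf, sizeof(buf), "x-checksum: abc\r\n");
    printf("%s", buf);   /* 0<CRLF>x-checksum: abc<CRLF><CRLF> */
    return 0;
}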

CHANGES
modules/http2/h2_conn.c
modules/http2/h2_h2.c
modules/http2/h2_io.c
modules/http2/h2_io.h
modules/http2/h2_mplx.c
modules/http2/h2_stream.c
modules/http2/h2_task.h
modules/http2/h2_task_input.c

diff --git a/CHANGES b/CHANGES
index 1c081d6af6f7510c9d6e93996924ff0eead3f8a9..a1c24981c09e6abfa6cd873b78e61064f0cd871f 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -1,8 +1,8 @@
                                                          -*- coding: utf-8 -*-
 Changes with Apache 2.5.0
 
-  *) mod_ssl: for all ssl_engine_vars.c lookups, fall back to master connection
-     if conn_rec itself holds no valid SSLConnRec*. Fixes PR58666.
+  *) mod_http2: fixed bug in input window size calculation by moving chunked
+     request body encoding into later stage of processing.
      [Stefan Eissing]
      
   *) mod_proxy_fdpass: Fix AH01153 error when using the default configuration.
diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c
index b9b6e87bf0e51eb6c035501ddf27327a5ddac7c6..8d14b09c7cf00627615ecc67630c8f9bfb6827be 100644 (file)
--- a/modules/http2/h2_conn.c
+++ b/modules/http2/h2_conn.c
@@ -163,6 +163,9 @@ apr_status_t h2_conn_process(conn_rec *c, request_rec *r, server_rec *s)
                               NGHTTP2_INADEQUATE_SECURITY, NULL, 0);
     } 
 
+    /* What do install instead? */
+    ap_remove_input_filter_byhandle(c->input_filters, "reqtimeout");
+    
     ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
     status = h2_session_start(session, &rv);
     
diff --git a/modules/http2/h2_h2.c b/modules/http2/h2_h2.c
index 2f93340d9c61a59d8d3a524e82659b240bcb90ab..dee6f1e14937f9efc966365f089b2dc137dadbe1 100644 (file)
--- a/modules/http2/h2_h2.c
+++ b/modules/http2/h2_h2.c
@@ -33,6 +33,8 @@
 #include "h2_config.h"
 #include "h2_ctx.h"
 #include "h2_conn.h"
+#include "h2_session.h"
+#include "h2_util.h"
 #include "h2_h2.h"
 
 const char *h2_tls_protos[] = {
@@ -436,7 +438,6 @@ static int cipher_is_blacklisted(const char *cipher, const char **psource)
  * - process_conn take over connection in case of h2
  */
 static int h2_h2_process_conn(conn_rec* c);
-static int h2_h2_remove_timeout(conn_rec* c);
 static int h2_h2_post_read_req(request_rec *r);
 
 
@@ -553,7 +554,6 @@ int h2_allows_h2_upgrade(conn_rec *c)
 /*******************************************************************************
  * Register various hooks
  */
-static const char *const mod_reqtimeout[] = { "reqtimeout.c", NULL};
 static const char* const mod_ssl[]        = {"mod_ssl.c", NULL};
 
 void h2_h2_register_hooks(void)
@@ -564,11 +564,6 @@ void h2_h2_register_hooks(void)
     ap_hook_process_connection(h2_h2_process_conn, 
                                mod_ssl, NULL, APR_HOOK_MIDDLE);
                                
-    /* Perform connection cleanup before the actual processing happens.
-     */
-    ap_hook_process_connection(h2_h2_remove_timeout, 
-                               mod_reqtimeout, NULL, APR_HOOK_LAST);
-    
     /* With "H2SerializeHeaders On", we install the filter in this hook
      * that parses the response. This needs to happen before any other post
      * read function terminates the request with an error. Otherwise we will
@@ -577,18 +572,6 @@ void h2_h2_register_hooks(void)
     ap_hook_post_read_request(h2_h2_post_read_req, NULL, NULL, APR_HOOK_REALLY_FIRST);
 }
 
-static int h2_h2_remove_timeout(conn_rec* c)
-{
-    h2_ctx *ctx = h2_ctx_get(c);
-    
-    if (h2_ctx_is_active(ctx) && !h2_ctx_is_task(ctx)) {
-        /* cleanup on master h2 connections */
-        ap_remove_input_filter_byhandle(c->input_filters, "reqtimeout");
-    }
-    
-    return DECLINED;
-}
-
 int h2_h2_process_conn(conn_rec* c)
 {
     h2_ctx *ctx = h2_ctx_get(c);
@@ -660,28 +643,32 @@ int h2_h2_process_conn(conn_rec* c)
 
 static int h2_h2_post_read_req(request_rec *r)
 {
-    h2_ctx *ctx = h2_ctx_rget(r);
-    struct h2_task *task = h2_ctx_get_task(ctx);
-    if (task) {
-        /* FIXME: sometimes, this hook gets called twice for a single request.
-         * This should not be, right? */
-        /* h2_task connection for a stream, not for h2c */
-        ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
-                      "adding h1_to_h2_resp output filter");
-        if (task->serialize_headers) {
-            ap_remove_output_filter_byhandle(r->output_filters, "H1_TO_H2_RESP");
-            ap_add_output_filter("H1_TO_H2_RESP", task, r, r->connection);
-        }
-        else {
-            /* replace the core http filter that formats response headers
-             * in HTTP/1 with our own that collects status and headers */
-            ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
-            ap_remove_output_filter_byhandle(r->output_filters, "H2_RESPONSE");
-            ap_add_output_filter("H2_RESPONSE", task, r, r->connection);
+    /* slave connection? */
+    if (r->connection->master) {
+        h2_ctx *ctx = h2_ctx_rget(r);
+        struct h2_task *task = h2_ctx_get_task(ctx);
+        /* This hook will get called twice on internal redirects. Take care
+         * that we manipulate filters only once. */
+        /* our slave connection? */
+        if (task && !task->filters_set) {
+            /* setup the correct output filters to process the response
+             * on the proper mod_http2 way. */
+            ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, "adding task output filter");
+            if (task->serialize_headers) {
+                ap_add_output_filter("H1_TO_H2_RESP", task, r, r->connection);
+            }
+            else {
+                /* replace the core http filter that formats response headers
+                 * in HTTP/1 with our own that collects status and headers */
+                ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
+                ap_add_output_filter("H2_RESPONSE", task, r, r->connection);
+            }
+            ap_add_output_filter("H2_TRAILERS", task, r, r->connection);
+            task->filters_set = 1;
         }
-        ap_add_output_filter("H2_TRAILERS", task, r, r->connection);
     }
     return DECLINED;
 }
 
 
+
diff --git a/modules/http2/h2_io.c b/modules/http2/h2_io.c
index 98ba60e6abb405a3a1fe43a71e3e83599da50182..9e968286565acfab01c4ba949b4069fe12f35549 100644 (file)
--- a/modules/http2/h2_io.c
+++ b/modules/http2/h2_io.c
@@ -23,6 +23,7 @@
 #include "h2_private.h"
 #include "h2_io.h"
 #include "h2_response.h"
+#include "h2_request.h"
 #include "h2_task.h"
 #include "h2_util.h"
 
@@ -33,8 +34,6 @@ h2_io *h2_io_create(int id, apr_pool_t *pool, apr_bucket_alloc_t *bucket_alloc)
         io->id = id;
         io->pool = pool;
         io->bucket_alloc = bucket_alloc;
-        io->bbin = NULL;
-        io->bbout = NULL;
     }
     return io;
 }
@@ -96,11 +95,39 @@ apr_status_t h2_io_in_shutdown(h2_io *io)
     return h2_io_in_close(io);
 }
 
+static int add_trailer(void *ctx, const char *key, const char *value)
+{
+    apr_bucket_brigade *bb = ctx;
+    apr_status_t status;
+    
+    status = apr_brigade_printf(bb, NULL, NULL, "%s: %s\r\n", 
+                                key, value);
+    return (status == APR_SUCCESS);
+}
+
+static apr_status_t append_eos(h2_io *io, apr_bucket_brigade *bb)
+{
+    apr_status_t status = APR_SUCCESS;
+    
+    if (io->request->chunked) {
+        apr_table_t *trailers = io->request->trailers;
+        if (trailers && !apr_is_empty_table(trailers)) {
+            status = apr_brigade_puts(bb, NULL, NULL, "0\r\n");
+            apr_table_do(add_trailer, bb, trailers, NULL);
+            status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
+        }
+        else {
+            status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+        }
+    }
+    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(io->bbin->bucket_alloc));
+    return status;
+}
+
 apr_status_t h2_io_in_read(h2_io *io, apr_bucket_brigade *bb, 
                            apr_size_t maxlen)
 {
     apr_off_t start_len = 0;
-    apr_bucket *last;
     apr_status_t status;
 
     if (io->rst_error) {
@@ -108,21 +135,56 @@ apr_status_t h2_io_in_read(h2_io *io, apr_bucket_brigade *bb,
     }
     
     if (!io->bbin || APR_BRIGADE_EMPTY(io->bbin)) {
-        return io->eos_in? APR_EOF : APR_EAGAIN;
+        if (io->eos_in) {
+            if (!io->eos_in_written) {
+                status = append_eos(io, bb);
+                io->eos_in_written = 1;
+                return status;
+            }
+            return APR_EOF;
+        }
+        return APR_EAGAIN;
     }
     
-    apr_brigade_length(bb, 1, &start_len);
-    last = APR_BRIGADE_LAST(bb);
-    status = h2_util_move(bb, io->bbin, maxlen, NULL, "h2_io_in_read");
-    if (status == APR_SUCCESS) {
-        apr_bucket *nlast = APR_BRIGADE_LAST(bb);
-        apr_off_t end_len = 0;
-        apr_brigade_length(bb, 1, &end_len);
-        if (last == nlast) {
-            return APR_EAGAIN;
+    if (io->request->chunked) {
+        /* the reader expects HTTP/1.1 chunked encoding */
+        if (!io->tmp) {
+            io->tmp = apr_brigade_create(io->pool, io->bucket_alloc);
+        }
+        apr_brigade_cleanup(io->tmp);
+        
+        status = h2_util_move(io->tmp, io->bbin, maxlen, NULL, "h2_io_in_read_chunk");
+        if (status == APR_SUCCESS) {
+            apr_off_t tmp_len = 0;
+            
+            apr_brigade_length(io->tmp, 1, &tmp_len);
+            if (tmp_len > 0) {
+                io->input_consumed += tmp_len;
+                status = apr_brigade_printf(bb, NULL, NULL, "%lx\r\n", 
+                                            (unsigned long)tmp_len);
+                if (status == APR_SUCCESS) {
+                    status = h2_util_move(bb, io->tmp, -1, NULL, "h2_io_in_read_tmp1");
+                    if (status == APR_SUCCESS) {
+                        status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
+                    }
+                }
+            }
+            else {
+                status = h2_util_move(bb, io->tmp, -1, NULL, "h2_io_in_read_tmp2");
+            }
         }
-        io->input_consumed += (end_len - start_len);
     }
+    else {
+        apr_brigade_length(bb, 1, &start_len);
+        
+        status = h2_util_move(bb, io->bbin, maxlen, NULL, "h2_io_in_read");
+        if (status == APR_SUCCESS) {
+            apr_off_t end_len = 0;
+            apr_brigade_length(bb, 1, &end_len);
+            io->input_consumed += (end_len - start_len);
+        }
+    }
+    
     return status;
 }
 
@@ -151,10 +213,6 @@ apr_status_t h2_io_in_close(h2_io *io)
         return APR_ECONNABORTED;
     }
     
-    if (io->bbin) {
-        APR_BRIGADE_INSERT_TAIL(io->bbin, 
-                                apr_bucket_eos_create(io->bbin->bucket_alloc));
-    }
     io->eos_in = 1;
     return APR_SUCCESS;
 }
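The window-update fix itself is in the chunked branch of h2_io_in_read() above: io->input_consumed is advanced by tmp_len, the number of payload bytes moved out of io->bbin, before the hex chunk header and CRLFs are added. With the old placement of the chunking (in h2_stream.c, see below), the consumed count was derived from the growth of the destination brigade and therefore included framing bytes that mod_http2 had generated locally and the client never sent. A rough worked example, under the assumption that input_consumed is what later gets acknowledged back to the client as flow-control window:

#include <stdio.h>

/* Illustrative arithmetic only; not mod_http2 code. A 6-byte DATA payload
 * gets wrapped as "6\r\n" + payload + "\r\n" when chunk-encoded. */
int main(void)
{
    long payload_len = 6;            /* bytes the client actually sent    */
    long framing_len = 3 + 2;        /* "6\r\n" prefix plus "\r\n" suffix */

    long old_consumed = payload_len + framing_len;  /* brigade growth, pre-fix */
    long new_consumed = payload_len;                /* tmp_len only, post-fix  */

    printf("pre-fix window credit:  %ld (over-counts by %ld)\n",
           old_consumed, old_consumed - payload_len);
    printf("post-fix window credit: %ld\n", new_consumed);
    return 0;
}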
diff --git a/modules/http2/h2_io.h b/modules/http2/h2_io.h
index 08f3aa3dad73d8a6f3cb4ff638169091fd58b397..6dd447421197c7075546cb4a97881547a9024cc3 100644 (file)
--- a/modules/http2/h2_io.h
+++ b/modules/http2/h2_io.h
@@ -40,6 +40,7 @@ struct h2_io {
     int rst_error;
 
     int eos_in;
+    int eos_in_written;
     apr_bucket_brigade *bbin;    /* input data for stream */
     struct apr_thread_cond_t *input_arrived; /* block on reading */
     apr_size_t input_consumed;   /* how many bytes have been read */
@@ -50,6 +51,7 @@ struct h2_io {
     struct apr_thread_cond_t *output_drained; /* block on writing */
     
     int files_handles_owned;
+    apr_bucket_brigade *tmp;     /* temporary data for chunking */
 };
 
 /*******************************************************************************
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
index 333a9b5c6bd7b0fba1a91cd0cbeaed7a6f0cc564..4c3d16dce97b449e28202d56f67db016324f8c23 100644 (file)
--- a/modules/http2/h2_mplx.c
+++ b/modules/http2/h2_mplx.c
@@ -357,6 +357,9 @@ apr_status_t h2_mplx_in_read(h2_mplx *m, apr_read_type_e block,
             while (APR_STATUS_IS_EAGAIN(status) 
                    && !is_aborted(m, &status)
                    && block == APR_BLOCK_READ) {
+                ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
+                              "h2_mplx(%ld-%d): wait on in data (BLOCK_READ)", 
+                              m->id, stream_id);
                 apr_thread_cond_wait(io->input_arrived, m->lock);
                 status = h2_io_in_read(io, bb, -1);
             }
diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c
index 58f722bc645ccb3a2e32c92a5372ab5696b3fdda..06eee07eccafedd88233ca17e6452ace9ea35109 100644 (file)
--- a/modules/http2/h2_stream.c
+++ b/modules/http2/h2_stream.c
@@ -351,40 +351,9 @@ static apr_status_t input_flush(apr_bucket_brigade *bb, void *ctx)
 }
 
 static apr_status_t input_add_data(h2_stream *stream,
-                                   const char *data, size_t len, int chunked)
+                                   const char *data, size_t len)
 {
-    apr_status_t status = APR_SUCCESS;
-    
-    if (chunked) {
-        status = apr_brigade_printf(stream->bbin, input_flush, stream,
-                                    "%lx\r\n", (unsigned long)len);
-        if (status == APR_SUCCESS) {
-            status = apr_brigade_write(stream->bbin, input_flush, stream, data, len);
-            if (status == APR_SUCCESS) {
-                status = apr_brigade_puts(stream->bbin, input_flush, stream, "\r\n");
-            }
-        }
-    }
-    else {
-        status = apr_brigade_write(stream->bbin, input_flush, stream, data, len);
-    }
-    return status;
-}
-
-static int input_add_header(void *str, const char *key, const char *value)
-{
-    h2_stream *stream = str;
-    apr_status_t status = input_add_data(stream, key, strlen(key), 0);
-    if (status == APR_SUCCESS) {
-        status = input_add_data(stream, ": ", 2, 0);
-        if (status == APR_SUCCESS) {
-            status = input_add_data(stream, value, strlen(value), 0);
-            if (status == APR_SUCCESS) {
-                status = input_add_data(stream, "\r\n", 2, 0);
-            }
-        }
-    }
-    return (status == APR_SUCCESS);
+    return apr_brigade_write(stream->bbin, input_flush, stream, data, len);
 }
 
 apr_status_t h2_stream_close_input(h2_stream *stream)
@@ -402,21 +371,7 @@ apr_status_t h2_stream_close_input(h2_stream *stream)
     
     H2_STREAM_IN(APLOG_TRACE2, stream, "close_pre");
     if (close_input(stream) && stream->bbin) {
-        if (stream->request->chunked) {
-            apr_table_t *trailers = stream->request->trailers;
-            if (trailers && !apr_is_empty_table(trailers)) {
-                status = input_add_data(stream, "0\r\n", 3, 0);
-                apr_table_do(input_add_header, stream, trailers, NULL);
-                status = input_add_data(stream, "\r\n", 2, 0);
-            }
-            else {
-                status = input_add_data(stream, "0\r\n\r\n", 5, 0);
-            }
-        }
-        
-        if (status == APR_SUCCESS) {
-            status = h2_stream_input_flush(stream);
-        }
+        status = h2_stream_input_flush(stream);
         if (status == APR_SUCCESS) {
             status = h2_mplx_in_close(stream->session->mplx, stream->id);
         }
@@ -444,13 +399,7 @@ apr_status_t h2_stream_write_data(h2_stream *stream,
                   stream->session->id, stream->id, (long)len);
 
     H2_STREAM_IN(APLOG_TRACE2, stream, "write_data_pre");
-    if (stream->request->chunked) {
-        /* if input may have a body and we have not seen any
-         * content-length header, we need to chunk the input data.
-         */
-        status = input_add_data(stream, data, len, 1);
-    }
-    else {
+    if (!stream->request->chunked) {
         stream->input_remaining -= len;
         if (stream->input_remaining < 0) {
             ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c,
@@ -463,8 +412,9 @@ apr_status_t h2_stream_write_data(h2_stream *stream,
             h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR);
             return APR_ECONNABORTED;
         }
-        status = input_add_data(stream, data, len, 0);
     }
+    
+    status = input_add_data(stream, data, len);
     if (status == APR_SUCCESS) {
         status = h2_stream_input_flush(stream);
     }
diff --git a/modules/http2/h2_task.h b/modules/http2/h2_task.h
index 1cc32d647ce9a71b8c1694ad0e39542155473134..9d3e235312a70974edf455d3ee01dd994280912f 100644 (file)
--- a/modules/http2/h2_task.h
+++ b/modules/http2/h2_task.h
@@ -51,13 +51,13 @@ struct h2_task {
     struct h2_mplx *mplx;
     
     const struct h2_request *request;
+    int filters_set;
     int input_eos;
 
     int serialize_headers;
-
+    
     struct conn_rec *c;
-
-    apr_pool_t *pool;              /* pool for task lifetime things */
+    apr_pool_t *pool;
     apr_bucket_alloc_t *bucket_alloc;
     struct h2_task_input *input;
     struct h2_task_output *output;
diff --git a/modules/http2/h2_task_input.c b/modules/http2/h2_task_input.c
index 49be7cfd228128af6b412f1e36a9d1b899b0a8ec..863480c85896c8f9135a19b0861bb127e6d76728 100644 (file)
--- a/modules/http2/h2_task_input.c
+++ b/modules/http2/h2_task_input.c
@@ -115,6 +115,8 @@ apr_status_t h2_task_input_read(h2_task_input *input,
     }
     
     if ((bblen == 0) && input->task->input_eos) {
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+                      "h2_task_input(%s): eos", input->task->id);
         return APR_EOF;
     }
     
@@ -161,17 +163,17 @@ apr_status_t h2_task_input_read(h2_task_input *input,
     if (!APR_BRIGADE_EMPTY(input->bb)) {
         if (mode == AP_MODE_EXHAUSTIVE) {
             /* return all we have */
-            return h2_util_move(bb, input->bb, readbytes, NULL, 
-                                "task_input_read(exhaustive)");
+            status = h2_util_move(bb, input->bb, readbytes, NULL, 
+                                  "task_input_read(exhaustive)");
         }
         else if (mode == AP_MODE_READBYTES) {
-            return h2_util_move(bb, input->bb, readbytes, NULL, 
-                                "task_input_read(readbytes)");
+            status = h2_util_move(bb, input->bb, readbytes, NULL, 
+                                  "task_input_read(readbytes)");
         }
         else if (mode == AP_MODE_SPECULATIVE) {
             /* return not more than was asked for */
-            return h2_util_copy(bb, input->bb, readbytes,  
-                                "task_input_read(speculative)");
+            status = h2_util_copy(bb, input->bb, readbytes,  
+                                  "task_input_read(speculative)");
         }
         else if (mode == AP_MODE_GETLINE) {
             /* we are reading a single LF line, e.g. the HTTP headers */
@@ -186,7 +188,6 @@ apr_status_t h2_task_input_read(h2_task_input *input,
                               "h2_task_input(%s): getline: %s",
                               input->task->id, buffer);
             }
-            return status;
         }
         else {
             /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
@@ -194,14 +195,25 @@ apr_status_t h2_task_input_read(h2_task_input *input,
             ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
                           APLOGNO(02942) 
                           "h2_task_input, unsupported READ mode %d", mode);
-            return APR_ENOTIMPL;
+            status = APR_ENOTIMPL;
         }
+        
+        if (APLOGctrace1(f->c)) {
+            apr_brigade_length(bb, 0, &bblen);
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+                          "h2_task_input(%s): return %ld data bytes",
+                          input->task->id, (long)bblen);
+        }
+        return status;
     }
     
     if (is_aborted(f)) {
         return APR_ECONNABORTED;
     }
     
-    return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
+    status = (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+                  "h2_task_input(%s): no data", input->task->id);
+    return status;
 }