granicus.if.org Git - apache/commitdiff
use bucket brigades directly when reading PUT data. This avoids
authorAndré Malo <nd@apache.org>
Wed, 6 Aug 2003 14:46:48 +0000 (14:46 +0000)
committerAndré Malo <nd@apache.org>
Wed, 6 Aug 2003 14:46:48 +0000 (14:46 +0000)
problems with content-length-modifying input filters (like deflate).

PR: 22104
Some stuff submitted by: tim@robbins.dropbear.id.au (Tim Robbins)

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@100919 13f79535-47bb-0310-9956-ffa450edef68

CHANGES
modules/dav/main/mod_dav.c

diff --git a/CHANGES b/CHANGES
index 3dcc150a6e325a69789b254fccbb26508dfecaea..bf1985e16f688b1de3c925b6bf621e557682ea48 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -2,6 +2,10 @@ Changes with Apache 2.1.0-dev
 
   [Remove entries to the current 2.0 section below, when backported]
 
+  *) mod_dav: Use bucket brigades when reading PUT data. This avoids
+     problems if the data stream is modified by an input filter. PR 22104.
+     [Tim Robbins <tim@robbins.dropbear.id.au>, André Malo]
+
   *) mod_rewrite: RewriteRules in server context using the force
      type feature [T=...] no longer disable MultiViews.  [André Malo]
 
index 2f966515f66554579492f527c500acb774ebb2be..bdccd76727586a042c3e2e77ea34beb63ba99d06 100644 (file)
@@ -898,7 +898,6 @@ static int dav_method_put(request_rec *r)
     const char *body;
     dav_error *err;
     dav_error *err2;
-    int result;
     dav_stream_mode mode;
     dav_stream *stream;
     dav_response *multi_response;
@@ -906,10 +905,6 @@ static int dav_method_put(request_rec *r)
     apr_off_t range_start;
     apr_off_t range_end;
 
-    if ((result = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK)) != OK) {
-        return result;
-    }
-
     /* Ask repository module to resolve the resource */
     err = dav_get_resource(r, 0 /* label_allowed */, 0 /* use_checked_in */,
                            &resource);
@@ -984,39 +979,61 @@ static int dav_method_put(request_rec *r)
     }
 
     if (err == NULL) {
-        if (ap_should_client_block(r)) {
-            char *buffer = apr_palloc(r->pool, DAV_READ_BLOCKSIZE);
-            long len;
-
-            /*
-             * Once we start reading the request, then we must read the
-             * whole darn thing. ap_discard_request_body() won't do anything
-             * for a partially-read request.
-             */
+        apr_bucket_brigade *bb;
+        apr_bucket *b;
+        int seen_eos = 0;
 
-            while ((len = ap_get_client_block(r, buffer,
-                                              DAV_READ_BLOCKSIZE)) > 0) {
-                   if (err == NULL) {
-                       /* write whatever we read, until we see an error */
-                       err = (*resource->hooks->write_stream)(stream,
-                                                              buffer, len);
-                   }
-            }
+        bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
 
-            /*
-             * ### what happens if we read more/less than the amount
-             * ### specified in the Content-Range? eek...
-             */
+        if (!bb) {
+            err = dav_new_error(r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+                                "Could not create bucket brigade");
+        }
+        else {
+            do {
+                apr_status_t rc;
 
-            if (len == -1) {
-                /*
-                 * Error reading request body. This has precedence over
-                 * prior errors.
-                 */
-                err = dav_new_error(r->pool, HTTP_BAD_REQUEST, 0,
-                                    "An error occurred while reading the "
-                                    "request body.");
-            }
+                rc = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+                                    APR_BLOCK_READ, DAV_READ_BLOCKSIZE);
+
+                if (rc != APR_SUCCESS) {
+                    err = dav_new_error(r->pool, HTTP_INTERNAL_SERVER_ERROR, 0,
+                                        "Could not get next bucket brigade");
+                    break;
+                }
+
+                APR_BRIGADE_FOREACH(b, bb) {
+                    const char *data;
+                    apr_size_t len;
+
+                    if (APR_BUCKET_IS_EOS(b)) {
+                        seen_eos = 1;
+                        break;
+                    }
+
+                    if (APR_BUCKET_IS_METADATA(b)) {
+                        continue;
+                    }
+
+                    rc = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+                    if (rc != APR_SUCCESS) {
+                        err = dav_new_error(r->pool, HTTP_BAD_REQUEST, 0,
+                                            "An error occurred while reading "
+                                            "the request body.");
+                        break;
+                    }
+
+                    if (err == NULL) {
+                        /* write whatever we read, until we see an error */
+                        err = (*resource->hooks->write_stream)(stream, data,
+                                                               len);
+                    }
+                }
+
+                apr_brigade_cleanup(bb);
+            } while (!seen_eos);
+
+            apr_brigade_destroy(bb);
         }
 
         err2 = (*resource->hooks->close_stream)(stream,