granicus.if.org Git - apache/commitdiff
Zero-copy output for small files delivered over keepalive
author Brian Pane <brianp@apache.org>
Sun, 18 Aug 2002 07:16:01 +0000 (07:16 +0000)
committer Brian Pane <brianp@apache.org>
Sun, 18 Aug 2002 07:16:01 +0000 (07:16 +0000)
connections (previously, the setaside code in the core output
filter would copy file buckets <8KB into the heap, resulting
in poor keepalive performance)

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@96431 13f79535-47bb-0310-9956-ffa450edef68

CHANGES
server/core.c

diff --git a/CHANGES b/CHANGES
index 65f87b4731756af7094dcee2b03cd3f932b630c9..dc237e1cd06d47992397f09413c8956220eefb45 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -1,5 +1,9 @@
 Changes with Apache 2.0.41
 
+  *) Performance improvement for keepalive requests: when setting
+     aside a small file for potential concatenation with the next
+     response on the connection, set aside the file descriptor rather
+     than copying the file into the heap.  [Brian Pane]
 
 Changes with Apache 2.0.40
 
index 346d785dddbeba05e2a85f3f8ef6f2d2c3b5d631..ad68dda1c43667bac7ad3a6e36c3d2069e73698b 100644 (file)
@@ -3782,46 +3782,42 @@ static apr_status_t core_output_filter(ap_filter_t *f, apr_bucket_brigade *b)
             * we want to process the second request fully.
              */
             if (APR_BUCKET_IS_EOS(last_e)) {
-                apr_bucket *bucket = NULL;
-                /* If we are in here, then this request is a keepalive.  We
-                 * need to be certain that any data in a bucket is valid
-                 * after the request_pool is cleared.
-                 */
-                if (ctx->b == NULL) {
-                    ctx->b = apr_brigade_create(net->c->pool,
-                                                net->c->bucket_alloc);
-                }
-
-                APR_BRIGADE_FOREACH(bucket, b) {
-                    const char *str;
-                    apr_size_t n;
-
-                    rv = apr_bucket_read(bucket, &str, &n, APR_BLOCK_READ);
-
-                    /* This apr_brigade_write does not use a flush function
-                       because we assume that we will not write enough data
-                       into it to cause a flush. However, if we *do* write
-                       "too much", then we could end up with transient
-                       buckets which would suck. This works for now, but is
-                       a bit shaky if changes are made to some of the
-                       buffering sizes. Let's do an assert to prevent
-                       potential future problems... */
-                    AP_DEBUG_ASSERT(AP_MIN_BYTES_TO_WRITE <=
-                                    APR_BUCKET_BUFF_SIZE);
-                    if (rv != APR_SUCCESS) {
-                        ap_log_error(APLOG_MARK, APLOG_ERR, rv, c->base_server,
-                                     "core_output_filter: Error reading from bucket.");
-                        return HTTP_INTERNAL_SERVER_ERROR;
+                apr_bucket *bucket;
+                int file_bucket_saved = 0;
+                APR_BUCKET_REMOVE(last_e);
+                for (bucket = APR_BRIGADE_FIRST(b);
+                     bucket != APR_BRIGADE_SENTINEL(b);
+                     bucket = APR_BUCKET_NEXT(bucket)) {
+
+                    /* Do a read on each bucket to pull in the
+                     * data from pipe and socket buckets, so
+                     * that we don't leave their file descriptors
+                     * open indefinitely.  Do the same for file
+                     * buckets, with one exception: allow the
+                     * first file bucket in the brigade to remain
+                     * a file bucket, so that we don't end up
+                     * doing an mmap+memcpy every time a client
+                     * requests a <8KB file over a keepalive
+                     * connection.
+                     */
+                    if (APR_BUCKET_IS_FILE(bucket) && !file_bucket_saved) {
+                        file_bucket_saved = 1;
+                    }
+                    else {
+                        const char *buf;
+                        apr_size_t len = 0;
+                        rv = apr_bucket_read(bucket, &buf, &len,
+                                             APR_BLOCK_READ);
+                        if (rv != APR_SUCCESS) {
+                            ap_log_error(APLOG_MARK, APLOG_ERR, rv,
+                                         c->base_server, "core_output_filter:"
+                                         " Error reading from bucket.");
+                            return HTTP_INTERNAL_SERVER_ERROR;
+                        }
                     }
-
-                    apr_brigade_write(ctx->b, NULL, NULL, str, n);
                 }
-
-                apr_brigade_destroy(b);
-            }
-            else {
-                ap_save_brigade(f, &ctx->b, &b, c->pool);
             }
+            ap_save_brigade(f, &ctx->b, &b, c->pool);
 
             return APR_SUCCESS;
         }