granicus.if.org Git - apache/commitdiff
Add a comment about an assumption we make in our keepalive buffering.
author Greg Stein <gstein@apache.org>
Tue, 1 May 2001 18:43:09 +0000 (18:43 +0000)
committer Greg Stein <gstein@apache.org>
Tue, 1 May 2001 18:43:09 +0000 (18:43 +0000)
Delay the check for "too many items in an iovec" until we actually try to
put something in there. This allows that N+1 bucket to be an EOS, FLUSH,
FILE, or zero-length bucket without triggering a split. Only if that next
bucket has iovec data will a split be made.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@88969 13f79535-47bb-0310-9956-ffa450edef68

server/core.c

index b371b2db24917ed95590ac263ddec65400b3f5a9..6e2706d990cb7cb398fb218b6f0e9852b8ffd57e 100644 (file)
@@ -3092,29 +3092,31 @@ static apr_status_t core_output_filter(ap_filter_t *f, apr_bucket_brigade *b)
 
                 rv = apr_bucket_read(e, &str, &n, APR_BLOCK_READ);
                 if (n) {
-                    nbytes += n;
                     if (!fd) {
+                        if (nvec == MAX_IOVEC_TO_WRITE) {
+                            /* woah! too many. stop now. */
+                            more = apr_brigade_split(b, e);
+                            break;
+                        }
                         vec[nvec].iov_base = (char*) str;
                         vec[nvec].iov_len = n;
                         nvec++;
                     }
                     else {
                         /* The bucket is a trailer to a file bucket */
+
+                        if (nvec_trailers == MAX_IOVEC_TO_WRITE) {
+                            /* woah! too many. stop now. */
+                            more = apr_brigade_split(b, e);
+                            break;
+                        }
                         vec_trailers[nvec_trailers].iov_base = (char*) str;
                         vec_trailers[nvec_trailers].iov_len = n;
                         nvec_trailers++;
                     }
+                    nbytes += n;
                 }
             }
-    
-            if ((nvec == MAX_IOVEC_TO_WRITE) || 
-                (nvec_trailers == MAX_IOVEC_TO_WRITE)) {
-                /* Split the brigade and break */
-                if (APR_BUCKET_NEXT(e) != APR_BRIGADE_SENTINEL(b)) {
-                    more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
-                }
-                break;
-            }
         }
     
         /* Completed iterating over the brigades, now determine if we want 
@@ -3153,6 +3155,17 @@ static apr_status_t core_output_filter(ap_filter_t *f, apr_bucket_brigade *b)
                     apr_size_t n;
 
                     rv = apr_bucket_read(bucket, &str, &n, APR_BLOCK_READ);
+
+                    /* This apr_brigade_write does not use a flush function
+                       because we assume that we will not write enough data
+                       into it to cause a flush. However, if we *do* write
+                       "too much", then we could end up with transient
+                       buckets which would suck. This works for now, but is
+                       a bit shaky if changes are made to some of the
+                       buffering sizes. Let's do an assert to prevent
+                       potential future problems... */
+                    AP_DEBUG_ASSERT(AP_MIN_BYTES_TO_WRITE <
+                                    APR_BUCKET_BUFF_SIZE);
                     apr_brigade_write(ctx->b, NULL, NULL, str, n);
                 }
                 apr_brigade_destroy(b);