From: Ryan Bloom
Date: Tue, 21 Nov 2000 20:17:20 +0000 (+0000)
Subject: Modify the content-length filter to change the criteria used to determine
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=c7730616536c2eb365c6be4ac87d546f353859a2;p=apache

Modify the content-length filter to change the criteria used to determine
if/when we compute the content-length.  There are just a few cases now:

1)  We already have all the data
2)  We don't have all the data and:
    2a)  This is a 1.1 request but we can't chunk
    2b)  This is a keep-alive request

In the future, we probably want to modify this so that the request simply
isn't kept alive.

This filter always buffers the first 9K of data.  The reason is simple: the
core will buffer 9K at a time anyway, and there is a chance that we may get
the end of the request before we hit 9K.  This increases our chances of
being able to send a c-l.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@87055 13f79535-47bb-0310-9956-ffa450edef68
---

diff --git a/include/http_core.h b/include/http_core.h
index a7fdeb140a..05c15deb21 100644
--- a/include/http_core.h
+++ b/include/http_core.h
@@ -121,6 +121,10 @@ extern "C" {
 #define SATISFY_ANY 1
 #define SATISFY_NOSPEC 2
 
+/* Make sure we don't write less than 4096 bytes at any one time.
+ */
+#define AP_MIN_BYTES_TO_WRITE 9000
+
 /**
  * Retrieve the value of Options for this request
  * @param r The current request
diff --git a/modules/http/http_core.c b/modules/http/http_core.c
index 85104860c1..a9dfd1faf2 100644
--- a/modules/http/http_core.c
+++ b/modules/http/http_core.c
@@ -89,10 +89,6 @@
 #include 
 #endif
 
-/* Make sure we don't write less than 4096 bytes at any one time.
- */
-#define MIN_BYTES_TO_WRITE 9000
-
 /* LimitXMLRequestBody handling */
 #define AP_LIMIT_UNSET ((long) -1)
 #define AP_DEFAULT_LIMIT_XML_BODY ((size_t)1000000)
@@ -3063,7 +3059,7 @@ static apr_status_t coalesce_filter(ap_filter_t *f, ap_bucket_brigade *b)
 
     if (ctx == NULL) {
         f->ctx = ctx = apr_pcalloc(p, sizeof(coalesce_filter_ctx_t));
-        ctx->avail = MIN_BYTES_TO_WRITE;
+        ctx->avail = AP_MIN_BYTES_TO_WRITE;
     }
 
     if (ctx->cnt) {
@@ -3093,7 +3089,7 @@ static apr_status_t coalesce_filter(ap_filter_t *f, ap_bucket_brigade *b)
         if ((n < MIN_BUCKET_SIZE) && (n < ctx->avail)) {
             /* Coalesce this bucket into the buffer */
             if (ctx->buf == NULL) {
-                ctx->buf = apr_palloc(p, MIN_BYTES_TO_WRITE);
+                ctx->buf = apr_palloc(p, AP_MIN_BYTES_TO_WRITE);
                 ctx->cur = ctx->buf;
                 ctx->cnt = 0;
             }
@@ -3156,7 +3152,7 @@ static apr_status_t coalesce_filter(ap_filter_t *f, ap_bucket_brigade *b)
         if (ctx) {
            ctx->cur = ctx->buf;
            ctx->cnt = 0;
-           ctx->avail = MIN_BYTES_TO_WRITE;
+           ctx->avail = AP_MIN_BYTES_TO_WRITE;
         }
     }
     else {
@@ -3390,7 +3386,7 @@ static apr_status_t core_output_filter(ap_filter_t *f, ap_bucket_brigade *b)
     /* Completed iterating over the brigades, now determine if we want to
      * buffer the brigade or send the brigade out on the network
      */
-    if ((!fd && (!more) && (nbytes < MIN_BYTES_TO_WRITE) && !AP_BUCKET_IS_FLUSH(e))
+    if ((!fd && (!more) && (nbytes < AP_MIN_BYTES_TO_WRITE) && !AP_BUCKET_IS_FLUSH(e))
         || (AP_BUCKET_IS_EOS(e) && c->keepalive)) {
 
         /* NEVER save an EOS in here. If we are saving a brigade with an
diff --git a/modules/http/http_protocol.c b/modules/http/http_protocol.c
index f7f104c882..bc20adcd11 100644
--- a/modules/http/http_protocol.c
+++ b/modules/http/http_protocol.c
@@ -2239,7 +2239,7 @@ AP_DECLARE(void) ap_send_http_header(request_rec *r)
 
 struct content_length_ctx {
     ap_bucket_brigade *saved;
-    int hold_data;    /* Whether or not to buffer the data. */
+    int compute_len;
 };
 
 /* This filter computes the content length, but it also computes the number
@@ -2258,32 +2258,6 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_content_length_filter(ap_filter_t *f,
     ctx = f->ctx;
     if (!ctx) { /* first time through */
         f->ctx = ctx = apr_pcalloc(r->pool, sizeof(struct content_length_ctx));
-
-        /* We won't compute a content length if one of the following is true:
-         * . subrequest
-         * . HTTP/0.9
-         * . status HTTP_NOT_MODIFIED or HTTP_NO_CONTENT
-         * . HEAD
-         * . content length already computed
-         * . can be chunked
-         * . body already chunked
-         * Much of this should correspond to checks in ap_set_keepalive().
-         */
-        if ((r->assbackwards
-             || r->status == HTTP_NOT_MODIFIED
-             || r->status == HTTP_NO_CONTENT
-             || r->header_only
-             || r->proto_num == HTTP_VERSION(1,1)
-             || ap_find_last_token(f->r->pool,
-                                   apr_table_get(r->headers_out,
-                                                 "Transfer-Encoding"),
-                                   "chunked"))
-            && (!AP_BUCKET_IS_EOS(AP_BRIGADE_LAST(b)))) {
-            ctx->hold_data = 0;
-        }
-        else {
-            ctx->hold_data = 1;
-        }
     }
 
     AP_BRIGADE_FOREACH(e, b) {
@@ -2305,7 +2279,34 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_content_length_filter(ap_filter_t *f,
         r->bytes_sent += length;
     }
 
-    if (ctx->hold_data) { /* calculating content length? */
+    if (r->bytes_sent < AP_MIN_BYTES_TO_WRITE) {
+        ap_save_brigade(f, &ctx->saved, &b);
+        return APR_SUCCESS;
+    }
+
+    /* We will compute a content length if:
+     *     We already have all the data
+     *         This is a bit confusing, because we will always buffer up
+     *         to AP_MIN_BYTES_TO_WRITE, so if we get all the data while
+     *         we are buffering that much data, we set the c-l.
+     * or  We are in a 1.1 request and we can't chunk
+     * or  This is a keepalive connection
+     * We may want to change this later to just close the connection
+     */
+    if ((r->proto_num == HTTP_VERSION(1,1)
+         && !ap_find_last_token(f->r->pool,
+                                apr_table_get(r->headers_out,
+                                              "Transfer-Encoding"),
+                                "chunked"))
+        || (f->r->connection->keepalive)
+        || (AP_BUCKET_IS_EOS(AP_BRIGADE_LAST(b)))) {
+        ctx->compute_len = 1;
+    }
+    else {
+        ctx->compute_len = 0;
+    }
+
+    if (ctx->compute_len) {
         /* save the brigade; we can't pass any data to the next
          * filter until we have the entire content length
          */
@@ -2313,12 +2314,12 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_content_length_filter(ap_filter_t *f,
             ap_save_brigade(f, &ctx->saved, &b);
             return APR_SUCCESS;
         }
-        if (ctx->saved) {
-            AP_BRIGADE_CONCAT(ctx->saved, b);
-            b = ctx->saved;
-        }
         ap_set_content_length(r, r->bytes_sent);
     }
+    if (ctx->saved) {
+        AP_BRIGADE_CONCAT(ctx->saved, b);
+        b = ctx->saved;
+    }
     return ap_pass_brigade(f->next, b);
 }
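
For reference, the three cases in the log message boil down to a small predicate.
The standalone sketch below is illustrative only: the struct and function names
(cl_state, should_compute_content_length) are invented for this example and are
not part of httpd, and the real decision, including the AP_MIN_BYTES_TO_WRITE
buffering that precedes it, is the new block in the http_protocol.c hunk above.

#include <stdio.h>

/* Invented stand-ins for the bits of request state the filter looks at. */
struct cl_state {
    int have_all_data;   /* an EOS bucket arrived while we were buffering  */
    int is_http_1_1;     /* the request was made with HTTP/1.1             */
    int can_chunk;       /* the body could be sent with chunked encoding   */
    int keepalive;       /* the connection is marked keep-alive            */
};

/* Return 1 if the filter should keep buffering until EOS and emit a
 * Content-Length, 0 if the data can be streamed without one.
 */
static int should_compute_content_length(const struct cl_state *s)
{
    if (s->have_all_data)                  /* case 1: everything is here   */
        return 1;
    if (s->is_http_1_1 && !s->can_chunk)   /* case 2a: 1.1 but can't chunk */
        return 1;
    if (s->keepalive)                      /* case 2b: keep-alive request  */
        return 1;
    return 0;
}

int main(void)
{
    struct cl_state s = { 0, 1, 1, 1 };    /* streaming 1.1 keep-alive case */
    printf("compute C-L: %d\n", should_compute_content_length(&s));
    return 0;
}

As the log message notes, the keep-alive case (2b) currently forces buffering;
a later change may instead drop keep-alive for that case so the response can be
streamed without a content-length.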