From: Paul J. Reder
Date: Wed, 2 Oct 2002 18:26:52 +0000 (+0000)
Subject: Fix a core dump in mod_cache when it attempted to store uncopyable
X-Git-Tag: 2.0.43~4
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=70b40483dcab41f90f1476e1e2fb92b42952eaaf;p=apache

Fix a core dump in mod_cache when it attempted to store uncopyable
buckets. This happened, for instance, when a file to be cached
contained SSI tags to execute a CGI script (passed as a pipe
bucket). [Paul J. Reder]

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@97058 13f79535-47bb-0310-9956-ffa450edef68
---

diff --git a/CHANGES b/CHANGES
index 2dc754500e..83b39ca091 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,5 +1,10 @@
 Changes with Apache 2.0.43
 
+  *) Fix a core dump in mod_cache when it attempted to store uncopyable
+     buckets. This happened, for instance, when a file to be cached
+     contained SSI tags to execute a CGI script (passed as a pipe
+     bucket). [Paul J. Reder]
+
   *) Ensure that output already available is flushed to the network
      when the content-length filter realizes that no new output will
      be available for a while. This helps some streaming CGIs as
diff --git a/modules/experimental/mod_cache.c b/modules/experimental/mod_cache.c
index 78d34c04c3..4402fda646 100644
--- a/modules/experimental/mod_cache.c
+++ b/modules/experimental/mod_cache.c
@@ -679,7 +679,36 @@ static int cache_in_filter(ap_filter_t *f, apr_bucket_brigade *in)
     }
     APR_BRIGADE_FOREACH(e, in) {
         apr_bucket *copy;
-        apr_bucket_copy(e, &copy);
+        rv = apr_bucket_copy(e, &copy);
+        if (rv == APR_ENOTIMPL) {
+            const char *str;
+            apr_size_t len;
+
+            /* This takes care of uncopyable buckets. */
+            rv = apr_bucket_read(e, &str, &len, APR_BLOCK_READ);
+            if ((rv == APR_SUCCESS) &&
+                (cache->saved_size + len <=
+                 conf->max_streaming_buffer_size)) {
+                rv = apr_bucket_copy(e, &copy);
+            }
+
+            if ((rv != APR_SUCCESS) ||
+                (cache->saved_size + len >
+                 conf->max_streaming_buffer_size)){
+                ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+                             "cache: not caching streamed response for "
+                             "%s because length %s", url,
+                             "> CacheMaxStreamingBuffer");
+
+                if (cache->saved_brigade != NULL) {
+                    apr_brigade_destroy(cache->saved_brigade);
+                    cache->saved_brigade = NULL;
+                    cache->saved_size = 0;
+                }
+                ap_remove_output_filter(f);
+                return ap_pass_brigade(f->next, in);
+            }
+        }
         APR_BRIGADE_INSERT_TAIL(cache->saved_brigade, copy);
     }
     cache->saved_size += size;
@@ -1055,7 +1084,7 @@ static const char *set_max_streaming_buffer(cmd_parms *parms, void *dummy,
                                                    &cache_module);
     val = (apr_off_t)strtol(arg, &err, 10);
     if (*err != 0) {
-        return "CacheMaxStreamingBuffer value must be a percentage";
+        return "CacheMaxStreamingBuffer value must be a number";
     }
     conf->max_streaming_buffer_size = val;
     return NULL;
diff --git a/modules/experimental/mod_mem_cache.c b/modules/experimental/mod_mem_cache.c
index 7b91505005..60d1768c2c 100644
--- a/modules/experimental/mod_mem_cache.c
+++ b/modules/experimental/mod_mem_cache.c
@@ -991,10 +991,10 @@ static apr_status_t write_body(cache_handle_t *h, request_rec *r, apr_bucket_bri
                 obj->count+=len;
             }
         }
-        /* This should not happen, but if it does, we are in BIG trouble
+        /* This should not fail, but if it does, we are in BIG trouble
          * cause we just stomped all over the heap.
          */
-        AP_DEBUG_ASSERT(obj->count >= mobj->m_len);
+        AP_DEBUG_ASSERT(obj->count <= mobj->m_len);
     }
     return APR_SUCCESS;
 }
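
A note on the technique (not part of the commit): apr_bucket_copy() returns
APR_ENOTIMPL for bucket types that have no copy implementation, such as pipe
and socket buckets, while apr_bucket_read() morphs such a bucket in place into
a heap bucket holding the data just read, after which a copy can succeed.
Below is a minimal sketch of that fallback on its own, without the
CacheMaxStreamingBuffer bookkeeping in the hunk above; the helper name
copy_bucket is illustrative, not something defined by mod_cache.

#include "apr_buckets.h"

/* Sketch: copy a bucket, falling back to a blocking read when the
 * bucket type does not implement copy (e.g. a pipe bucket).  The
 * read morphs the bucket into a copyable heap bucket in place. */
static apr_status_t copy_bucket(apr_bucket *e, apr_bucket **out)
{
    apr_status_t rv = apr_bucket_copy(e, out);
    if (rv == APR_ENOTIMPL) {
        const char *str;
        apr_size_t len;

        rv = apr_bucket_read(e, &str, &len, APR_BLOCK_READ);
        if (rv == APR_SUCCESS) {
            rv = apr_bucket_copy(e, out);
        }
    }
    return rv;
}

In the committed code this pattern is combined with the size check so that a
streamed response growing past the configured CacheMaxStreamingBuffer limit
stops being buffered and is passed straight down the filter chain.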