CACHE_HASH_KEY_STRING,
NULL);
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, "Cache Purge of %s",c->key_entry(ejected));
c->current_size -= c->size_entry(ejected);
c->free_entry(ejected);
c->total_purges++;
* delete all URL entities from the cache
*
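+ * The request_rec (rather than a bare pool) lets each provider log against
+ * the request and allocate from r->pool.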
*/
-int cache_remove_url(cache_request_rec *cache, apr_pool_t *p)
+int cache_remove_url(cache_request_rec *cache, request_rec *r)
{
cache_provider_list *list;
cache_handle_t *h;
if (!h) {
return OK;
}
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
"cache: Removing url %s from the cache", h->cache_obj->key);
/* for each specified cache type, delete the URL */
while(list) {
- list->provider->remove_url(h, p);
+ list->provider->remove_url(h, r);
list = list->next;
}
return OK;
if (!cache) {
/* This should never happen */
- ap_log_error(APLOG_MARK, APLOG_ERR, APR_EGENERAL, r->server,
- "cache: No cache request information available for key"
- " generation");
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, r,
+ "cache: No cache request information available for key"
+ " generation");
return APR_EGENERAL;
}
if (!cache) {
/* This should never happen */
- ap_log_error(APLOG_MARK, APLOG_ERR, APR_EGENERAL, r->server,
- "cache: No cache request information available for key"
- " generation");
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, r,
+ "cache: No cache request information available for key"
+ " generation");
return DECLINED;
}
}
else {
/* headers do not match, so Vary failed */
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
- r->server,
- "cache_select_url(): Vary header mismatch.");
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
+ r, "cache_select_url(): Vary header mismatch.");
mismatch = 1;
}
}
r->headers_in);
cache->stale_handle = h;
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r->server,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r,
"Cached response for %s isn't fresh. Adding/replacing "
"conditional request headers.", r->uri);
/* if Cache-Control: only-if-cached, and not cached, return 504 */
if (cache->control_in.only_if_cached) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r->server,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r,
"cache: 'only-if-cached' requested and no cached entity, "
"returning 504 Gateway Timeout for: %s", r->uri);
return HTTP_GATEWAY_TIME_OUT;
* resource in the cache under a key where it is never found by the quick
* handler during following requests.
*/
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL,
- "cache: Key for entity %s?%s is %s", r->uri,
- r->parsed_uri.query, *key);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r,
+ "cache: Key for entity %s?%s is %s", r->uri,
+ r->parsed_uri.query, *key);
return APR_SUCCESS;
}
/**
* cache_storage.c
*/
-int cache_remove_url(cache_request_rec *cache, apr_pool_t *p);
+int cache_remove_url(cache_request_rec *cache, request_rec *r);
int cache_create_entity(cache_request_rec *cache, request_rec *r,
apr_off_t size, apr_bucket_brigade *in);
int cache_select(cache_request_rec *cache, request_rec *r);
path = apr_pstrcat(r->pool, conf->lockpath, dir, NULL);
if (APR_SUCCESS != (status = apr_dir_make_recursive(path,
APR_UREAD|APR_UWRITE|APR_UEXECUTE, r->pool))) {
- ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
- "Could not create a cache lock directory: %s",
- path);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r,
+ "Could not create a cache lock directory: %s",
+ path);
return status;
}
lockname = apr_pstrcat(r->pool, path, "/", lockname, NULL);
status = apr_stat(&finfo, lockname,
APR_FINFO_MTIME | APR_FINFO_NLINK, r->pool);
if (!(APR_STATUS_IS_ENOENT(status)) && APR_SUCCESS != status) {
- ap_log_error(APLOG_MARK, APLOG_ERR, APR_EEXIST, r->server,
- "Could not stat a cache lock file: %s",
- lockname);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_EEXIST, r,
+ "Could not stat a cache lock file: %s",
+ lockname);
return status;
}
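+ /* Drop stale locks: discard the lock file when it is older than
+  * conf->lockmaxage or its mtime lies in the future. */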
if ((status == APR_SUCCESS) && (((now - finfo.mtime) > conf->lockmaxage)
|| (now < finfo.mtime))) {
- ap_log_error(APLOG_MARK, APLOG_INFO, status, r->server,
- "Cache lock file for '%s' too old, removing: %s",
- r->uri, lockname);
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, status, r,
+ "Cache lock file for '%s' too old, removing: %s",
+ r->uri, lockname);
apr_file_remove(lockname, r->pool);
}
return 0;
}
else {
- ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
- "Incoming request is asking for an uncached version of "
- "%s, but we have been configured to ignore it and serve "
- "cached content anyway", r->unparsed_uri);
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
+ "Incoming request is asking for an uncached version of "
+ "%s, but we have been configured to ignore it and serve "
+ "cached content anyway", r->unparsed_uri);
}
}
return 0;
}
else {
- ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
- "Incoming request is asking for a no-store version of "
- "%s, but we have been configured to ignore it and serve "
- "cached content anyway", r->unparsed_uri);
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
+ "Incoming request is asking for a no-store version of "
+ "%s, but we have been configured to ignore it and serve "
+ "cached content anyway", r->unparsed_uri);
}
}
return 0;
}
- ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
- "Incoming request is asking for a uncached version of "
- "%s, but we have been configured to ignore it and "
- "serve a cached response anyway",
- r->unparsed_uri);
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
+ "Incoming request is asking for a uncached version of "
+ "%s, but we have been configured to ignore it and "
+ "serve a cached response anyway",
+ r->unparsed_uri);
}
/* These come from the cached entity. */
status = cache_try_lock(conf, cache, r);
if (APR_SUCCESS == status) {
/* we obtained a lock, follow the stale path */
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "Cache lock obtained for stale cached URL, "
- "revalidating entry: %s",
- r->unparsed_uri);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "Cache lock obtained for stale cached URL, "
+ "revalidating entry: %s",
+ r->unparsed_uri);
return 0;
}
else if (APR_EEXIST == status) {
/* lock already exists, return stale data anyway, with a warning */
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "Cache already locked for stale cached URL, "
- "pretend it is fresh: %s",
- r->unparsed_uri);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "Cache already locked for stale cached URL, "
+ "pretend it is fresh: %s",
+ r->unparsed_uri);
/* make sure we don't stomp on a previous warning */
warn_head = apr_table_get(h->resp_hdrs, "Warning");
}
else {
/* some other error occurred, just treat the object as stale */
- ap_log_error(APLOG_MARK, APLOG_DEBUG, status, r->server,
- "Attempt to obtain a cache lock for stale "
- "cached URL failed, revalidating entry anyway: %s",
- r->unparsed_uri);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r,
+ "Attempt to obtain a cache lock for stale "
+ "cached URL failed, revalidating entry anyway: %s",
+ r->unparsed_uri);
return 0;
}
* or not.
*/
if (r->main) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
- r->server,
- "Adding CACHE_SAVE_SUBREQ filter for %s",
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
+ r, "Adding CACHE_SAVE_SUBREQ filter for %s",
r->uri);
cache->save_filter = ap_add_output_filter_handle(
cache_save_subreq_filter_handle, cache, r,
r->connection);
}
else {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
- r->server, "Adding CACHE_SAVE filter for %s",
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
+ r, "Adding CACHE_SAVE filter for %s",
r->uri);
cache->save_filter = ap_add_output_filter_handle(
cache_save_filter_handle, cache, r,
apr_pool_userdata_setn(cache, CACHE_CTX_KEY, NULL, r->pool);
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r->server,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r,
"Adding CACHE_REMOVE_URL filter for %s",
r->uri);
cache, r, r->connection);
}
else {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, rv,
- r->server, "Cache locked for url, not caching "
- "response: %s", r->uri);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv,
+ r, "Cache locked for url, not caching "
+ "response: %s", r->uri);
}
}
else {
if (cache->stale_headers) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
- r->server, "Restoring request headers for %s",
- r->uri);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
+ r, "Restoring request headers for %s",
+ r->uri);
r->headers_in = cache->stale_headers;
}
* the headers. */
if (lookup) {
if (cache->stale_headers) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r->server,
- "Restoring request headers.");
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r,
+ "Restoring request headers.");
r->headers_in = cache->stale_headers;
}
}
* or not.
*/
if (r->main) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
- r->server,
- "Adding CACHE_SAVE_SUBREQ filter for %s",
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
+ r, "Adding CACHE_SAVE_SUBREQ filter for %s",
r->uri);
cache_save_handle = cache_save_subreq_filter_handle;
}
else {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
- r->server, "Adding CACHE_SAVE filter for %s",
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
+ r, "Adding CACHE_SAVE filter for %s",
r->uri);
cache_save_handle = cache_save_filter_handle;
}
*/
if (cache_replace_filter(r->output_filters,
cache_filter_handle, cache_save_handle)) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
- r->server, "Replacing CACHE with CACHE_SAVE "
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
+ r, "Replacing CACHE with CACHE_SAVE "
"filter for %s", r->uri);
}
apr_pool_userdata_setn(cache, CACHE_CTX_KEY, NULL, r->pool);
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r->server,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r,
"Adding CACHE_REMOVE_URL filter for %s",
r->uri);
}
else {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, rv,
- r->server, "Cache locked for url, not caching "
- "response: %s", r->uri);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv,
+ r, "Cache locked for url, not caching "
+ "response: %s", r->uri);
}
}
else {
* place.
*/
if (cache_replace_filter(r->output_filters, cache_filter_handle, cache_out_handle)) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
- r->server, "Replacing CACHE with CACHE_OUT filter for %s",
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS,
+ r, "Replacing CACHE with CACHE_OUT filter for %s",
r->uri);
}
if (!cache) {
/* user likely configured CACHE_OUT manually; they should use mod_cache
* configuration to do that */
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
- "CACHE/CACHE_OUT filter enabled while caching is disabled, ignoring");
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "CACHE/CACHE_OUT filter enabled while caching is disabled, ignoring");
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, in);
}
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r->server,
- "cache: running CACHE_OUT filter");
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r,
+ "cache: running CACHE_OUT filter");
/* clean out any previous response up to EOS, if any */
for (e = APR_BRIGADE_FIRST(in);
/* This filter is done once it has served up its content */
ap_remove_output_filter(f);
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r->server,
- "cache: serving %s", r->uri);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r,
+ "cache: serving %s", r->uri);
return ap_pass_brigade(f->next, in);
}
rv = cache->provider->store_body(cache->handle, f->r, in, cache->out);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, f->r->server,
- "cache: Cache provider's store_body failed!");
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, f->r,
+ "cache: Cache provider's store_body failed!");
ap_remove_output_filter(f);
/* give someone else the chance to cache the file */
/* oops, no data out, but not all data read in either, be
* safe and stand down to prevent a spin.
*/
- ap_log_error(APLOG_MARK, APLOG_WARNING, rv, f->r->server,
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, f->r,
"cache: Cache provider's store_body returned an "
- "empty brigade, but didn't consume all of the"
- "input brigade, standing down to prevent a spin");
+ "empty brigade, but didn't consume all of the"
+ "input brigade, standing down to prevent a spin");
ap_remove_output_filter(f);
/* give someone else the chance to cache the file */
/* user likely configured CACHE_SAVE manually; they should really use
* mod_cache configuration to do that
*/
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
- "CACHE/CACHE_SAVE filter enabled while caching is disabled, ignoring");
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "CACHE/CACHE_SAVE filter enabled while caching is disabled, ignoring");
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, in);
}
}
if (reason) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "cache: %s not cached. Reason: %s", r->unparsed_uri,
- reason);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "cache: %s not cached. Reason: %s", r->unparsed_uri,
+ reason);
/* we've got a cache miss! tell anyone who cares */
cache_run_cache_status(cache->handle, r, r->headers_out, AP_CACHE_MISS,
return ap_pass_brigade(f->next, in);
}
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "cache: Caching url: %s", r->unparsed_uri);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "cache: Caching url: %s", r->unparsed_uri);
/* We are actually caching this response. So it does not
* make sense to remove this entity any more.
*/
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "cache: Removing CACHE_REMOVE_URL filter.");
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "cache: Removing CACHE_REMOVE_URL filter.");
ap_remove_output_filter(cache->remove_url_filter);
/*
/* if it's in the future, then replace by date */
lastmod = date;
lastmods = dates;
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
- r->server,
- "cache: Last modified is in the future, "
- "replacing with now");
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0,
+ r, "cache: Last modified is in the future, "
+ "replacing with now");
}
/* if no expiry date then
* the body it is safe to try and remove the url from the cache.
*/
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, r->server,
- "cache: updating headers with store_headers failed. "
- "Removing cached url.");
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
+ "cache: updating headers with store_headers failed. "
+ "Removing cached url.");
- rv = cache->provider->remove_url(cache->stale_handle, r->pool);
+ rv = cache->provider->remove_url(cache->stale_handle, r);
if (rv != OK) {
/* Probably a mod_disk_cache cache area has been (re)mounted
* read-only, or that there is a permissions problem.
*/
- ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, r->server,
- "cache: attempt to remove url from cache unsuccessful.");
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
+ "cache: attempt to remove url from cache unsuccessful.");
}
/* we've got a cache conditional hit! tell anyone who cares */
}
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, r->server,
- "cache: store_headers failed");
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
+ "cache: store_headers failed");
/* we've got a cache miss! tell anyone who cares */
cache_run_cache_status(cache->handle, r, r->headers_out, AP_CACHE_MISS,
* 1. Remove ourselves
* 2. Do nothing and bail out
*/
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "cache: CACHE_REMOVE_URL enabled unexpectedly");
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "cache: CACHE_REMOVE_URL enabled unexpectedly");
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, in);
}
/* Now remove this cache entry from the cache */
- cache_remove_url(cache, r->pool);
+ cache_remove_url(cache, r);
/* remove ourselves */
ap_remove_output_filter(f);
static int cache_filter(ap_filter_t *f, apr_bucket_brigade *in)
{
/* we are just a marker, so let's just remove ourselves */
- ap_log_error(APLOG_MARK, APLOG_WARNING, 0, f->r->server,
- "cache: CACHE filter was added twice, or was added in quick "
- "handler mode and will be ignored.");
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, f->r,
+ "cache: CACHE filter was added twice, or was added in quick "
+ "handler mode and will be ignored.");
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, in);
}
const char *urlkey, apr_off_t len, apr_bucket_brigade *bb);
int (*open_entity) (cache_handle_t *h, request_rec *r,
const char *urlkey);
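+ /* remove_url now receives the request_rec so provider implementations can
+  * log with ap_log_rerror() and allocate from r->pool. */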
- int (*remove_url) (cache_handle_t *h, apr_pool_t *p);
+ int (*remove_url) (cache_handle_t *h, request_rec *r);
apr_status_t (*commit_entity)(cache_handle_t *h, request_rec *r);
} cache_provider;
rv = safe_file_rename(conf, file->tempfile, file->file, file->pool);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_WARNING, rv, r->server,
- "disk_cache: rename tempfile to file failed:"
- " %s -> %s", file->tempfile, file->file);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, r,
+ "disk_cache: rename tempfile to file failed:"
+ " %s -> %s", file->tempfile, file->file);
apr_file_remove(file->tempfile, file->pool);
}
/* we don't support caching of range requests (yet) */
if (r->status == HTTP_PARTIAL_CONTENT) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "disk_cache: URL %s partial content response not cached",
- key);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "disk_cache: URL %s partial content response not cached",
+ key);
return DECLINED;
}
/* Note, len is -1 if unknown so don't trust it too hard */
if (len > dconf->maxfs) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "disk_cache: URL %s failed the size check "
- "(%" APR_OFF_T_FMT " > %" APR_OFF_T_FMT ")",
- key, len, dconf->maxfs);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "disk_cache: URL %s failed the size check "
+ "(%" APR_OFF_T_FMT " > %" APR_OFF_T_FMT ")",
+ key, len, dconf->maxfs);
return DECLINED;
}
if (len >= 0 && len < dconf->minfs) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "disk_cache: URL %s failed the size check "
- "(%" APR_OFF_T_FMT " < %" APR_OFF_T_FMT ")",
- key, len, dconf->minfs);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "disk_cache: URL %s failed the size check "
+ "(%" APR_OFF_T_FMT " < %" APR_OFF_T_FMT ")",
+ key, len, dconf->minfs);
return DECLINED;
}
if (conf->cache_root == NULL) {
if (!error_logged) {
error_logged = 1;
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
- "disk_cache: Cannot cache files to disk without a CacheRoot specified.");
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "disk_cache: Cannot cache files to disk without a CacheRoot specified.");
}
return DECLINED;
}
varray = apr_array_make(r->pool, 5, sizeof(char*));
rc = read_array(r, varray, dobj->vary.fd);
if (rc != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ERR, rc, r->server,
- "disk_cache: Cannot parse vary header file: %s",
- dobj->vary.file);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r,
+ "disk_cache: Cannot parse vary header file: %s",
+ dobj->vary.file);
apr_file_close(dobj->vary.fd);
return DECLINED;
}
}
}
else if (format != DISK_FORMAT_VERSION) {
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
- "disk_cache: File '%s' has a version mismatch. File had version: %d.",
- dobj->vary.file, format);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "disk_cache: File '%s' has a version mismatch. File had version: %d.",
+ dobj->vary.file, format);
apr_file_close(dobj->vary.fd);
return DECLINED;
}
/* Read the bytes to setup the cache_info fields */
rc = file_cache_recall_mydata(dobj->hdrs.fd, info, dobj, r);
if (rc != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ERR, rc, r->server,
- "disk_cache: Cannot read header file %s", dobj->hdrs.file);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r,
+ "disk_cache: Cannot read header file %s", dobj->hdrs.file);
apr_file_close(dobj->hdrs.fd);
return DECLINED;
}
/* Is this a cached HEAD request? */
if (dobj->disk_info.header_only && !r->header_only) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r->server,
- "disk_cache: HEAD request cached, non-HEAD requested, ignoring: %s",
- dobj->hdrs.file);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r,
+ "disk_cache: HEAD request cached, non-HEAD requested, ignoring: %s",
+ dobj->hdrs.file);
return DECLINED;
}
#endif
rc = apr_file_open(&dobj->data.fd, dobj->data.file, flags, 0, r->pool);
if (rc != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ERR, rc, r->server,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r,
"disk_cache: Cannot open data file %s", dobj->data.file);
apr_file_close(dobj->hdrs.fd);
return DECLINED;
dobj->disk_info.device == finfo.device) {
/* Initialize the cache_handle callback functions */
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "disk_cache: Recalled cached URL info header %s", dobj->name);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "disk_cache: Recalled cached URL info header %s", dobj->name);
return OK;
}
}
/* Oh dear, no luck matching header to the body */
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "disk_cache: Cached URL info header '%s' didn't match body, ignoring this entry",
- dobj->name);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "disk_cache: Cached URL info header '%s' didn't match body, ignoring this entry",
+ dobj->name);
return DECLINED;
}
return OK;
}
-static int remove_url(cache_handle_t *h, apr_pool_t *p)
+static int remove_url(cache_handle_t *h, request_rec *r)
{
apr_status_t rc;
disk_cache_object_t *dobj;
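+ /* With the request_rec available, the removals below log via ap_log_rerror()
+  * and pass r->pool to apr_file_remove()/apr_dir_remove(). */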
/* Delete headers file */
if (dobj->hdrs.file) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL,
- "disk_cache: Deleting %s from cache.", dobj->hdrs.file);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "disk_cache: Deleting %s from cache.", dobj->hdrs.file);
- rc = apr_file_remove(dobj->hdrs.file, p);
+ rc = apr_file_remove(dobj->hdrs.file, r->pool);
if ((rc != APR_SUCCESS) && !APR_STATUS_IS_ENOENT(rc)) {
- /* Will only result in an output if httpd is started with -e debug.
- * For reason see log_error_core for the case s == NULL.
- */
- ap_log_error(APLOG_MARK, APLOG_DEBUG, rc, NULL,
- "disk_cache: Failed to delete headers file %s from cache.",
- dobj->hdrs.file);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rc, r,
+ "disk_cache: Failed to delete headers file %s from cache.",
+ dobj->hdrs.file);
return DECLINED;
}
}
/* Delete data file */
if (dobj->data.file) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL,
- "disk_cache: Deleting %s from cache.", dobj->data.file);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "disk_cache: Deleting %s from cache.", dobj->data.file);
- rc = apr_file_remove(dobj->data.file, p);
+ rc = apr_file_remove(dobj->data.file, r->pool);
if ((rc != APR_SUCCESS) && !APR_STATUS_IS_ENOENT(rc)) {
- /* Will only result in an output if httpd is started with -e debug.
- * For reason see log_error_core for the case s == NULL.
- */
- ap_log_error(APLOG_MARK, APLOG_DEBUG, rc, NULL,
- "disk_cache: Failed to delete data file %s from cache.",
- dobj->data.file);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rc, r,
+ "disk_cache: Failed to delete data file %s from cache.",
+ dobj->data.file);
return DECLINED;
}
}
if (str_to_copy) {
char *dir, *slash, *q;
- dir = apr_pstrdup(p, str_to_copy);
+ dir = apr_pstrdup(r->pool, str_to_copy);
/* remove filename */
slash = strrchr(dir, '/');
* we won't either delete or go above our cache root.
*/
for (q = dir + dobj->root_len; *q ; ) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL,
- "disk_cache: Deleting directory %s from cache",
- dir);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "disk_cache: Deleting directory %s from cache",
+ dir);
- rc = apr_dir_remove(dir, p);
+ rc = apr_dir_remove(dir, r->pool);
if (rc != APR_SUCCESS && !APR_STATUS_IS_ENOENT(rc)) {
break;
}
++maybeASCII;
}
if (maybeASCII > maybeEBCDIC) {
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
- "CGI Interface Error: Script headers apparently ASCII: (CGI = %s)",
- r->filename);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "CGI Interface Error: Script headers apparently ASCII: (CGI = %s)",
+ r->filename);
inbytes_left = outbytes_left = cp - w;
apr_xlate_conv_buffer(ap_hdrs_from_ascii,
w, &inbytes_left, w, &outbytes_left);
/* This case should not happen... */
if (!dobj->hdrs.fd) {
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
- "disk_cache: recalling headers; but no header fd for %s", dobj->name);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "disk_cache: recalling headers; but no header fd for %s", dobj->name);
return APR_NOTFOUND;
}
apr_file_close(dobj->hdrs.fd);
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "disk_cache: Recalled headers for URL %s", dobj->name);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "disk_cache: Recalled headers for URL %s", dobj->name);
return APR_SUCCESS;
}
dobj->vary.pool);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_WARNING, rv, r->server,
- "disk_cache: could not create temp file %s",
- dobj->vary.tempfile);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, r,
+ "disk_cache: could not create temp file %s",
+ dobj->vary.tempfile);
return rv;
}
APR_BUFFERED | APR_EXCL, dobj->hdrs.pool);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_WARNING, rv, r->server,
- "disk_cache: could not create temp file %s",
- dobj->hdrs.tempfile);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, r,
+ "disk_cache: could not create temp file %s",
+ dobj->hdrs.tempfile);
return rv;
}
rv = apr_file_writev(dobj->hdrs.tempfd, (const struct iovec *) &iov, 2, &amt);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_WARNING, rv, r->server,
- "disk_cache: could not write info to header file %s",
- dobj->hdrs.tempfile);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, r,
+ "disk_cache: could not write info to header file %s",
+ dobj->hdrs.tempfile);
apr_file_close(dobj->hdrs.tempfd);
return rv;
}
if (dobj->headers_out) {
rv = store_table(dobj->hdrs.tempfd, dobj->headers_out);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_WARNING, rv, r->server,
- "disk_cache: could not write out-headers to header file %s",
- dobj->hdrs.tempfile);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, r,
+ "disk_cache: could not write out-headers to header file %s",
+ dobj->hdrs.tempfile);
apr_file_close(dobj->hdrs.tempfd);
return rv;
}
if (dobj->headers_in) {
rv = store_table(dobj->hdrs.tempfd, dobj->headers_in);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_WARNING, rv, r->server,
- "disk_cache: could not write in-headers to header file %s",
- dobj->hdrs.tempfile);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, r,
+ "disk_cache: could not write in-headers to header file %s",
+ dobj->hdrs.tempfile);
apr_file_close(dobj->hdrs.tempfd);
return rv;
}
APR_BUCKET_REMOVE(e);
APR_BRIGADE_INSERT_TAIL(dobj->bb, e);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
- "disk_cache: Error when reading bucket for URL %s",
- h->cache_obj->key);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "disk_cache: Error when reading bucket for URL %s",
+ h->cache_obj->key);
/* Remove the intermediate cache file and return non-APR_SUCCESS */
apr_pool_destroy(dobj->data.pool);
APR_BRIGADE_CONCAT(out, dobj->bb);
/* write to the cache, leave if we fail */
rv = apr_file_write_full(dobj->data.tempfd, str, length, &written);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
- "disk_cache: Error when writing cache file for URL %s",
- h->cache_obj->key);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "disk_cache: Error when writing cache file for URL %s",
+ h->cache_obj->key);
/* Remove the intermediate cache file and return non-APR_SUCCESS */
apr_pool_destroy(dobj->data.pool);
APR_BRIGADE_CONCAT(out, dobj->bb);
}
dobj->file_size += written;
if (dobj->file_size > dconf->maxfs) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "disk_cache: URL %s failed the size check "
- "(%" APR_OFF_T_FMT ">%" APR_OFF_T_FMT ")",
- h->cache_obj->key, dobj->file_size, dconf->maxfs);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "disk_cache: URL %s failed the size check "
+ "(%" APR_OFF_T_FMT ">%" APR_OFF_T_FMT ")",
+ h->cache_obj->key, dobj->file_size, dconf->maxfs);
/* Remove the intermediate cache file and return non-APR_SUCCESS */
apr_pool_destroy(dobj->data.pool);
APR_BRIGADE_CONCAT(out, dobj->bb);
}
if (r->connection->aborted || r->no_cache) {
- ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
- "disk_cache: Discarding body for URL %s "
- "because connection has been aborted.",
- h->cache_obj->key);
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
+ "disk_cache: Discarding body for URL %s "
+ "because connection has been aborted.",
+ h->cache_obj->key);
/* Remove the intermediate cache file and return non-APR_SUCCESS */
apr_pool_destroy(dobj->data.pool);
return APR_EGENERAL;
}
if (dobj->file_size < dconf->minfs) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "disk_cache: URL %s failed the size check "
- "(%" APR_OFF_T_FMT "<%" APR_OFF_T_FMT ")",
- h->cache_obj->key, dobj->file_size, dconf->minfs);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "disk_cache: URL %s failed the size check "
+ "(%" APR_OFF_T_FMT "<%" APR_OFF_T_FMT ")",
+ h->cache_obj->key, dobj->file_size, dconf->minfs);
/* Remove the intermediate cache file and return non-APR_SUCCESS */
apr_pool_destroy(dobj->data.pool);
return APR_EGENERAL;
if (cl_header) {
apr_int64_t cl = apr_atoi64(cl_header);
if ((errno == 0) && (dobj->file_size != cl)) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "disk_cache: URL %s didn't receive complete response, not caching",
- h->cache_obj->key);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "disk_cache: URL %s didn't receive complete response, not caching",
+ h->cache_obj->key);
/* Remove the intermediate cache file and return non-APR_SUCCESS */
apr_pool_destroy(dobj->data.pool);
return APR_EGENERAL;
/* remove the cached items completely on any failure */
if (APR_SUCCESS != rv) {
- remove_url(h, r->pool);
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "disk_cache: commit_entity: URL '%s' not cached due to earlier disk error.",
- dobj->name);
+ remove_url(h, r);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "disk_cache: commit_entity: URL '%s' not cached due to earlier disk error.",
+ dobj->name);
}
else {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "disk_cache: commit_entity: Headers and body for URL %s cached.",
- dobj->name);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ "disk_cache: commit_entity: Headers and body for URL %s cached.",
+ dobj->name);
}
apr_pool_destroy(dobj->data.pool);