#include "httpd.h"
#include "http_config.h"
#include "http_log.h"
+#include "apr_lib.h"
#include "apr_strings.h"
#include "apr_general.h"
#include "util_filter.h"
#include "http_request.h"
#define APR_WANT_STRFUNC
#include "apr_want.h"
-#include "http_protocol.h"
#include "zlib.h"
#define DEFAULT_MEMLEVEL 9
#define DEFAULT_BUFFERSIZE 8096
+
+/* Check whether a request is gzipped, so we can un-gzip it.
+ * If a request has multiple encodings, we need the gzip
+ * to be the outermost non-identity encoding.
+ *
+ * Lookup order: hdrs1 first; if no Content-Encoding there and hdrs2 is
+ * non-NULL (the output-filter case passes headers_out/err_headers_out),
+ * hdrs2 is tried next, then r->content_encoding as a last resort.
+ *
+ * Side effect: when gzip is found as the outermost non-identity coding,
+ * that coding is consumed — the Content-Encoding header is unset (single
+ * token) or truncated at the gzip token (comma-separated list), and
+ * r->content_encoding is kept in sync with the table value.
+ *
+ * Returns 1 if gzip/x-gzip was the outermost non-identity coding,
+ * 0 otherwise.
+ */
+static int check_gzip(request_rec *r, apr_table_t *hdrs1, apr_table_t *hdrs2)
+{
+ int found = 0;
+ apr_table_t *hdrs = hdrs1;
+ const char *encoding = apr_table_get(hdrs, "Content-Encoding");
+
+ if (!encoding && (hdrs2 != NULL)) {
+ /* the output filter has two tables and a content_encoding to check */
+ encoding = apr_table_get(hdrs2, "Content-Encoding");
+ hdrs = hdrs2;
+ if (!encoding) {
+ encoding = r->content_encoding;
+ /* hdrs == NULL below means "update r->content_encoding,
+ * not a headers table" */
+ hdrs = NULL;
+ }
+ }
+ if (encoding && *encoding) {
+
+ /* check the usual/simple case first */
+ if (!strcasecmp(encoding, "gzip")
+ || !strcasecmp(encoding, "x-gzip")) {
+ found = 1;
+ if (hdrs) {
+ apr_table_unset(hdrs, "Content-Encoding");
+ }
+ else {
+ r->content_encoding = NULL;
+ }
+ }
+ else if (ap_strchr_c(encoding, ',') != NULL) {
+ /* If the outermost encoding isn't gzip, there's nowt
+ * we can do. So only check the last non-identity token
+ */
+ char *new_encoding = apr_pstrdup(r->pool, encoding);
+ char *ptr;
+ /* Scan tokens right-to-left, stripping trailing empty or
+ * "identity" tokens until a real coding is seen. */
+ for(;;) {
+ char *token = ap_strrchr(new_encoding, ',');
+ if (!token) { /* gzip:identity or other:identity */
+ /* only one token left: the list degenerated to a
+ * single coding after stripping identity tokens */
+ if (!strcasecmp(new_encoding, "gzip")
+ || !strcasecmp(new_encoding, "x-gzip")) {
+ found = 1;
+ if (hdrs) {
+ apr_table_unset(hdrs, "Content-Encoding");
+ }
+ else {
+ r->content_encoding = NULL;
+ }
+ }
+ break; /* seen all tokens */
+ }
+ /* skip leading whitespace after the comma */
+ for (ptr=token+1; apr_isspace(*ptr); ++ptr);
+ if (!strcasecmp(ptr, "gzip")
+ || !strcasecmp(ptr, "x-gzip")) {
+ /* truncate the list at the gzip token; the rest of
+ * the codings remain for downstream consumers */
+ *token = '\0';
+ if (hdrs) {
+ apr_table_setn(hdrs, "Content-Encoding", new_encoding);
+ }
+ else {
+ r->content_encoding = new_encoding;
+ }
+ found = 1;
+ }
+ else if (!ptr[0] || !strcasecmp(ptr, "identity")) {
+ *token = '\0';
+ continue; /* strip the token and find the next one */
+ }
+ break; /* found a non-identity token */
+ }
+ }
+ }
+ /*
+ * If we have dealt with the headers above but content_encoding was set
+ * before sync it with the new value in the hdrs table as
+ * r->content_encoding takes precedence later on in the http_header_filter
+ * and hence would destroy what we have just set in the hdrs table.
+ */
+ if (hdrs && r->content_encoding) {
+ r->content_encoding = apr_table_get(hdrs, "Content-Encoding");
+ }
+ return found;
+}
+
/* Outputs a long in LSB order to the given file
* only the bottom 4 bits are required for the deflate file format.
*/
int (*libz_end_func)(z_streamp);
unsigned char *validation_buffer;
apr_size_t validation_buffer_length;
+ int inflate_init;
+ int filter_init;
} deflate_ctx;
/* Number of validation bytes (CRC and length) after the compressed data */
return APR_SUCCESS;
}
+/* ETag must be unique among the possible representations, so a change
+ * to content-encoding requires a corresponding change to the ETag.
+ * This routine appends -transform (e.g., -gzip) to the entity-tag
+ * value inside the double-quotes if an ETag has already been set
+ * and its value already contains double-quotes. PR 39727
+ *
+ * An ETag not ending in '"' is left untouched.  NOTE(review): a weak
+ * ETag (W/"...") also ends in '"' and so gets the suffix as well —
+ * presumably intended, but confirm against the header filter.
+ */
+static void deflate_check_etag(request_rec *r, const char *transform)
+{
+ const char *etag = apr_table_get(r->headers_out, "ETag");
+ apr_size_t etaglen;
+
+ /* etaglen > 2 ensures there is a tag body between the quotes */
+ if ((etag && ((etaglen = strlen(etag)) > 2))) {
+ if (etag[etaglen - 1] == '"') {
+ apr_size_t transformlen = strlen(transform);
+ /* +2 covers the '-' separator, the re-appended closing
+ * quote, and the NUL (the original closing quote is
+ * dropped, reclaiming one byte). */
+ char *newtag = apr_palloc(r->pool, etaglen + transformlen + 2);
+ char *d = newtag;
+ char *e = d + etaglen - 1;
+ const char *s = etag;
+
+ for (; d < e; ++d, ++s) {
+ *d = *s; /* copy etag to newtag up to last quote */
+ }
+ *d++ = '-'; /* append dash to newtag */
+ s = transform;
+ e = d + transformlen;
+ for (; d < e; ++d, ++s) {
+ *d = *s; /* copy transform to newtag */
+ }
+ *d++ = '"'; /* append quote to newtag */
+ *d = '\0'; /* null terminate newtag */
+
+ apr_table_setn(r->headers_out, "ETag", newtag);
+ }
+ }
+}
+
static apr_status_t deflate_out_filter(ap_filter_t *f,
apr_bucket_brigade *bb)
{
request_rec *r = f->r;
deflate_ctx *ctx = f->ctx;
int zRC;
+ apr_size_t len = 0, blen;
+ const char *data;
deflate_filter_config *c;
/* Do nothing if asked to filter nothing. */
if (APR_BRIGADE_EMPTY(bb)) {
- return ap_pass_brigade(f->next, bb);
+ return APR_SUCCESS;
}
c = ap_get_module_config(r->server->module_config,
char *token;
const char *encoding;
- /* only work on main request/no subrequests */
- if (r->main != NULL) {
- ap_remove_output_filter(f);
- return ap_pass_brigade(f->next, bb);
+ /* We have checked above that bb is not empty */
+ e = APR_BRIGADE_LAST(bb);
+ if (APR_BUCKET_IS_EOS(e)) {
+ /*
+ * If we already know the size of the response, we can skip
+ * compression on responses smaller than the compression overhead.
+ * However, if we compress, we must initialize deflate_out before
+ * calling ap_pass_brigade() for the first time. Otherwise the
+ * headers will be sent to the client without
+ * "Content-Encoding: gzip".
+ */
+ e = APR_BRIGADE_FIRST(bb);
+ while (1) {
+ apr_status_t rc;
+ if (APR_BUCKET_IS_EOS(e)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Not compressing very small response of %"
+ APR_SIZE_T_FMT " bytes", len);
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+ if (APR_BUCKET_IS_METADATA(e)) {
+ e = APR_BUCKET_NEXT(e);
+ continue;
+ }
+
+ rc = apr_bucket_read(e, &data, &blen, APR_BLOCK_READ);
+ if (rc != APR_SUCCESS)
+ return rc;
+ len += blen;
+ /* 50 is for Content-Encoding and Vary headers and ETag suffix */
+ if (len > sizeof(gzip_header) + VALIDATION_SIZE + 50)
+ break;
+
+ e = APR_BUCKET_NEXT(e);
+ }
}
- /* some browsers might have problems, so set no-gzip
- * (with browsermatch) for them
+ ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx));
+
+ /*
+ * Only work on main request, not subrequests,
+ * that are not a 204 response with no content
+ * and are not tagged with the no-gzip env variable
+ * and not a partial response to a Range request.
*/
- if (apr_table_get(r->subprocess_env, "no-gzip")) {
+ if ((r->main != NULL) || (r->status == HTTP_NO_CONTENT) ||
+ apr_table_get(r->subprocess_env, "no-gzip") ||
+ apr_table_get(r->headers_out, "Content-Range")
+ ) {
+ if (APLOG_R_IS_LEVEL(r, APLOG_TRACE1)) {
+ const char *reason =
+ (r->main != NULL) ? "subrequest" :
+ (r->status == HTTP_NO_CONTENT) ? "no content" :
+ apr_table_get(r->subprocess_env, "no-gzip") ? "no-gzip" :
+ "content-range";
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Not compressing (%s)", reason);
+ }
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb);
}
const char *env_value = apr_table_get(r->subprocess_env,
"gzip-only-text/html");
if ( env_value && (strcmp(env_value,"1") == 0) ) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Not compressing, (gzip-only-text/html)");
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb);
}
/* stolen from mod_negotiation: */
if (strcmp(token, "identity") && strcmp(token, "7bit") &&
strcmp(token, "8bit") && strcmp(token, "binary")) {
-
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Not compressing (content-encoding already "
+ " set: %s)", token);
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb);
}
/* skip parameters, XXX: ;q=foo evaluation? */
while (*accepts == ';') {
++accepts;
- token = ap_get_token(r->pool, &accepts, 1);
+ ap_get_token(r->pool, &accepts, 1);
}
/* retrieve next token */
/* No acceptable token found. */
if (token == NULL || token[0] == '\0') {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Not compressing (no Accept-Encoding: gzip)");
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb);
}
}
-
- /* For a 304 or 204 response there is no entity included in
- * the response and hence nothing to deflate. */
- if (r->status == HTTP_NOT_MODIFIED || r->status == HTTP_NO_CONTENT) {
- ap_remove_output_filter(f);
- return ap_pass_brigade(f->next, bb);
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Forcing compression (force-gzip set)");
}
- /* We're cool with filtering this. */
- ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx));
- ctx->bb = apr_brigade_create(r->pool, f->c->bucket_alloc);
- ctx->buffer = apr_palloc(r->pool, c->bufferSize);
- ctx->libz_end_func = deflateEnd;
+ /* At this point we have decided to filter the content. Let's try to
+ * to initialize zlib (except for 304 responses, where we will only
+ * send out the headers).
+ */
- zRC = deflateInit2(&ctx->stream, c->compressionlevel, Z_DEFLATED,
- c->windowSize, c->memlevel,
- Z_DEFAULT_STRATEGY);
+ if (r->status != HTTP_NOT_MODIFIED) {
+ ctx->bb = apr_brigade_create(r->pool, f->c->bucket_alloc);
+ ctx->buffer = apr_palloc(r->pool, c->bufferSize);
+ ctx->libz_end_func = deflateEnd;
- if (zRC != Z_OK) {
- deflateEnd(&ctx->stream);
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
- "unable to init Zlib: "
- "deflateInit2 returned %d: URL %s",
- zRC, r->uri);
+ zRC = deflateInit2(&ctx->stream, c->compressionlevel, Z_DEFLATED,
+ c->windowSize, c->memlevel,
+ Z_DEFAULT_STRATEGY);
+
+ if (zRC != Z_OK) {
+ deflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01383)
+ "unable to init Zlib: "
+ "deflateInit2 returned %d: URL %s",
+ zRC, r->uri);
+ /*
+ * Remove ourselves as it does not make sense to return:
+ * We are not able to init libz and pass data down the chain
+ * uncompressed.
+ */
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
/*
- * Remove ourselves as it does not make sense to return:
- * We are not able to init libz and pass data down the chain
- * uncompressed.
+ * Register a cleanup function to ensure that we cleanup the internal
+ * libz resources.
*/
- ap_remove_output_filter(f);
- return ap_pass_brigade(f->next, bb);
+ apr_pool_cleanup_register(r->pool, ctx, deflate_ctx_cleanup,
+ apr_pool_cleanup_null);
+
+ /* Set the filter init flag so subsequent invocations know we are
+ * active.
+ */
+ ctx->filter_init = 1;
}
+
/*
- * Register a cleanup function to ensure that we cleanup the internal
- * libz resources.
+ * Zlib initialization worked, so we can now change the important
+ * content metadata before sending the response out.
*/
- apr_pool_cleanup_register(r->pool, ctx, deflate_ctx_cleanup,
- apr_pool_cleanup_null);
-
- /* add immortal gzip header */
- e = apr_bucket_immortal_create(gzip_header, sizeof gzip_header,
- f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(ctx->bb, e);
/* If the entire Content-Encoding is "identity", we can replace it. */
if (!encoding || !strcasecmp(encoding, "identity")) {
else {
apr_table_mergen(r->headers_out, "Content-Encoding", "gzip");
}
+ /* Fix r->content_encoding if it was set before */
+ if (r->content_encoding) {
+ r->content_encoding = apr_table_get(r->headers_out,
+ "Content-Encoding");
+ }
apr_table_unset(r->headers_out, "Content-Length");
+ apr_table_unset(r->headers_out, "Content-MD5");
+ deflate_check_etag(r, "gzip");
+
+ /* For a 304 response, only change the headers */
+ if (r->status == HTTP_NOT_MODIFIED) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ /* add immortal gzip header */
+ e = apr_bucket_immortal_create(gzip_header, sizeof gzip_header,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, e);
/* initialize deflate output buffer */
ctx->stream.next_out = ctx->buffer;
ctx->stream.avail_out = c->bufferSize;
+ } else if (!ctx->filter_init) {
+ /* Hmm. We've run through the filter init before as we have a ctx,
+ * but we never initialized. We probably have a dangling ref. Bail.
+ */
+ return ap_pass_brigade(f->next, bb);
}
while (!APR_BRIGADE_EMPTY(bb))
{
- const char *data;
apr_bucket *b;
- apr_size_t len;
+
+ /*
+ * Optimization: If we are a HEAD request and bytes_sent is not zero
+ * it means that we have passed the content-length filter once and
+ * have more data to sent. This means that the content-length filter
+ * could not determine our content-length for the response to the
+ * HEAD request anyway (the associated GET request would deliver the
+ * body in chunked encoding) and we can stop compressing.
+ */
+ if (r->header_only && r->bytes_sent) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
e = APR_BRIGADE_FIRST(bb);
b = apr_bucket_pool_create(buf, VALIDATION_SIZE, r->pool,
f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01384)
"Zlib: Compressed %ld to %ld : URL %s",
ctx->stream.total_in, ctx->stream.total_out, r->uri);
zRC = flush_libz_buffer(ctx, c, f->c->bucket_alloc, deflate,
Z_SYNC_FLUSH, NO_UPDATE_CRC);
if (zRC != Z_OK) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01385)
+ "Zlib error %d flushing zlib output buffer (%s)",
+ zRC, ctx->stream.msg);
return APR_EGENERAL;
}
continue;
}
- if (AP_BUCKET_IS_ERROR(e)) {
- ap_remove_output_filter(f);
- return ap_pass_brigade(f->next, bb);
+ if (APR_BUCKET_IS_METADATA(e)) {
+ /*
+ * Remove meta data bucket from old brigade and insert into the
+ * new.
+ */
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, e);
+ continue;
}
/* read */
zRC = deflate(&(ctx->stream), Z_NO_FLUSH);
if (zRC != Z_OK) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01386)
+ "Zlib error %d deflating data (%s)", zRC,
+ ctx->stream.msg);
return APR_EGENERAL;
}
}
c = ap_get_module_config(r->server->module_config, &deflate_module);
if (!ctx) {
- int found = 0;
- char *token, deflate_hdr[10];
- const char *encoding;
+ char deflate_hdr[10];
apr_size_t len;
/* only work on main request/no subrequests */
return ap_get_brigade(f->next, bb, mode, block, readbytes);
}
- /* Let's see what our current Content-Encoding is.
- * If gzip is present, don't gzip again. (We could, but let's not.)
- */
- encoding = apr_table_get(r->headers_in, "Content-Encoding");
- if (encoding) {
- const char *tmp = encoding;
-
- token = ap_get_token(r->pool, &tmp, 0);
- while (token && token[0]) {
- if (!strcasecmp(token, "gzip")) {
- found = 1;
- break;
- }
- /* Otherwise, skip token */
- tmp++;
- token = ap_get_token(r->pool, &tmp, 0);
- }
+ /* We can't operate on Content-Ranges */
+ if (apr_table_get(r->headers_in, "Content-Range") != NULL) {
+ ap_remove_input_filter(f);
+ return ap_get_brigade(f->next, bb, mode, block, readbytes);
}
- if (found == 0) {
+ /* Check whether request body is gzipped.
+ *
+ * If it is, we're transforming the contents, invalidating
+ * some request headers including Content-Encoding.
+ *
+ * If not, we just remove ourself.
+ */
+ if (check_gzip(r, r->headers_in, NULL) == 0) {
ap_remove_input_filter(f);
return ap_get_brigade(f->next, bb, mode, block, readbytes);
}
return rv;
}
+ apr_table_unset(r->headers_in, "Content-Length");
+ apr_table_unset(r->headers_in, "Content-MD5");
+
len = 10;
rv = apr_brigade_flatten(ctx->bb, deflate_hdr, &len);
if (rv != APR_SUCCESS) {
if (len != 10 ||
deflate_hdr[0] != deflate_magic[0] ||
deflate_hdr[1] != deflate_magic[1]) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01387) "Zlib: Invalid header");
return APR_EGENERAL;
}
/* We can't handle flags for now. */
if (deflate_hdr[3] != 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01388)
+ "Zlib: Unsupported flags %02x", (int)deflate_hdr[3]);
return APR_EGENERAL;
}
if (zRC != Z_OK) {
f->ctx = NULL;
inflateEnd(&ctx->stream);
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01389)
"unable to init Zlib: "
"inflateInit2 returned %d: URL %s",
zRC, r->uri);
/* If we actually see the EOS, that means we screwed up! */
if (APR_BUCKET_IS_EOS(bkt)) {
inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01390)
+ "Encountered EOS bucket in inflate filter (bug?)");
return APR_EGENERAL;
}
zRC = inflate(&(ctx->stream), Z_SYNC_FLUSH);
if (zRC != Z_OK) {
inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01391)
+ "Zlib error %d inflating data (%s)", zRC,
+ ctx->stream.msg);
return APR_EGENERAL;
}
if (zRC != Z_OK) {
inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01392)
+ "Zlib error %d inflating data (%s)", zRC,
+ ctx->stream.msg);
return APR_EGENERAL;
}
}
if (zRC == Z_STREAM_END) {
apr_bucket *tmp_heap, *eos;
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01393)
"Zlib: Inflated %ld to %ld : URL %s",
ctx->stream.total_in, ctx->stream.total_out,
r->uri);
compCRC = getLong(ctx->stream.next_in);
if (ctx->crc != compCRC) {
inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01394)
+ "Zlib: CRC error inflating data");
return APR_EGENERAL;
}
ctx->stream.next_in += 4;
compLen = getLong(ctx->stream.next_in);
if (ctx->stream.total_out != compLen) {
inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01395)
+ "Zlib: Length %ld of inflated data does "
+ "not match expected value %ld",
+ ctx->stream.total_out, compLen);
return APR_EGENERAL;
}
}
/* FIXME: We need to grab the 8 verification bytes
* from the wire! */
inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01396)
+ "Verification data not available (bug?)");
return APR_EGENERAL;
}
}
if (!APR_BRIGADE_EMPTY(ctx->proc_bb)) {
- apr_bucket_brigade *newbb;
-
- /* May return APR_INCOMPLETE which is fine by us. */
- apr_brigade_partition(ctx->proc_bb, readbytes, &bkt);
-
- newbb = apr_brigade_split(ctx->proc_bb, bkt);
- APR_BRIGADE_CONCAT(bb, ctx->proc_bb);
- APR_BRIGADE_CONCAT(ctx->proc_bb, newbb);
+ if (apr_brigade_partition(ctx->proc_bb, readbytes, &bkt) == APR_INCOMPLETE) {
+ APR_BRIGADE_CONCAT(bb, ctx->proc_bb);
+ }
+ else {
+ APR_BRIGADE_CONCAT(bb, ctx->proc_bb);
+ apr_brigade_split_ex(bb, bkt, ctx->proc_bb);
+ }
}
return APR_SUCCESS;
{
int zlib_method;
int zlib_flags;
- int inflate_init = 1;
apr_bucket *e;
request_rec *r = f->r;
deflate_ctx *ctx = f->ctx;
/* Do nothing if asked to filter nothing. */
if (APR_BRIGADE_EMPTY(bb)) {
- return ap_pass_brigade(f->next, bb);
+ return APR_SUCCESS;
}
c = ap_get_module_config(r->server->module_config, &deflate_module);
if (!ctx) {
- int found = 0;
- char *token;
- const char *encoding;
- /* only work on main request/no subrequests */
- if (!ap_is_initial_req(r)) {
+ /*
+ * Only work on main request, not subrequests,
+ * that are not a 204 response with no content
+ * and not a partial response to a Range request,
+ * and only when Content-Encoding ends in gzip.
+ */
+ if (!ap_is_initial_req(r) || (r->status == HTTP_NO_CONTENT) ||
+ (apr_table_get(r->headers_out, "Content-Range") != NULL) ||
+ (check_gzip(r, r->headers_out, r->err_headers_out) == 0)
+ ) {
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb);
}
/*
- * Let's see what our current Content-Encoding is.
- * Only inflate if gzip is present.
+ * At this point we have decided to filter the content, so change
+ * important content metadata before sending any response out.
+ * Content-Encoding was already reset by the check_gzip() call.
*/
- encoding = apr_table_get(r->headers_out, "Content-Encoding");
- if (encoding) {
- const char *tmp = encoding;
-
- token = ap_get_token(r->pool, &tmp, 0);
- while (token && token[0]) {
- if (!strcasecmp(token, "gzip")) {
- found = 1;
- break;
- }
- /* Otherwise, skip token */
- tmp++;
- token = ap_get_token(r->pool, &tmp, 0);
- }
- }
-
- if (found == 0) {
- ap_remove_output_filter(f);
- return ap_pass_brigade(f->next, bb);
- }
- apr_table_unset(r->headers_out, "Content-Encoding");
+ apr_table_unset(r->headers_out, "Content-Length");
+ apr_table_unset(r->headers_out, "Content-MD5");
+ deflate_check_etag(r, "gunzip");
- /* No need to inflate HEAD or 204/304 */
- if (APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(bb))) {
+ /* For a 304 response, only change the headers */
+ if (r->status == HTTP_NOT_MODIFIED) {
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb);
}
-
f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
ctx->bb = apr_brigade_create(r->pool, f->c->bucket_alloc);
ctx->buffer = apr_palloc(r->pool, c->bufferSize);
if (zRC != Z_OK) {
f->ctx = NULL;
inflateEnd(&ctx->stream);
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01397)
"unable to init Zlib: "
"inflateInit2 returned %d: URL %s",
zRC, r->uri);
apr_pool_cleanup_register(r->pool, ctx, deflate_ctx_cleanup,
apr_pool_cleanup_null);
- apr_table_unset(r->headers_out, "Content-Length");
-
/* initialize inflate output buffer */
ctx->stream.next_out = ctx->buffer;
ctx->stream.avail_out = c->bufferSize;
- inflate_init = 0;
+ ctx->inflate_init = 0;
}
while (!APR_BRIGADE_EMPTY(bb))
*/
flush_libz_buffer(ctx, c, f->c->bucket_alloc, inflate, Z_SYNC_FLUSH,
UPDATE_CRC);
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01398)
"Zlib: Inflated %ld to %ld : URL %s",
ctx->stream.total_in, ctx->stream.total_out, r->uri);
unsigned long compCRC, compLen;
compCRC = getLong(ctx->validation_buffer);
if (ctx->crc != compCRC) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01399)
"Zlib: Checksum of inflated stream invalid");
return APR_EGENERAL;
}
ctx->validation_buffer += VALIDATION_SIZE / 2;
compLen = getLong(ctx->validation_buffer);
if (ctx->stream.total_out != compLen) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01400)
"Zlib: Length of inflated stream invalid");
return APR_EGENERAL;
}
}
else {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01401)
"Zlib: Validation bytes not present");
return APR_EGENERAL;
}
zRC = flush_libz_buffer(ctx, c, f->c->bucket_alloc, inflate,
Z_SYNC_FLUSH, UPDATE_CRC);
if (zRC != Z_OK) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01402)
+ "Zlib error %d flushing inflate buffer (%s)",
+ zRC, ctx->stream.msg);
return APR_EGENERAL;
}
continue;
}
+ if (APR_BUCKET_IS_METADATA(e)) {
+ /*
+ * Remove meta data bucket from old brigade and insert into the
+ * new.
+ */
+ APR_BUCKET_REMOVE(e);
+ APR_BRIGADE_INSERT_TAIL(ctx->bb, e);
+ continue;
+ }
+
/* read */
apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
/* first bucket contains zlib header */
- if (!inflate_init++) {
+ if (!ctx->inflate_init++) {
if (len < 10) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01403)
"Insufficient data for inflate");
return APR_EGENERAL;
}
zlib_method = data[2];
zlib_flags = data[3];
if (zlib_method != Z_DEFLATED) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01404)
"inflate: data not deflated!");
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb);
if (data[0] != deflate_magic[0] ||
data[1] != deflate_magic[1] ||
(zlib_flags & RESERVED) != 0) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01405)
"inflate: bad header");
return APR_EGENERAL ;
}
bytes += ((unsigned int)(data[1])) << 8;
bytes += 2;
if (len < bytes) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01406)
"inflate: extra field too big (not "
"supported)");
return APR_EGENERAL;
ctx->validation_buffer_length += copy_size;
}
if (ctx->stream.avail_in) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01407)
"Zlib: %d bytes of garbage at the end of "
"compressed stream.", ctx->stream.avail_in);
/*
}
}
- zRC = Z_OK;
-
while (ctx->stream.avail_in != 0) {
if (ctx->stream.avail_out == 0) {
VALIDATION_SIZE);
if (ctx->stream.avail_in > VALIDATION_SIZE) {
ctx->validation_buffer_length = VALIDATION_SIZE;
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01408)
"Zlib: %d bytes of garbage at the end of "
"compressed stream.",
ctx->stream.avail_in - VALIDATION_SIZE);
}
if (zRC != Z_OK) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01409)
+ "Zlib error %d inflating data (%s)", zRC,
+ ctx->stream.msg);
return APR_EGENERAL;
}
}
{NULL}
};
-module AP_MODULE_DECLARE_DATA deflate_module = {
+AP_DECLARE_MODULE(deflate) = {
STANDARD20_MODULE_STUFF,
NULL, /* dir config creater */
NULL, /* dir merger --- default is to override */