unsigned char *validation_buffer;
apr_size_t validation_buffer_length;
int inflate_init;
+ int filter_init;
} deflate_ctx;
/* Number of validation bytes (CRC and length) after the compressed data */
apr_table_setn(r->headers_out, "ETag", newtag);
}
- }
+ }
}
static apr_status_t deflate_out_filter(ap_filter_t *f,
request_rec *r = f->r;
deflate_ctx *ctx = f->ctx;
int zRC;
+ apr_size_t len = 0, blen;
+ const char *data;
deflate_filter_config *c;
/* Do nothing if asked to filter nothing. */
char *token;
const char *encoding;
+ /* We have checked above that bb is not empty */
+ e = APR_BRIGADE_LAST(bb);
+ if (APR_BUCKET_IS_EOS(e)) {
+ /*
+ * If we already know the size of the response, we can skip
+ * compression on responses smaller than the compression overhead.
+ * However, if we compress, we must initialize deflate_out before
+ * calling ap_pass_brigade() for the first time. Otherwise the
+ * headers will be sent to the client without
+ * "Content-Encoding: gzip".
+ */
+ e = APR_BRIGADE_FIRST(bb);
+ while (1) {
+ apr_status_t rc;
+ if (APR_BUCKET_IS_EOS(e)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Not compressing very small response of %"
+ APR_SIZE_T_FMT " bytes", len);
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+ if (APR_BUCKET_IS_METADATA(e)) {
+ e = APR_BUCKET_NEXT(e);
+ continue;
+ }
+
+ rc = apr_bucket_read(e, &data, &blen, APR_BLOCK_READ);
+ if (rc != APR_SUCCESS)
+ return rc;
+ len += blen;
+ /* 50 is for Content-Encoding and Vary headers and ETag suffix */
+ if (len > sizeof(gzip_header) + VALIDATION_SIZE + 50)
+ break;
+
+ e = APR_BUCKET_NEXT(e);
+ }
+ }
+
+ ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx));
+
/*
* Only work on main request, not subrequests,
* that are not a 204 response with no content
apr_table_get(r->subprocess_env, "no-gzip") ||
apr_table_get(r->headers_out, "Content-Range")
) {
+ if (APLOG_R_IS_LEVEL(r, APLOG_TRACE1)) {
+ const char *reason =
+ (r->main != NULL) ? "subrequest" :
+ (r->status == HTTP_NO_CONTENT) ? "no content" :
+ apr_table_get(r->subprocess_env, "no-gzip") ? "no-gzip" :
+ "content-range";
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Not compressing (%s)", reason);
+ }
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb);
}
const char *env_value = apr_table_get(r->subprocess_env,
"gzip-only-text/html");
if ( env_value && (strcmp(env_value,"1") == 0) ) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Not compressing, (gzip-only-text/html)");
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb);
}
/* stolen from mod_negotiation: */
if (strcmp(token, "identity") && strcmp(token, "7bit") &&
strcmp(token, "8bit") && strcmp(token, "binary")) {
-
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Not compressing (content-encoding already "
+ " set: %s)", token);
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb);
}
/* skip parameters, XXX: ;q=foo evaluation? */
while (*accepts == ';') {
++accepts;
- token = ap_get_token(r->pool, &accepts, 1);
+ ap_get_token(r->pool, &accepts, 1);
}
/* retrieve next token */
/* No acceptable token found. */
if (token == NULL || token[0] == '\0') {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Not compressing (no Accept-Encoding: gzip)");
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb);
}
}
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Forcing compression (force-gzip set)");
+ }
/* At this point we have decided to filter the content. Let's try to
* to initialize zlib (except for 304 responses, where we will only
*/
if (r->status != HTTP_NOT_MODIFIED) {
- ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx));
ctx->bb = apr_brigade_create(r->pool, f->c->bucket_alloc);
ctx->buffer = apr_palloc(r->pool, c->bufferSize);
ctx->libz_end_func = deflateEnd;
if (zRC != Z_OK) {
deflateEnd(&ctx->stream);
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01383)
"unable to init Zlib: "
"deflateInit2 returned %d: URL %s",
zRC, r->uri);
*/
apr_pool_cleanup_register(r->pool, ctx, deflate_ctx_cleanup,
apr_pool_cleanup_null);
+
+ /* Set the filter init flag so subsequent invocations know we are
+ * active.
+ */
+ ctx->filter_init = 1;
}
/*
/* initialize deflate output buffer */
ctx->stream.next_out = ctx->buffer;
ctx->stream.avail_out = c->bufferSize;
+ } else if (!ctx->filter_init) {
+ /* Hmm. We've run through the filter init before as we have a ctx,
+ * but we never initialized. We probably have a dangling ref. Bail.
+ */
+ return ap_pass_brigade(f->next, bb);
}
while (!APR_BRIGADE_EMPTY(bb))
{
- const char *data;
apr_bucket *b;
- apr_size_t len;
+
+ /*
+ * Optimization: If we are a HEAD request and bytes_sent is not zero
+ * it means that we have passed the content-length filter once and
+     * have more data to send. This means that the content-length filter
+ * could not determine our content-length for the response to the
+ * HEAD request anyway (the associated GET request would deliver the
+ * body in chunked encoding) and we can stop compressing.
+ */
+ if (r->header_only && r->bytes_sent) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
e = APR_BRIGADE_FIRST(bb);
b = apr_bucket_pool_create(buf, VALIDATION_SIZE, r->pool,
f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01384)
"Zlib: Compressed %ld to %ld : URL %s",
ctx->stream.total_in, ctx->stream.total_out, r->uri);
zRC = flush_libz_buffer(ctx, c, f->c->bucket_alloc, deflate,
Z_SYNC_FLUSH, NO_UPDATE_CRC);
if (zRC != Z_OK) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01385)
+ "Zlib error %d flushing zlib output buffer (%s)",
+ zRC, ctx->stream.msg);
return APR_EGENERAL;
}
zRC = deflate(&(ctx->stream), Z_NO_FLUSH);
if (zRC != Z_OK) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01386)
+ "Zlib error %d deflating data (%s)", zRC,
+ ctx->stream.msg);
return APR_EGENERAL;
}
}
if (len != 10 ||
deflate_hdr[0] != deflate_magic[0] ||
deflate_hdr[1] != deflate_magic[1]) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01387) "Zlib: Invalid header");
return APR_EGENERAL;
}
/* We can't handle flags for now. */
if (deflate_hdr[3] != 0) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01388)
+ "Zlib: Unsupported flags %02x", (int)deflate_hdr[3]);
return APR_EGENERAL;
}
if (zRC != Z_OK) {
f->ctx = NULL;
inflateEnd(&ctx->stream);
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01389)
"unable to init Zlib: "
"inflateInit2 returned %d: URL %s",
zRC, r->uri);
/* If we actually see the EOS, that means we screwed up! */
if (APR_BUCKET_IS_EOS(bkt)) {
inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01390)
+ "Encountered EOS bucket in inflate filter (bug?)");
return APR_EGENERAL;
}
zRC = inflate(&(ctx->stream), Z_SYNC_FLUSH);
if (zRC != Z_OK) {
inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01391)
+ "Zlib error %d inflating data (%s)", zRC,
+ ctx->stream.msg);
return APR_EGENERAL;
}
if (zRC != Z_OK) {
inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01392)
+ "Zlib error %d inflating data (%s)", zRC,
+ ctx->stream.msg);
return APR_EGENERAL;
}
}
if (zRC == Z_STREAM_END) {
apr_bucket *tmp_heap, *eos;
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01393)
"Zlib: Inflated %ld to %ld : URL %s",
ctx->stream.total_in, ctx->stream.total_out,
r->uri);
compCRC = getLong(ctx->stream.next_in);
if (ctx->crc != compCRC) {
inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01394)
+ "Zlib: CRC error inflating data");
return APR_EGENERAL;
}
ctx->stream.next_in += 4;
compLen = getLong(ctx->stream.next_in);
if (ctx->stream.total_out != compLen) {
inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01395)
+ "Zlib: Length %ld of inflated data does "
+ "not match expected value %ld",
+ ctx->stream.total_out, compLen);
return APR_EGENERAL;
}
}
/* FIXME: We need to grab the 8 verification bytes
* from the wire! */
inflateEnd(&ctx->stream);
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01396)
+ "Verification data not available (bug?)");
return APR_EGENERAL;
}
if (zRC != Z_OK) {
f->ctx = NULL;
inflateEnd(&ctx->stream);
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01397)
"unable to init Zlib: "
"inflateInit2 returned %d: URL %s",
zRC, r->uri);
*/
flush_libz_buffer(ctx, c, f->c->bucket_alloc, inflate, Z_SYNC_FLUSH,
UPDATE_CRC);
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01398)
"Zlib: Inflated %ld to %ld : URL %s",
ctx->stream.total_in, ctx->stream.total_out, r->uri);
unsigned long compCRC, compLen;
compCRC = getLong(ctx->validation_buffer);
if (ctx->crc != compCRC) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01399)
"Zlib: Checksum of inflated stream invalid");
return APR_EGENERAL;
}
ctx->validation_buffer += VALIDATION_SIZE / 2;
compLen = getLong(ctx->validation_buffer);
if (ctx->stream.total_out != compLen) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01400)
"Zlib: Length of inflated stream invalid");
return APR_EGENERAL;
}
}
else {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01401)
"Zlib: Validation bytes not present");
return APR_EGENERAL;
}
zRC = flush_libz_buffer(ctx, c, f->c->bucket_alloc, inflate,
Z_SYNC_FLUSH, UPDATE_CRC);
if (zRC != Z_OK) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01402)
+ "Zlib error %d flushing inflate buffer (%s)",
+ zRC, ctx->stream.msg);
return APR_EGENERAL;
}
/* first bucket contains zlib header */
if (!ctx->inflate_init++) {
if (len < 10) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01403)
"Insufficient data for inflate");
return APR_EGENERAL;
}
zlib_method = data[2];
zlib_flags = data[3];
if (zlib_method != Z_DEFLATED) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01404)
"inflate: data not deflated!");
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, bb);
if (data[0] != deflate_magic[0] ||
data[1] != deflate_magic[1] ||
(zlib_flags & RESERVED) != 0) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01405)
"inflate: bad header");
return APR_EGENERAL ;
}
bytes += ((unsigned int)(data[1])) << 8;
bytes += 2;
if (len < bytes) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01406)
"inflate: extra field too big (not "
"supported)");
return APR_EGENERAL;
ctx->validation_buffer_length += copy_size;
}
if (ctx->stream.avail_in) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01407)
"Zlib: %d bytes of garbage at the end of "
"compressed stream.", ctx->stream.avail_in);
/*
}
}
- zRC = Z_OK;
-
while (ctx->stream.avail_in != 0) {
if (ctx->stream.avail_out == 0) {
VALIDATION_SIZE);
if (ctx->stream.avail_in > VALIDATION_SIZE) {
ctx->validation_buffer_length = VALIDATION_SIZE;
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01408)
"Zlib: %d bytes of garbage at the end of "
"compressed stream.",
ctx->stream.avail_in - VALIDATION_SIZE);
}
if (zRC != Z_OK) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01409)
+ "Zlib error %d inflating data (%s)", zRC,
+ ctx->stream.msg);
return APR_EGENERAL;
}
}
{NULL}
};
-module AP_MODULE_DECLARE_DATA deflate_module = {
+AP_DECLARE_MODULE(deflate) = {
STANDARD20_MODULE_STUFF,
NULL, /* dir config creater */
NULL, /* dir merger --- default is to override */