APR_BRIGADE_INSERT_TAIL(bb, b);
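+ /* A failure from ap_pass_brigade() has already been dealt with further
+ * down the output filter chain; returning AP_FILTER_ERROR tells ap_die()
+ * not to generate a second error response on top of it.
+ */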
if (ap_pass_brigade(r->output_filters, bb) != APR_SUCCESS)
- return HTTP_INTERNAL_SERVER_ERROR;
+ return AP_FILTER_ERROR;
#endif
return OK;
}
APR_BRIGADE_INSERT_TAIL(bb, b);
if (ap_pass_brigade(r->output_filters, bb) != APR_SUCCESS)
- return HTTP_INTERNAL_SERVER_ERROR;
+ return AP_FILTER_ERROR;
#endif
return OK;
}
input_brigade = apr_brigade_create(r->connection->pool, r->connection->bucket_alloc);
status = ap_get_brigade(r->input_filters, input_brigade, AP_MODE_READBYTES, APR_BLOCK_READ, MAX_MSG_LEN);
if (status != APR_SUCCESS) {
- return HTTP_INTERNAL_SERVER_ERROR;
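+ /* ap_map_http_request_error() maps the APR error reported by the input
+ * filters to a matching HTTP status and passes AP_FILTER_ERROR through
+ * untouched; HTTP_BAD_REQUEST is only the fallback.
+ */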
+ return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
}
apr_brigade_flatten(input_brigade, buf, &len);
APR_BRIGADE_INSERT_TAIL(bb, bkt);
if ((status = ap_pass_brigade(output, bb)) != APR_SUCCESS) {
- return dav_new_error(pool, HTTP_FORBIDDEN, 0, status,
+ return dav_new_error(pool, AP_FILTER_ERROR, 0, status,
"Could not write contents to filter.");
}
/* log the errors */
dav_log_err(r, err, APLOG_ERR);
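+ /* ap_is_HTTP_VALID_RESPONSE() accepts genuine 1xx-5xx status codes only. */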
+ if (!ap_is_HTTP_VALID_RESPONSE(err->status)) {
+ /* we have responded already */
+ return AP_FILTER_ERROR;
+ }
+
if (response == NULL) {
dav_error *stackerr = err;
"(URI: %s)", msg);
}
else {
- /* XXX: should this actually be HTTP_BAD_REQUEST? */
- http_err = ap_map_http_request_error(rc,
- HTTP_INTERNAL_SERVER_ERROR);
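+ /* Failing to read the request body is, by default, the client's fault,
+ * so fall back to HTTP_BAD_REQUEST rather than a 500 when
+ * ap_map_http_request_error() has no better mapping for rc.
+ */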
+ http_err = ap_map_http_request_error(rc, HTTP_BAD_REQUEST);
msg = apr_psprintf(r->pool,
"An error occurred while reading"
" the request body (URI: %s)", msg);
APR_BLOCK_READ, HUGE_STRING_LEN);
if (status != APR_SUCCESS) {
- if (status == AP_FILTER_ERROR) {
- apr_brigade_destroy(bbin);
- return status;
- }
- else {
- apr_brigade_destroy(bbin);
- return HTTP_BAD_REQUEST;
- }
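+ /* Clean up the brigade in every error case; the AP_FILTER_ERROR special
+ * case is covered by ap_map_http_request_error() itself.
+ */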
+ apr_brigade_destroy(bbin);
+ return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
}
for (bucket = APR_BRIGADE_FIRST(bbin);
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(01410)
"reflector_handler: ap_pass_brigade returned %i",
status);
- return HTTP_INTERNAL_SERVER_ERROR;
+ return AP_FILTER_ERROR;
}
}
if (rv != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01236)
"mod_asis: ap_pass_brigade failed for file %s", r->filename);
- return HTTP_INTERNAL_SERVER_ERROR;
+ return AP_FILTER_ERROR;
}
}
else {
}
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01225)
"Error reading request entity data");
- return ap_map_http_request_error(rv, HTTP_INTERNAL_SERVER_ERROR);
+ return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
}
for (bucket = APR_BRIGADE_FIRST(bb);
}
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01270)
"Error reading request entity data");
- return ap_map_http_request_error(rv, HTTP_INTERNAL_SERVER_ERROR);
+ return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
}
for (bucket = APR_BRIGADE_FIRST(bb);
e = apr_bucket_flush_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(bb, e);
- ap_pass_brigade(f->c->output_filters, bb);
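+ /* Don't silently ignore a failed flush; AP_FILTER_ERROR signals that the
+ * error has already been dealt with by the output filters.
+ */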
+ rv = ap_pass_brigade(f->c->output_filters, bb);
+ if (rv != APR_SUCCESS) {
+ return AP_FILTER_ERROR;
+ }
}
}
}
/* We lose the failure code here. This is why ap_get_client_block should
* not be used.
*/
+ if (rv == AP_FILTER_ERROR) {
+ /* AP_FILTER_ERROR means a filter has responded already,
+ * we are DONE.
+ */
+ apr_brigade_destroy(bb);
+ return -1;
+ }
if (rv != APR_SUCCESS) {
apr_bucket *e;
rv = HTTP_REQUEST_TIME_OUT;
}
else if (status == AP_FILTER_ERROR) {
- data_sent = -1;
+ rv = AP_FILTER_ERROR;
}
output_failed = 1;
break;
"output: %i", backend_failed, output_failed);
/* We had a failure: Close connection to backend */
conn->close = 1;
- if (data_sent < 0) {
- /* Return AP_FILTER_ERROR to let ap_die() handle the error */
- rv = AP_FILTER_ERROR;
- data_sent = 0;
- }
- else if (data_sent) {
+ if (data_sent) {
/* Return DONE to avoid error messages being added to the stream */
rv = DONE;
}
/* We had a failure: Close connection to backend */
conn->close = 1;
backend_failed = 1;
- /* Return DONE to avoid error messages being added to the stream */
if (data_sent) {
+ /* Return DONE to avoid error messages being added to the stream */
rv = DONE;
}
}
if (rv) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02015)
"could not read request body for SSL buffer");
- return HTTP_INTERNAL_SERVER_ERROR;
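+ /* Prefer the HTTP status implied by the APR error (e.g. a timeout);
+ * only fall back to a plain 500.
+ */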
+ return ap_map_http_request_error(rv, HTTP_INTERNAL_SERVER_ERROR);
}
/* Iterate through the returned brigade: setaside each bucket
&& (upgrade = apr_table_get(r->headers_in, "Upgrade")) != NULL
&& ap_find_token(r->pool, upgrade, "TLS/1.0")) {
if (upgrade_connection(r)) {
- return HTTP_INTERNAL_SERVER_ERROR;
+ return AP_FILTER_ERROR;
}
}
apr_brigade_cleanup(db->tmpbb);
- if (status != OK) {
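+ /* ap_pass_brigade() returns an apr_status_t, so test against APR_SUCCESS;
+ * OK belongs to the handler return codes.
+ */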
+ if (status != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, db->r, APLOGNO(01867)
"dialup: pulse: ap_pass_brigade failed:");
- return status;
+ return AP_FILTER_ERROR;
}
}
return;
}
else {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, db->r, APLOGNO(01868)
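+ /* status is a handler return code here (it is handed to ap_die() below),
+ * not an apr_status_t, so log it with 0 as the APR error.
+ */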
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, db->r, APLOGNO(01868)
"dialup: pulse returned: %d", status);
db->r->status = HTTP_OK;
ap_die(status, db->r);
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(00133)
"default_handler: ap_pass_brigade returned %i",
status);
- return HTTP_INTERNAL_SERVER_ERROR;
+ return AP_FILTER_ERROR;
}
}
else { /* unusual method (not GET or POST) */
APR_BUCKET_INIT(b);
b->free = apr_bucket_free;
b->list = list;
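+ /* Error buckets must carry a real HTTP status; replace anything outside
+ * 1xx-5xx (e.g. an apr_status_t passed by mistake) with a 500.
+ */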
+ if (!ap_is_HTTP_VALID_RESPONSE(error)) {
+ error = HTTP_INTERNAL_SERVER_ERROR;
+ }
return ap_bucket_error_make(b, error, buf, p);
}
READ_BLOCKSIZE);
if (status != APR_SUCCESS) {
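+ /* Map the read failure to an HTTP status for the read_error path. */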
+ result = ap_map_http_request_error(status, HTTP_BAD_REQUEST);
goto read_error;
}