From: Ruediger Pluem Date: Fri, 21 Oct 2005 13:54:38 +0000 (+0000) Subject: * Fix PR37100 (SEGV in mod_proxy_ajp), by sending the data up the filter X-Git-Tag: 2.3.0~2851 X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=a9d2ab75d0c8034df985f59bdb2e8600372d9d94;p=apache * Fix PR37100 (SEGV in mod_proxy_ajp), by sending the data up the filter chain immediately instead of spooling it completely before passing it to the filter chain. It contains a bandaid to handle intentional flushes from Tomcat side. Further explanation in code and report. ajp.h: Add ajp_msg_reuse prototype mod_proxy_ajp.c: Adjust logic of ap_proxy_ajp_request ajp_msg.c: Add ajp_msg_reuse ajp_header.c: Adjusting logic of ajp_read_header git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@327185 13f79535-47bb-0310-9956-ffa450edef68 --- diff --git a/CHANGES b/CHANGES index 645318a4d3..c172903a47 100644 --- a/CHANGES +++ b/CHANGES @@ -2,6 +2,9 @@ Changes with Apache 2.3.0 [Remove entries to the current 2.0 and 2.2 section below, when backported] + *) mod_proxy_ajp: Do not spool the entire response from AJP backend before + sending it up the filter chain. PR37100. [Ruediger Pluem] + *) core: AddOutputFilterByType is ignored for proxied requests. PR31226. 
[Joe Orton, Ruediger Pluem] diff --git a/modules/proxy/ajp.h b/modules/proxy/ajp.h index 8a92fa5bda..58bf173853 100644 --- a/modules/proxy/ajp.h +++ b/modules/proxy/ajp.h @@ -35,6 +35,7 @@ #include "apr_buckets.h" #include "apr_md5.h" #include "apr_network_io.h" +#include "apr_poll.h" #include "apr_pools.h" #include "apr_strings.h" #include "apr_uri.h" @@ -186,6 +187,14 @@ apr_status_t ajp_msg_check_header(ajp_msg_t *msg, apr_size_t *len); */ apr_status_t ajp_msg_reset(ajp_msg_t *msg); +/** + * Reuse an AJP Message + * + * @param msg AJP Message to reuse + * @return APR_SUCCESS or error + */ +apr_status_t ajp_msg_reuse(ajp_msg_t *msg); + /** * Mark the end of an AJP Message * diff --git a/modules/proxy/ajp_header.c b/modules/proxy/ajp_header.c index 03b8f4d0a6..a72183a415 100644 --- a/modules/proxy/ajp_header.c +++ b/modules/proxy/ajp_header.c @@ -615,12 +615,22 @@ apr_status_t ajp_read_header(apr_socket_t *sock, { apr_byte_t result; apr_status_t rc; - - rc = ajp_msg_create(r->pool, msg); - if (rc != APR_SUCCESS) { - ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server, - "ajp_read_header: ajp_msg_create failed"); - return rc; + + if (*msg) { + rc = ajp_msg_reuse(*msg); + if (rc != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server, + "ajp_read_header: ajp_msg_reuse failed"); + return rc; + } + } + else { + rc = ajp_msg_create(r->pool, msg); + if (rc != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server, + "ajp_read_header: ajp_msg_create failed"); + return rc; + } } ajp_msg_reset(*msg); rc = ajp_ilink_receive(sock, *msg); diff --git a/modules/proxy/ajp_msg.c b/modules/proxy/ajp_msg.c index 0696f9c7fd..21bda25241 100644 --- a/modules/proxy/ajp_msg.c +++ b/modules/proxy/ajp_msg.c @@ -138,6 +138,24 @@ apr_status_t ajp_msg_reset(ajp_msg_t *msg) return APR_SUCCESS; } +/** + * Reuse an AJP Message + * + * @param msg AJP Message to reuse + * @return APR_SUCCESS or error + */ +apr_status_t ajp_msg_reuse(ajp_msg_t *msg) +{ + apr_byte_t *buf; + + 
buf = msg->buf; + memset(msg, 0, sizeof(ajp_msg_t)); + msg->buf = buf; + msg->header_len = AJP_HEADER_LEN; + ajp_msg_reset(msg); + return APR_SUCCESS; +} + /** * Mark the end of an AJP Message * diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c index 9f8975bf34..afee93eafb 100644 --- a/modules/proxy/mod_proxy_ajp.c +++ b/modules/proxy/mod_proxy_ajp.c @@ -89,6 +89,32 @@ static int proxy_ajp_canon(request_rec *r, char *url) return OK; } +/* + * XXX: Flushing bandaid + * + * When processing CMD_AJP13_SEND_BODY_CHUNK AJP messages we will do a poll + * with FLUSH_WAIT microseconds timeout to determine if more data is currently + * available at the backend. If there is no more data available, we flush + * the data to the client by adding a flush bucket to the brigade we pass + * up the filter chain. + * This is only a bandaid to fix the AJP/1.3 protocol shortcoming of not + * sending (actually not having defined) a flush message, when the data + * should be flushed to the client. As soon as this protocol shortcoming is + * fixed this code should be removed. + * + * For further discussion see PR37100. + * http://issues.apache.org/bugzilla/show_bug.cgi?id=37100 + */ +#define FLUSHING_BANDAID 1 + +#ifdef FLUSHING_BANDAID +/* + * Wait 10000 microseconds to find out if more data is currently + * available at the backend. Just an arbitrary choice. + */ +#define FLUSH_WAIT 10000 +#endif + /* * process the request and write the response. 
*/ @@ -112,6 +138,10 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, int havebody = 1; int isok = 1; apr_off_t bb_len; +#ifdef FLUSHING_BANDAID + apr_int32_t conn_poll_fd; + apr_pollfd_t *conn_poll; +#endif /* * Send the AJP request to the remote server @@ -134,6 +164,8 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, /* allocate an AJP message to store the data of the buckets */ status = ajp_alloc_data_msg(r->pool, &buff, &bufsiz, &msg); if (status != APR_SUCCESS) { + /* We had a failure: Close connection to backend */ + conn->close++; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "proxy: ajp_alloc_data_msg failed"); return HTTP_INTERNAL_SERVER_ERROR; @@ -171,6 +203,8 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, status = apr_brigade_flatten(input_brigade, buff, &bufsiz); if (status != APR_SUCCESS) { + /* We had a failure: Close connection to backend */ + conn->close++; apr_brigade_destroy(input_brigade); ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server, "proxy: apr_brigade_flatten"); @@ -183,6 +217,8 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, if (bufsiz > 0) { status = ajp_send_data_msg(conn->sock, msg, bufsiz); if (status != APR_SUCCESS) { + /* We had a failure: Close connection to backend */ + conn->close++; apr_brigade_destroy(input_brigade); ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server, "proxy: send failed to %pI (%s)", @@ -195,9 +231,12 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, } /* read the response */ + conn->data = NULL; status = ajp_read_header(conn->sock, r, (ajp_msg_t **)&(conn->data)); if (status != APR_SUCCESS) { + /* We had a failure: Close connection to backend */ + conn->close++; apr_brigade_destroy(input_brigade); ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server, "proxy: read response failed from %pI (%s)", @@ -209,6 +248,18 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, result = ajp_parse_type(r, 
conn->data); output_brigade = apr_brigade_create(p, r->connection->bucket_alloc); +#ifdef FLUSHING_BANDAID + /* + * Prepare apr_pollfd_t struct for later check if there is currently + * data available from the backend (do not flush response to client) + * or not (flush response to client) + */ + conn_poll = apr_pcalloc(p, sizeof(apr_pollfd_t)); + conn_poll->reqevents = APR_POLLIN; + conn_poll->desc_type = APR_POLL_SOCKET; + conn_poll->desc.s = conn->sock; +#endif + bufsiz = AJP13_MAX_SEND_BODY_SZ; while (isok) { switch (result) { @@ -272,16 +323,42 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, case CMD_AJP13_SEND_BODY_CHUNK: /* AJP13_SEND_BODY_CHUNK: piece of data */ status = ajp_parse_data(r, conn->data, &size, &buff); - e = apr_bucket_transient_create(buff, size, - r->connection->bucket_alloc); - APR_BRIGADE_INSERT_TAIL(output_brigade, e); - if (status != APR_SUCCESS) + if (status == APR_SUCCESS) { + e = apr_bucket_transient_create(buff, size, + r->connection->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(output_brigade, e); + +#ifdef FLUSHING_BANDAID + /* + * If there is no more data available from backend side + * currently, flush response to client. 
+ */ + if (apr_poll(conn_poll, 1, &conn_poll_fd, FLUSH_WAIT) + == APR_TIMEUP) { + e = apr_bucket_flush_create(r->connection->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(output_brigade, e); + } +#endif + apr_brigade_length(output_brigade, 0, &bb_len); + if (bb_len != -1) + conn->worker->s->read += bb_len; + if (ap_pass_brigade(r->output_filters, + output_brigade) != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, + "proxy: error processing body"); + isok = 0; + } + apr_brigade_cleanup(output_brigade); + } + else { isok = 0; + } break; case CMD_AJP13_END_RESPONSE: e = apr_bucket_eos_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(output_brigade, e); - if (ap_pass_brigade(r->output_filters, output_brigade) != APR_SUCCESS) { + if (ap_pass_brigade(r->output_filters, + output_brigade) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "proxy: error processing body"); isok = 0; @@ -291,6 +368,19 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, isok = 0; break; } + + /* + * If connection has been aborted by client: Stop working. + * Nevertheless, we regard our operation so far as a success: + * So do not set isok to 0 and set result to CMD_AJP13_END_RESPONSE + * But: Close this connection to the backend. 
+ */ + if (r->connection->aborted) { + conn->close++; + result = CMD_AJP13_END_RESPONSE; + break; + } + if (!isok) break; @@ -310,14 +400,11 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, } apr_brigade_destroy(input_brigade); - apr_brigade_length(output_brigade, 0, &bb_len); - if (bb_len != -1) - conn->worker->s->read += bb_len; - - if (!isok) - apr_brigade_destroy(output_brigade); + apr_brigade_destroy(output_brigade); if (status != APR_SUCCESS) { + /* We had a failure: Close connection to backend */ + conn->close++; ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server, "proxy: send body failed to %pI (%s)", conn->worker->cp->addr, @@ -340,6 +427,8 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, conn->worker->cp->addr, conn->worker->hostname); + /* We had a failure: Close connection to backend */ + conn->close++; return HTTP_SERVICE_UNAVAILABLE; }