From: Yann Ylavic
Date: Tue, 4 Sep 2018 02:40:49 +0000 (+0000)
Subject: core: follow up to r1839997: recycle request filters to a delayed ring first.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=5262e7e73ade018f7849bed760d9fefac9fa9e5f;p=apache

core: follow up to r1839997: recycle request filters to a delayed ring first.

We want not only ap_filter_output_pending() to be able to access each
pending filter's *f after the EOR is destroyed, but also each request
filter to do the same until it returns.

So request filters are now always cleaned up into a dead_filters ring,
which is merged into spare_filters only when ap_filter_recycle() is
called explicitly, that is in ap_process_request_after_handler() and
ap_filter_output_pending(). The former takes care of recycling at the
end of the request, with any MPM, while the latter keeps recycling
during MPM event's write completion.


git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1840002 13f79535-47bb-0310-9956-ffa450edef68
---
diff --git a/include/ap_mmn.h b/include/ap_mmn.h
index be63709a52..6218199322 100644
--- a/include/ap_mmn.h
+++ b/include/ap_mmn.h
@@ -601,6 +601,7 @@
  * 20180903.1 (2.5.1-dev) Replace conn_rec pending_{in,out}put_filters by
  *                        filter_conn_ctx, remove argument pool from
  *                        ap_filter_prepare_brigade()
+ * 20180903.2 (2.5.1-dev) Add ap_filter_recyle()
  */
 
 #define MODULE_MAGIC_COOKIE 0x41503235UL /* "AP25" */
@@ -608,7 +609,7 @@
 #ifndef MODULE_MAGIC_NUMBER_MAJOR
 #define MODULE_MAGIC_NUMBER_MAJOR 20180903
 #endif
-#define MODULE_MAGIC_NUMBER_MINOR 1                   /* 0...n */
+#define MODULE_MAGIC_NUMBER_MINOR 2                   /* 0...n */
 
 /**
  * Determine if the server's current MODULE_MAGIC_NUMBER is at least a
diff --git a/include/util_filter.h b/include/util_filter.h
index 0417d3015c..80646e4d1e 100644
--- a/include/util_filter.h
+++ b/include/util_filter.h
@@ -643,6 +643,15 @@ AP_DECLARE_NONSTD(int) ap_filter_output_pending(conn_rec *c);
  */
 AP_DECLARE_NONSTD(int) ap_filter_input_pending(conn_rec *c);
 
+/**
+ * Recycle removed request filters so that they can be reused for filters
+ * added later on the same connection. This typically should happen after
+ * each request handling.
+ *
+ * @param c The connection.
+ */
+AP_DECLARE(void) ap_filter_recyle(conn_rec *c);
+
 /**
  * Flush function for apr_brigade_* calls.  This calls ap_pass_brigade
  * to flush the brigade if the brigade buffer overflows.
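
The delayed recycling that the log message and the new ap_filter_recyle()
declaration describe boils down to two per-connection rings: filters cleaned
up by the EOR are parked on a "dead" ring first, and only an explicit recycle
pass wipes them and moves them to the reusable "spare" ring. A minimal,
self-contained sketch of that pattern follows; the spare_elem, spare_ring,
park_dead and recycle_dead names are hypothetical and built on plain APR
rings, not the httpd structures changed below.

#include <string.h>
#include "apr_ring.h"

struct spare_elem {
    APR_RING_ENTRY(spare_elem) link;
    void *data;                 /* the recyclable object, e.g. a filter */
};
APR_RING_HEAD(spare_ring, spare_elem);

/* Cleanup time (e.g. the EOR destroying r->pool): the object must stay
 * readable until everyone still holding a pointer to it has returned,
 * so it is only parked on the dead ring and never handed out from there.
 */
static void park_dead(struct spare_ring *dead, struct spare_elem *e)
{
    APR_RING_INSERT_TAIL(dead, e, spare_elem, link);
}

/* Explicit recycle pass (the role of the new ap_filter_recyle()): once
 * nothing dereferences the dead objects anymore, wipe them and make them
 * reusable by moving them to the spare ring.
 */
static void recycle_dead(struct spare_ring *dead, struct spare_ring *spare,
                         size_t obj_size)
{
    while (!APR_RING_EMPTY(dead, spare_elem, link)) {
        struct spare_elem *e = APR_RING_FIRST(dead);
        APR_RING_REMOVE(e, link);
        memset(e->data, 0, obj_size);
        APR_RING_INSERT_TAIL(spare, e, spare_elem, link);
    }
}

The sketch leaves ring setup (APR_RING_INIT) to the caller; in util_filter.c
the rings live in the connection's filter_conn_ctx and are created from
c->pool via make_spare_ring(), as the hunks below show.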
diff --git a/modules/http/http_request.c b/modules/http/http_request.c
index 1413a20c93..ecd59f3a0c 100644
--- a/modules/http/http_request.c
+++ b/modules/http/http_request.c
@@ -401,6 +401,10 @@ AP_DECLARE(void) ap_process_request_after_handler(request_rec *r)
     (void)ap_check_pipeline(c, bb, DEFAULT_LIMIT_BLANK_LINES);
     apr_brigade_cleanup(bb);
 
+    if (!c->aborted) {
+        ap_filter_recyle(c);
+    }
+
     if (c->cs) {
         if (c->aborted) {
             c->cs->state = CONN_STATE_LINGER;
diff --git a/server/request.c b/server/request.c
index 998924150c..abfabb7cfd 100644
--- a/server/request.c
+++ b/server/request.c
@@ -2098,7 +2098,6 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_request_core_filter(ap_filter_t *f,
             APR_BRIGADE_CONCAT(tmp_bb, bb);
             ap_remove_output_filter(f);
             seen_eor = 1;
-            f->r = NULL;
         }
         else {
             /* if the core has set aside data, back off and try later */
@@ -2134,8 +2133,7 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_request_core_filter(ap_filter_t *f,
         status = ap_pass_brigade(f->next, tmp_bb);
         apr_brigade_cleanup(tmp_bb);
 
-        if (seen_eor || (status != APR_SUCCESS &&
-                         !APR_STATUS_IS_EOF(status))) {
+        if (status != APR_SUCCESS || seen_eor) {
             return status;
         }
     }
diff --git a/server/util_filter.c b/server/util_filter.c
index 8101042498..05f6f6e232 100644
--- a/server/util_filter.c
+++ b/server/util_filter.c
@@ -66,8 +66,7 @@ struct ap_filter_conn_ctx {
     struct ap_filter_spare_ring *spare_containers,
                                 *spare_brigades,
                                 *spare_filters,
-                                *spare_flushes;
-    int flushing;
+                                *dead_filters;
 };
 
 typedef struct filter_trie_node filter_trie_node;
@@ -356,19 +355,43 @@ static void put_spare(conn_rec *c, void *data,
     APR_RING_INSERT_TAIL(*ring, sdata, spare_data, link);
 }
 
-static apr_status_t filter_recycle(void *arg)
+static apr_status_t request_filter_cleanup(void *arg)
 {
     ap_filter_t *f = arg;
     conn_rec *c = f->c;
     struct ap_filter_conn_ctx *x = c->filter_conn_ctx;
 
-    memset(f, 0, sizeof(*f));
-    put_spare(c, f, x->flushing ? &x->spare_flushes
-                                : &x->spare_filters);
+    /* A request filter is cleaned up with an EOR bucket, so possibly
+     * while it is handling/passing the EOR, and we want each filter or
+     * ap_filter_output_pending() to be able to dereference f until they
+     * return. So request filters are recycled in dead_filters and will only
+     * be moved to spare_filters when ap_filter_recycle() is called explicitly.
+     * Set f->r to NULL still for any use after free to crash quite reliably.
+     */
+    f->r = NULL;
+    put_spare(c, f, &x->dead_filters);
 
     return APR_SUCCESS;
 }
 
+AP_DECLARE(void) ap_filter_recyle(conn_rec *c)
+{
+    struct ap_filter_conn_ctx *x = get_conn_ctx(c);
+
+    if (!x || !x->dead_filters) {
+        return;
+    }
+
+    make_spare_ring(&x->spare_filters, c->pool);
+    while (!APR_RING_EMPTY(x->dead_filters, spare_data, link)) {
+        struct spare_data *sdata = APR_RING_FIRST(x->dead_filters);
+
+        APR_RING_REMOVE(sdata, link);
+        memset(sdata->data, 0, sizeof(ap_filter_t));
+        APR_RING_INSERT_TAIL(x->spare_filters, sdata, spare_data, link);
+    }
+}
+
 static ap_filter_t *add_any_filter_handle(ap_filter_rec_t *frec, void *ctx,
                                           request_rec *r, conn_rec *c,
                                           ap_filter_t **r_filters,
@@ -413,7 +436,7 @@ static ap_filter_t *add_any_filter_handle(ap_filter_rec_t *frec, void *ctx,
     f->ctx = ctx;
     /* f->r must always be NULL for connection filters */
     if (r && frec->ftype < AP_FTYPE_CONNECTION) {
-        apr_pool_cleanup_register(r->pool, f, filter_recycle,
+        apr_pool_cleanup_register(r->pool, f, request_filter_cleanup,
                                   apr_pool_cleanup_null);
         f->r = r;
     }
@@ -1113,16 +1136,6 @@ AP_DECLARE_NONSTD(int) ap_filter_output_pending(conn_rec *c)
         return DECLINED;
     }
 
-    /* Flushing pending filters' brigades, so any f->r->pool may be cleaned up
-     * with its EOR (underneath this loop). Tell filter_recycle() we want f->r
-     * filters to be recycled in spare_flushes rather than spare_filters, such
-     * that they can *not* be reused during the flush(es), so we can safely
-     * access *f after it's been recycled (i.e. to check f->bb). After the
-     * flush(es), we can then merge spare_flushes into spare_filters to make
-     * them finally reusable.
-     */
-    x->flushing = 1;
-
     bb = ap_reuse_brigade_from_pool("ap_fop_bb", c->pool,
                                     c->bucket_alloc);
 
@@ -1160,13 +1173,10 @@ AP_DECLARE_NONSTD(int) ap_filter_output_pending(conn_rec *c)
         }
     }
 
-    /* No more flushing, make spare_flushes reusable before leaving. */
-    if (x->spare_flushes && !APR_RING_EMPTY(x->spare_flushes,
-                                            spare_data, link)) {
-        make_spare_ring(&x->spare_filters, c->pool);
-        APR_RING_CONCAT(x->spare_filters, x->spare_flushes, spare_data, link);
-    }
-    x->flushing = 0;
+    /* No more flushing, all filters have returned, we can recycle dead request
+     * filters before leaving (i.e. make them reusable, not leak).
+     */
+    ap_filter_recyle(c);
 
     return rc;
 }
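
For illustration, the situation the dead_filters ring is meant to make safe
looks like the following hypothetical request-level output filter; the filter
name and the trace message are assumptions for this sketch, not part of the
patch. Passing a brigade that contains the EOR bucket may destroy r->pool and
run request_filter_cleanup() on *f, yet the filter may keep reading *f
(though not f->r, which is reset to NULL) until it returns, because *f is
only parked on dead_filters and is not wiped or reused before
ap_filter_recyle() runs later.

#include "httpd.h"
#include "http_log.h"
#include "util_filter.h"

static apr_status_t example_eor_aware_filter(ap_filter_t *f,
                                             apr_bucket_brigade *bb)
{
    const char *name = f->frec->name;
    apr_status_t rv;

    /* bb may contain the EOR bucket: passing it down can destroy r->pool
     * and trigger request_filter_cleanup() on this very filter.
     */
    rv = ap_pass_brigade(f->next, bb);

    /* Safe with this change: *f is still intact (only f->r was cleared),
     * since recycling is deferred until ap_filter_recyle() is called
     * after this filter has returned.
     */
    ap_log_cerror(APLOG_MARK, APLOG_TRACE6, rv, f->c,
                  "%s: back from pass_brigade (f->r %s)",
                  name, f->r ? "still set" : "cleared by EOR");
    return rv;
}

Before this follow-up, *f was memset to zero as soon as the EOR destroyed
r->pool, so a dereference like the one above could read cleared or already
reused memory; only ap_filter_output_pending() was protected, via the now
removed spare_flushes ring.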