/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #define APR_WANT_STRFUNC
21 #include "apr_strings.h"
24 #include "http_config.h"
25 #include "http_core.h"
27 #include "http_request.h"
28 #include "util_filter.h"
/* NOTE: Apache's current design doesn't allow a pool to be passed thru,
 * so we depend on a global to hold the correct pool.
 */
34 #define FILTER_POOL apr_hook_global_pool
35 #include "ap_hooks.h" /* for apr_hook_global_pool */
/*
** This macro returns true/false if a given filter should be inserted BEFORE
** another filter. This will happen when one of: 1) there isn't another
** filter; 2) that filter has a higher filter type (class); 3) that filter
** corresponds to a different request.
*/
/* True iff filter f must be inserted before 'before_this' in the chain:
 * there is no successor, the successor has a higher (more outer) filter
 * type, or the successor belongs to a different request. */
#define INSERT_BEFORE(f, before_this) ((before_this) == NULL \
                           || (before_this)->frec->ftype > (f)->frec->ftype \
                           || (before_this)->r != (f)->r)
/* Trie structure to hold the mapping from registered
 * filter names to filters.
 */
51 /* we know core's module_index is 0 */
52 #undef APLOG_MODULE_INDEX
53 #define APLOG_MODULE_INDEX AP_CORE_MODULE_INDEX
/* Per-filter private state (reachable as f->priv from an ap_filter_t).
 * NOTE(review): this copy appears truncated — the member following the
 * "Backref to owning filter" comment, the struct's closing brace, and the
 * struct header for spare_data are not visible; compare upstream
 * util_filter.c before relying on the layout. */
struct ap_filter_private {
    /* Link to a pending_ring (keep first preferably) */
    APR_RING_ENTRY(ap_filter_private) pending;

    /* Backref to owning filter */

    /* Brigade of buckets set aside (pending) for this filter. */
    apr_bucket_brigade *bb;
    /* Dedicated pool to use for deferred writes. */
    apr_pool_t *deferred_pool;

/* Ring of ap_filter_private entries with pending in/output work. */
APR_RING_HEAD(pending_ring, ap_filter_private);

/* Link member of the recyclable spare container element. */
APR_RING_ENTRY(spare_data) link;

/* Ring of spare (recyclable) containers. */
APR_RING_HEAD(spare_ring, spare_data);
/* Per-connection filter bookkeeping, hung off conn_rec->filter_conn_ctx
 * and created lazily by get_conn_ctx(). */
struct ap_filter_conn_ctx {
    /* Filters with setaside input/output buckets still to process. */
    struct pending_ring *pending_input_filters;
    struct pending_ring *pending_output_filters;

    /* Spare allocations for reuse.
     * NOTE(review): the declarator list continues on lines missing from
     * this copy — presumably spare_brigades, spare_filters and
     * dead_filters, all of which are referenced below. */
    struct spare_ring *spare_containers,
typedef struct filter_trie_node filter_trie_node;

/* One child edge of a trie node.
 * NOTE(review): the key member (char c) and struct header are not
 * visible in this copy. */
    filter_trie_node *child;
} filter_trie_child_ptr;

/* Each trie node has an array of pointers to its children.
 * The array is kept in sorted order so that add_any_filter()
 * can do a binary search.
 */
struct filter_trie_node {
    ap_filter_rec_t *frec;       /* registered filter, NULL on interior nodes */
    filter_trie_child_ptr *children;
    /* NOTE(review): nchildren/size members and the closing brace appear
     * to be on lines missing from this copy. */

/* Initial capacity of a node's children array. */
#define TRIE_INITIAL_SIZE 4
/* Link a trie node to its parent under key 'c', keeping the parent's
 * children array sorted by key.
 */
static void trie_node_link(apr_pool_t *p, filter_trie_node *parent,
                           filter_trie_node *child, int c)
    /* Grow the children array when full (pool allocation, copy over).
     * NOTE(review): the doubling of parent->size before this allocation
     * appears to be on a line missing from this copy — as written the
     * new array would be the same size; compare upstream. */
    if (parent->nchildren == parent->size) {
        filter_trie_child_ptr *new;
        new = (filter_trie_child_ptr *)apr_palloc(p, parent->size *
                                                  sizeof(filter_trie_child_ptr));
        memcpy(new, parent->children, parent->nchildren *
               sizeof(filter_trie_child_ptr));
        parent->children = new;

    /* Find the slot for 'c' in the sorted children array. */
    for (i = 0; i < parent->nchildren; i++) {
        if (c == parent->children[i].c) {
        else if (c < parent->children[i].c) {
            /* Shift the tail one slot right to open position i. */
            for (j = parent->nchildren; j > i; j--) {
                parent->children[j].c = parent->children[j - 1].c;
                parent->children[j].child = parent->children[j - 1].child;
    /* Store the new child at its sorted position. */
    parent->children[i].c = c;
    parent->children[i].child = child;
/* Allocate a new node for a trie.
 * If parent is non-NULL, link the new node under the parent node with
 * key 'c' (or, if an existing child node matches, return that one).
 */
static filter_trie_node *trie_node_alloc(apr_pool_t *p,
                                         filter_trie_node *parent, char c)
    filter_trie_node *new_node;
    /* Reuse an existing child with the same key, if present. */
    for (i = 0; i < parent->nchildren; i++) {
        if (c == parent->children[i].c) {
            return parent->children[i].child;
        else if (c < parent->children[i].c) {
            /* children are sorted: past the slot means no match */
        (filter_trie_node *)apr_palloc(p, sizeof(filter_trie_node));
    trie_node_link(p, parent, new_node, c);
    else { /* No parent node */
        new_node = (filter_trie_node *)apr_palloc(p,
                                                  sizeof(filter_trie_node));
    /* Fresh node: no filter record yet, empty children of initial size. */
    new_node->frec = NULL;
    new_node->nchildren = 0;
    new_node->size = TRIE_INITIAL_SIZE;
    new_node->children = (filter_trie_child_ptr *)apr_palloc(p,
                             new_node->size * sizeof(filter_trie_child_ptr));
/* Registration tries mapping filter names to filter records, one per
 * direction; reset to NULL by filter_cleanup() when FILTER_POOL dies. */
static filter_trie_node *registered_output_filters = NULL;
static filter_trie_node *registered_input_filters = NULL;
179 static apr_status_t filter_cleanup(void *ctx)
181 registered_output_filters = NULL;
182 registered_input_filters = NULL;
/* Look up a registered filter record by name in the given trie; returns
 * NULL if absent. Registration links upper-case aliases, so lookups work
 * for either case. */
static ap_filter_rec_t *get_filter_handle(const char *name,
                                          const filter_trie_node *filter_set)
    const filter_trie_node *node;
    /* Walk the trie one character of 'name' at a time. */
    for (n = name; *n; n++) {
        /* Binary search among this node's sorted children. */
        end = node->nchildren - 1;
        while (end >= start) {
            int middle = (end + start) / 2;
            char ch = node->children[middle].c;
                node = node->children[middle].child;
    /* Only terminal nodes carrying a filter record are registrations. */
    if (node && node->frec) {
225 AP_DECLARE(ap_filter_rec_t *)ap_get_output_filter_handle(const char *name)
227 return get_filter_handle(name, registered_output_filters);
230 AP_DECLARE(ap_filter_rec_t *)ap_get_input_filter_handle(const char *name)
232 return get_filter_handle(name, registered_input_filters);
/* Register a filter under 'name' (normalized to lower case) in the given
 * trie, creating interior nodes as needed. Upper-case aliases are linked
 * for alphabetic characters so lookups are case-insensitive. Returns the
 * newly allocated filter record. */
static ap_filter_rec_t *register_filter(const char *name,
                                        ap_filter_func filter_func,
                                        ap_init_filter_func filter_init,
                                        ap_filter_type ftype,
                                        ap_filter_direction_e direction,
                                        filter_trie_node **reg_filter_set)
    ap_filter_rec_t *frec;
    char *normalized_name;
    filter_trie_node *node;

    /* Lazily create the trie root on first registration. */
    if (!*reg_filter_set) {
        *reg_filter_set = trie_node_alloc(FILTER_POOL, NULL, 0);

    normalized_name = apr_pstrdup(FILTER_POOL, name);
    ap_str_tolower(normalized_name);

    /* Descend/extend the trie one character at a time. */
    node = *reg_filter_set;
    for (n = normalized_name; *n; n++) {
        filter_trie_node *child = trie_node_alloc(FILTER_POOL, node, *n);
        /* Alias the upper-case form of the character to the same child. */
        if (apr_isalpha(*n)) {
            trie_node_link(FILTER_POOL, node, child, apr_toupper(*n));

    /* Allocate (zeroed) and populate the filter record. */
    frec = apr_pcalloc(FILTER_POOL, sizeof(*frec));

    frec->name = normalized_name;

    frec->filter_func = filter_func;
    frec->filter_init_func = filter_init;

    frec->direction = direction;

    /* Make filter_cleanup() reset the tries when FILTER_POOL goes away. */
    apr_pool_cleanup_register(FILTER_POOL, NULL, filter_cleanup,
                              apr_pool_cleanup_null);
280 AP_DECLARE(ap_filter_rec_t *) ap_register_input_filter(const char *name,
281 ap_in_filter_func filter_func,
282 ap_init_filter_func filter_init,
283 ap_filter_type ftype)
286 f.in_func = filter_func;
287 return register_filter(name, f, filter_init, ftype, AP_FILTER_INPUT,
288 ®istered_input_filters);
291 AP_DECLARE(ap_filter_rec_t *) ap_register_output_filter(const char *name,
292 ap_out_filter_func filter_func,
293 ap_init_filter_func filter_init,
294 ap_filter_type ftype)
296 return ap_register_output_filter_protocol(name, filter_func,
297 filter_init, ftype, 0);
300 AP_DECLARE(ap_filter_rec_t *) ap_register_output_filter_protocol(
302 ap_out_filter_func filter_func,
303 ap_init_filter_func filter_init,
304 ap_filter_type ftype,
305 unsigned int proto_flags)
307 ap_filter_rec_t* ret ;
309 f.out_func = filter_func;
310 ret = register_filter(name, f, filter_init, ftype, AP_FILTER_OUTPUT,
311 ®istered_output_filters);
312 ret->proto_flags = proto_flags ;
316 static struct ap_filter_conn_ctx *get_conn_ctx(conn_rec *c)
318 struct ap_filter_conn_ctx *x = c->filter_conn_ctx;
320 c->filter_conn_ctx = x = apr_pcalloc(c->pool, sizeof(*x));
326 void make_spare_ring(struct spare_ring **ring, apr_pool_t *p)
329 *ring = apr_palloc(p, sizeof(**ring));
330 APR_RING_INIT(*ring, spare_data, link);
/* Pop one spare element from 'ring' (if any) and recycle its emptied
 * container onto the connection's spare_containers ring.
 * NOTE(review): the lines that extract and return the container's payload
 * appear to be missing from this copy — compare upstream. */
static void *get_spare(conn_rec *c, struct spare_ring *ring)
    if (ring && !APR_RING_EMPTY(ring, spare_data, link)) {
        struct spare_data *sdata = APR_RING_FIRST(ring);
        struct ap_filter_conn_ctx *x = c->filter_conn_ctx;

        APR_RING_REMOVE(sdata, link);
        /* Keep the now-empty container for later reuse by put_spare(). */
        make_spare_ring(&x->spare_containers, c->pool);
        APR_RING_INSERT_TAIL(x->spare_containers, sdata, spare_data, link);
/* Store 'data' as a spare on *ring, reusing a spare container when one is
 * available, otherwise allocating a fresh one from the connection pool. */
static void put_spare(conn_rec *c, void *data, struct spare_ring **ring)
    struct ap_filter_conn_ctx *x = c->filter_conn_ctx;
    struct spare_data *sdata;

    /* No container to recycle? Allocate a new one. */
    if (!x->spare_containers || APR_RING_EMPTY(x->spare_containers,
        sdata = apr_palloc(c->pool, sizeof(*sdata));
        /* Otherwise pop a recycled container. */
        sdata = APR_RING_FIRST(x->spare_containers);
        APR_RING_REMOVE(sdata, link);

    /* Ensure the destination ring exists, then append the container. */
    make_spare_ring(ring, c->pool);
    APR_RING_INSERT_TAIL(*ring, sdata, spare_data, link);
371 AP_DECLARE(apr_bucket_brigade *) ap_acquire_brigade(conn_rec *c)
373 struct ap_filter_conn_ctx *x = get_conn_ctx(c);
374 apr_bucket_brigade *bb = get_spare(c, x->spare_brigades);
376 return bb ? bb : apr_brigade_create(c->pool, c->bucket_alloc);
379 AP_DECLARE(void) ap_release_brigade(conn_rec *c, apr_bucket_brigade *bb)
381 struct ap_filter_conn_ctx *x = get_conn_ctx(c);
383 AP_DEBUG_ASSERT(bb->p == c->pool && bb->bucket_alloc == c->bucket_alloc);
385 apr_brigade_cleanup(bb);
386 put_spare(c, bb, &x->spare_brigades);
/* Request-pool cleanup for request-scoped filters: park the filter on the
 * connection's dead_filters ring instead of reusing it immediately.
 * NOTE(review): the declaration of 'c' (presumably f->c) and the f->r
 * reset/return are on lines missing from this copy. */
static apr_status_t request_filter_cleanup(void *arg)
    ap_filter_t *f = arg;
    struct ap_filter_conn_ctx *x = c->filter_conn_ctx;

    /* A request filter is cleaned up with an EOR bucket, so possibly
     * while it is handling/passing the EOR, and we want each filter or
     * ap_filter_output_pending() to be able to dereference f until they
     * return. So request filters are recycled in dead_filters and will only
     * be moved to spare_filters when recycle_dead_filters() is called, i.e.
     * in ap_filter_{in,out}put_pending(). Set f->r to NULL still for any use
     * after free to crash quite reliably.
     */
    put_spare(c, f, &x->dead_filters);
409 static void recycle_dead_filters(conn_rec *c)
411 struct ap_filter_conn_ctx *x = c->filter_conn_ctx;
413 if (!x || !x->dead_filters) {
417 make_spare_ring(&x->spare_filters, c->pool);
418 APR_RING_CONCAT(x->spare_filters, x->dead_filters, spare_data, link);
/* Core insertion: allocate (or reuse a spare) ap_filter_t for 'frec' and
 * splice it into the request/protocol/connection chains, ordered by
 * filter type via INSERT_BEFORE(). Returns the filter added. */
static ap_filter_t *add_any_filter_handle(ap_filter_rec_t *frec, void *ctx,
                                          request_rec *r, conn_rec *c,
                                          ap_filter_t **r_filters,
                                          ap_filter_t **p_filters,
                                          ap_filter_t **c_filters)
    struct ap_filter_conn_ctx *x;
    struct ap_filter_private *fp;

    /* Content filters only make sense with a request context. */
    if (frec->ftype < AP_FTYPE_PROTOCOL) {
        ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(00080)
                      "a content filter was added without a request: %s", frec->name);
    /* Likewise protocol filters. */
    else if (frec->ftype < AP_FTYPE_CONNECTION) {
        ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(00081)
                      "a protocol filter was added without a request: %s", frec->name);

    /* Reuse a recycled filter if available, else allocate filter and
     * private part from the connection pool; zero both either way. */
    f = get_spare(c, x->spare_filters);
        f = apr_palloc(c->pool, sizeof(*f));
        fp = apr_palloc(c->pool, sizeof(*fp));
    memset(f, 0, sizeof(*f));
    memset(fp, 0, sizeof(*fp));
    APR_RING_ELEM_INIT(fp, pending);

    /* f->r must always be NULL for connection filters */
    if (r && frec->ftype < AP_FTYPE_CONNECTION) {
        /* Park request-scoped filters on dead_filters when r->pool dies. */
        apr_pool_cleanup_register(r->pool, f, request_filter_cleanup,
                                  apr_pool_cleanup_null);

    if (INSERT_BEFORE(f, *outf)) {
        ap_filter_t *first = NULL;

        /* If we are adding our first non-connection filter,
         * Then don't try to find the right location, it is
         * automatically first.
         */
        if (*r_filters != *c_filters) {
            while (first && (first->next != (*outf))) {
        if (first && first != (*outf)) {
        /* Otherwise walk forward to the correct slot. */
        ap_filter_t *fscan = *outf;
        while (!INSERT_BEFORE(f, fscan->next))
        f->next = fscan->next;

    /* First request-level filter: request chain now starts at the
     * protocol chain head. */
    if (frec->ftype < AP_FTYPE_CONNECTION && (*r_filters == *c_filters)) {
        *r_filters = *p_filters;
/* Resolve 'name' in the registration trie (same per-character binary
 * search as get_filter_handle) and insert the matching filter record via
 * add_any_filter_handle(); logs and returns failure for unknown names. */
static ap_filter_t *add_any_filter(const char *name, void *ctx,
                                   request_rec *r, conn_rec *c,
                                   const filter_trie_node *reg_filter_set,
                                   ap_filter_t **r_filters,
                                   ap_filter_t **p_filters,
                                   ap_filter_t **c_filters)
    if (reg_filter_set) {
        const filter_trie_node *node;

        node = reg_filter_set;
        for (n = name; *n; n++) {
            /* Binary search of this node's sorted children. */
            end = node->nchildren - 1;
            while (end >= start) {
                int middle = (end + start) / 2;
                char ch = node->children[middle].c;
                    node = node->children[middle].child;

        /* A terminal node with a filter record is a registration. */
        if (node && node->frec) {
            return add_any_filter_handle(node->frec, ctx, r, c, r_filters,
                                         p_filters, c_filters);

    ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, r ? r->connection : c, APLOGNO(00082)
                  "an unknown filter was not added: %s", name);
567 AP_DECLARE(ap_filter_t *) ap_add_input_filter(const char *name, void *ctx,
568 request_rec *r, conn_rec *c)
570 return add_any_filter(name, ctx, r, c, registered_input_filters,
571 r ? &r->input_filters : NULL,
572 r ? &r->proto_input_filters : NULL, &c->input_filters);
/* Add an already-resolved input filter record to the request/connection
 * input chains.
 * NOTE(review): the remaining parameters (ctx, r, c) and the final
 * &c->input_filters argument are on lines missing from this copy. */
AP_DECLARE(ap_filter_t *) ap_add_input_filter_handle(ap_filter_rec_t *f,
    return add_any_filter_handle(f, ctx, r, c, r ? &r->input_filters : NULL,
                                 r ? &r->proto_input_filters : NULL,
585 AP_DECLARE(ap_filter_t *) ap_add_output_filter(const char *name, void *ctx,
586 request_rec *r, conn_rec *c)
588 return add_any_filter(name, ctx, r, c, registered_output_filters,
589 r ? &r->output_filters : NULL,
590 r ? &r->proto_output_filters : NULL, &c->output_filters);
/* Add an already-resolved output filter record to the request/connection
 * output chains.
 * NOTE(review): the remaining parameters (ctx, r, c) and the final
 * &c->output_filters argument are on lines missing from this copy. */
AP_DECLARE(ap_filter_t *) ap_add_output_filter_handle(ap_filter_rec_t *f,
    return add_any_filter_handle(f, ctx, r, c, r ? &r->output_filters : NULL,
                                 r ? &r->proto_output_filters : NULL,
603 static APR_INLINE int is_pending_filter(ap_filter_t *f)
605 struct ap_filter_private *fp = f->priv;
606 return APR_RING_NEXT(fp, pending) != fp;
/* Cleanup for pending filters: unlink from the pending ring and release
 * the setaside brigade back to the connection's spares.
 * NOTE(review): the guard around releasing fp->bb (and its reset) appears
 * to be on lines missing from this copy. */
static apr_status_t pending_filter_cleanup(void *arg)
    ap_filter_t *f = arg;
    struct ap_filter_private *fp = f->priv;

    if (is_pending_filter(f)) {
        APR_RING_REMOVE(fp, pending);
        /* Re-init so is_pending_filter() is false afterwards. */
        APR_RING_ELEM_INIT(fp, pending);

        ap_release_brigade(f->c, fp->bb);
/* Unlink filter f from the request (or, failing that, connection) chain,
 * and from the protocol chain head if f is first there. */
static void remove_any_filter(ap_filter_t *f, ap_filter_t **r_filt, ap_filter_t **p_filt,
                              ap_filter_t **c_filt)
    ap_filter_t **curr = r_filt ? r_filt : c_filt;
    ap_filter_t *fscan = *curr;

    /* Make sure f is no longer tracked as pending. */
    pending_filter_cleanup(f);

    if (p_filt && *p_filt == f)
        *p_filt = (*p_filt)->next;

        /* f is the chain head: pop it. */
        *curr = (*curr)->next;

    /* Otherwise find f's predecessor and bypass f. */
    while (fscan->next != f) {
        if (!(fscan = fscan->next)) {

    fscan->next = f->next;
652 AP_DECLARE(void) ap_remove_input_filter(ap_filter_t *f)
654 remove_any_filter(f, f->r ? &f->r->input_filters : NULL,
655 f->r ? &f->r->proto_input_filters : NULL,
656 &f->c->input_filters);
659 AP_DECLARE(void) ap_remove_output_filter(ap_filter_t *f)
661 struct ap_filter_private *fp = f->priv;
663 if (fp->deferred_pool) {
664 AP_DEBUG_ASSERT(fp->bb);
665 apr_brigade_cleanup(fp->bb);
666 apr_pool_destroy(fp->deferred_pool);
667 fp->deferred_pool = NULL;
670 remove_any_filter(f, f->r ? &f->r->output_filters : NULL,
671 f->r ? &f->r->proto_output_filters : NULL,
672 &f->c->output_filters);
/* Find the filter registered under 'handle' in the input chain starting
 * at 'next' and remove it.
 * NOTE(review): the 'handle' parameter, the chain-walking loop and the
 * not-found/return paths are on lines missing from this copy. */
AP_DECLARE(apr_status_t) ap_remove_input_filter_byhandle(ap_filter_t *next,
    ap_filter_t *found = NULL;
    ap_filter_rec_t *filter;

    /* Resolve the name to a registered record for pointer comparison. */
    filter = ap_get_input_filter_handle(handle);

    if (next->frec == filter) {

    ap_remove_input_filter(found);
/* Find the filter registered under 'handle' in the output chain starting
 * at 'next' and remove it.
 * NOTE(review): the 'handle' parameter, the chain-walking loop and the
 * not-found/return paths are on lines missing from this copy. */
AP_DECLARE(apr_status_t) ap_remove_output_filter_byhandle(ap_filter_t *next,
    ap_filter_t *found = NULL;
    ap_filter_rec_t *filter;

    /* Resolve the name to a registered record for pointer comparison. */
    filter = ap_get_output_filter_handle(handle);

    if (next->frec == filter) {

    ap_remove_output_filter(found);
/*
 * Read data from the next filter in the filter stack. Data should be
 * modified in the bucket brigade that is passed in. The core allocates the
 * bucket brigade, modules that wish to replace large chunks of data or to
 * save data off to the side should probably create their own temporary
 * brigade especially for that use.
 */
AP_DECLARE(apr_status_t) ap_get_brigade(ap_filter_t *next,
                                        apr_bucket_brigade *bb,
                                        ap_input_mode_t mode,
                                        apr_read_type_e block,
    /* NOTE(review): the readbytes parameter, the guard on 'next' and the
     * trailing call argument are on lines missing from this copy. */
    return next->frec->filter_func.in_func(next, bb, mode, block,
    /* No input filter left to read from. */
    return AP_NOBODY_READ;
/* Pass the buckets to the next filter in the filter stack. If the
 * current filter is a handler, we should get NULL passed in instead of
 * the current filter. At that point, we can just call the first filter in
 * the stack, or r->output_filters.
 */
AP_DECLARE(apr_status_t) ap_pass_brigade(ap_filter_t *next,
                                         apr_bucket_brigade *bb)
    apr_bucket *e = APR_BRIGADE_LAST(bb);

    /* Record EOS on the request so a second EOS is never emitted. */
    if (e != APR_BRIGADE_SENTINEL(bb) && APR_BUCKET_IS_EOS(e) && next->r) {
        /* This is only safe because HTTP_HEADER filter is always in
         * the filter stack. This ensures that there is ALWAYS a
         * request-based filter that we can attach this to. If the
         * HTTP_FILTER is removed, and another filter is not put in its
         * place, then handlers like mod_cgi, which attach their own
         * EOS bucket to the brigade will be broken, because we will
         * get two EOS buckets on the same request.
         */
        next->r->eos_sent = 1;

        /* remember the eos for internal redirects, too */
            request_rec *prev = next->r->prev;

    return next->frec->filter_func.out_func(next, bb);
    /* No output filter left to write to. */
    return AP_NOBODY_WROTE;
/* Pass the buckets to the next filter in the filter stack
 * checking return status for filter errors.
 * returns: OK if ap_pass_brigade returns APR_SUCCESS
 * AP_FILTER_ERROR if filter error exists
 * HTTP_INTERNAL_SERVER_ERROR for all other cases
 * logged with optional errmsg
 */
AP_DECLARE(apr_status_t) ap_pass_brigade_fchk(request_rec *r,
                                              apr_bucket_brigade *bb,
    rv = ap_pass_brigade(r->output_filters, bb);
    if (rv != APR_SUCCESS) {
        if (rv != AP_FILTER_ERROR) {
            /* Log a default message, or the caller's formatted one. */
            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(00083)
                          "ap_pass_brigade returned %d", rv);
            res = apr_pvsprintf(r->pool, fmt, ap);
            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(03158)
            return HTTP_INTERNAL_SERVER_ERROR;
        /* Filter errors are passed through unchanged. */
        return AP_FILTER_ERROR;
/* Set aside the buckets of *b onto *saveto, allocated from pool p so the
 * data outlives the current call; *saveto is created on first use. */
AP_DECLARE(apr_status_t) ap_save_brigade(ap_filter_t *f,
                                         apr_bucket_brigade **saveto,
                                         apr_bucket_brigade **b, apr_pool_t *p)
    apr_status_t rv, srv = APR_SUCCESS;

    /* If have never stored any data in the filter, then we had better
     * create an empty bucket brigade so that we can concat. Register
     * a cleanup to zero out the pointer if the pool is cleared.
     */
        *saveto = apr_brigade_create(p, f->c->bucket_alloc);

    for (e = APR_BRIGADE_FIRST(*b);
         e != APR_BRIGADE_SENTINEL(*b);
         e = APR_BUCKET_NEXT(e))
        rv = apr_bucket_setaside(e, p);

        /* If the bucket type does not implement setaside, then
         * (hopefully) morph it into a bucket type which does, and set
         */
        if (rv == APR_ENOTIMPL) {
            /* Reading typically morphs the bucket into a heap bucket. */
            rv = apr_bucket_read(e, &s, &n, APR_BLOCK_READ);
            if (rv == APR_SUCCESS) {
                rv = apr_bucket_setaside(e, p);

        if (rv != APR_SUCCESS) {
            /* Return an error but still save the brigade if
             * ->setaside() is really not implemented. */
            if (rv != APR_ENOTIMPL) {

    APR_BRIGADE_CONCAT(*saveto, *b);
/* Ensure filter f has a setaside brigade and is linked into the
 * connection's pending ring for its direction, positioned so that
 * pending work is processed outer-most first. */
AP_DECLARE(int) ap_filter_prepare_brigade(ap_filter_t *f)
    struct ap_filter_conn_ctx *x = get_conn_ctx(c);
    struct ap_filter_private *fp = f->priv, *e;
    struct pending_ring **ref, *pendings;

    /* Already pending: nothing to (re)prepare. */
    if (is_pending_filter(f)) {

    fp->bb = ap_acquire_brigade(c);

    /* Take care of request filters that don't remove themselves
     * from the chain(s), when f->r is being destroyed.
     */
        apr_pool_cleanup_register(f->r->pool, f,
                                  pending_filter_cleanup,
                                  apr_pool_cleanup_null);

    /* In fp->bb there may be buckets on fp->deferred_pool, so take
     * care to always pre_cleanup the former before the latter.
     */
        apr_pool_pre_cleanup_register(c->pool, f,
                                      pending_filter_cleanup);

    /* Choose the pending ring for this filter's direction. */
    if (f->frec->direction == AP_FILTER_INPUT) {
        ref = &x->pending_input_filters;
        ref = &x->pending_output_filters;

    /* Pending reads/writes must happen in the reverse order of the actual
     * in/output filters (in/outer most first), though we still maintain the
     * ring in the same "next" order as filters (walking is backward). So find
     * the first f->next filter already in place and insert before if
     * any, otherwise insert last.
     */
    for (next = f->next; next; next = next->next) {
        for (e = APR_RING_FIRST(pendings);
             e != APR_RING_SENTINEL(pendings, ap_filter_private, pending);
             e = APR_RING_NEXT(e, pending)) {
            if (e == next->priv) {
                APR_RING_INSERT_BEFORE(e, fp, pending);

    /* First pending filter in this direction: create the ring. */
    pendings = *ref = apr_palloc(c->pool, sizeof(*pendings));
    APR_RING_INIT(pendings, ap_filter_private, pending);
    APR_RING_INSERT_TAIL(pendings, fp, ap_filter_private, pending);
/* Set aside bb into the filter's pending brigade (fp->bb): request-scoped
 * filters set transient buckets aside to r->pool; connection filters use
 * a dedicated deferred pool. An empty bb with a deferred pool means the
 * pipeline drained, so the deferred data can be dropped. */
AP_DECLARE(apr_status_t) ap_filter_setaside_brigade(ap_filter_t *f,
                                                    apr_bucket_brigade *bb)
    struct ap_filter_private *fp = f->priv;

    ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, f->c,
                  "setaside %s brigade to %s brigade in '%s' output filter",
                  APR_BRIGADE_EMPTY(bb) ? "empty" : "full",
                  (!fp->bb || APR_BRIGADE_EMPTY(fp->bb)) ? "empty" : "full",

    if (!APR_BRIGADE_EMPTY(bb)) {
        /*
         * Set aside the brigade bb within fp->bb.
         */
        ap_filter_prepare_brigade(f);

        /* decide what pool we setaside to, request pool or deferred pool? */
            /* Request filter: move transients to the request pool. */
            for (e = APR_BRIGADE_FIRST(bb); e != APR_BRIGADE_SENTINEL(bb); e =
                     APR_BUCKET_NEXT(e)) {
                if (APR_BUCKET_IS_TRANSIENT(e)) {
                    int rv = apr_bucket_setaside(e, f->r->pool);
                    if (rv != APR_SUCCESS) {
            APR_BRIGADE_CONCAT(fp->bb, bb);

            /* Connection filter: lazily create the deferred pool and save
             * everything there. */
            if (!fp->deferred_pool) {
                apr_pool_create(&fp->deferred_pool, f->c->pool);
                apr_pool_tag(fp->deferred_pool, "deferred_pool");
            return ap_save_brigade(f, &fp->bb, &bb, fp->deferred_pool);

    else if (fp->deferred_pool) {
        /*
         * There are no more requests in the pipeline. We can just clear the
         * deferred data (brigade first — its buckets live on that pool).
         */
        AP_DEBUG_ASSERT(fp->bb);
        apr_brigade_cleanup(fp->bb);
        apr_pool_clear(fp->deferred_pool);
/* Adopt bb's buckets as-is into the filter's pending brigade (no
 * setaside/copy — caller guarantees the buckets' lifetime). */
void ap_filter_adopt_brigade(ap_filter_t *f, apr_bucket_brigade *bb)
    struct ap_filter_private *fp = f->priv;

    ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, f->c,
                  "adopt %s brigade to %s brigade in '%s' output filter",
                  APR_BRIGADE_EMPTY(bb) ? "empty" : "full",
                  (!fp->bb || APR_BRIGADE_EMPTY(fp->bb)) ? "empty" : "full",

    if (!APR_BRIGADE_EMPTY(bb)) {
        ap_filter_prepare_brigade(f);
        APR_BRIGADE_CONCAT(fp->bb, bb);
/* Prepend the filter's pending (setaside) buckets back onto bb and decide
 * how far a blocking write must go: *flush_upto is set to the bucket up
 * to which data MUST be flushed before returning (NULL if none). */
AP_DECLARE(apr_status_t) ap_filter_reinstate_brigade(ap_filter_t *f,
                                                     apr_bucket_brigade *bb,
                                                     apr_bucket **flush_upto)
    apr_bucket *bucket, *next;
    apr_size_t bytes_in_brigade, non_file_bytes_in_brigade;
    int eor_buckets_in_brigade, morphing_bucket_in_brigade;
    struct ap_filter_private *fp = f->priv;
    core_server_config *conf;

    ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, f->c,
                  "reinstate %s brigade to %s brigade in '%s' output filter",
                  (!fp->bb || APR_BRIGADE_EMPTY(fp->bb) ? "empty" : "full"),
                  (APR_BRIGADE_EMPTY(bb) ? "empty" : "full"),

        APR_BRIGADE_PREPEND(bb, fp->bb);

    /* Just prepend all. */

    /*
     * Determine if and up to which bucket we need to do a blocking write:
     *
     * a) The brigade contains a flush bucket: Do a blocking write
     * of everything up that point.
     *
     * b) The request is in CONN_STATE_HANDLER state, and the brigade
     * contains at least flush_max_threshold bytes in non-file
     * buckets: Do blocking writes until the amount of data in the
     * buffer is less than flush_max_threshold. (The point of this
     * rule is to provide flow control, in case a handler is
     * streaming out lots of data faster than the data can be
     * sent to the client.)
     *
     * c) The request is in CONN_STATE_HANDLER state, and the brigade
     * contains at least flush_max_pipelined EOR buckets:
     * Do blocking writes until less than flush_max_pipelined EOR
     * buckets are left. (The point of this rule is to prevent too many
     * FDs being kept open by pipelined requests, possibly allowing a
     *
     * d) The request is being served by a connection filter and the
     * brigade contains a morphing bucket: If there was no other
     * reason to do a blocking write yet, try reading the bucket. If its
     * contents fit into memory before flush_max_threshold is reached,
     * everything is fine. Otherwise we need to do a blocking write the
     * up to and including the morphing bucket, because ap_save_brigade()
     * would read the whole bucket into memory later on.
     */

    bytes_in_brigade = 0;
    non_file_bytes_in_brigade = 0;
    eor_buckets_in_brigade = 0;
    morphing_bucket_in_brigade = 0;

    conf = ap_get_core_module_config(f->c->base_server->module_config);

    /* Scan and classify buckets, tracking flush-trigger conditions. */
    for (bucket = APR_BRIGADE_FIRST(bb); bucket != APR_BRIGADE_SENTINEL(bb);
        next = APR_BUCKET_NEXT(bucket);

        if (!APR_BUCKET_IS_METADATA(bucket)) {
            if (bucket->length == (apr_size_t)-1) {
                /*
                 * A setaside of morphing buckets would read everything into
                 * memory. Instead, we will flush everything up to and
                 * including this bucket.
                 */
                morphing_bucket_in_brigade = 1;
                bytes_in_brigade += bucket->length;
                if (!APR_BUCKET_IS_FILE(bucket))
                    non_file_bytes_in_brigade += bucket->length;
        else if (AP_BUCKET_IS_EOR(bucket)) {
            eor_buckets_in_brigade++;

        /* Any trigger condition hit? Mark this as the new flush point. */
        if (APR_BUCKET_IS_FLUSH(bucket)
            || non_file_bytes_in_brigade >= conf->flush_max_threshold
            || (!f->r && morphing_bucket_in_brigade)
            || eor_buckets_in_brigade > conf->flush_max_pipelined) {
            /* this segment of the brigade MUST be sent before returning. */

            if (APLOGctrace6(f->c)) {
                char *reason = APR_BUCKET_IS_FLUSH(bucket) ?
                               (non_file_bytes_in_brigade >= conf->flush_max_threshold) ?
                               (!f->r && morphing_bucket_in_brigade) ? "morphing bucket" :
                               "max requests in pipeline";
                ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, f->c,
                              "will flush because of %s", reason);
                ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, f->c,
                              "seen in brigade%s: bytes: %" APR_SIZE_T_FMT
                              ", non-file bytes: %" APR_SIZE_T_FMT ", eor "
                              "buckets: %d, morphing buckets: %d",
                              *flush_upto == NULL ? " so far"
                                                  : " since last flush point",
                              non_file_bytes_in_brigade,
                              eor_buckets_in_brigade,
                              morphing_bucket_in_brigade);

            /*
             * Defer the actual blocking write to avoid doing many writes.
             */
            /* Reset counters for the segment after this flush point. */
            bytes_in_brigade = 0;
            non_file_bytes_in_brigade = 0;
            eor_buckets_in_brigade = 0;
            morphing_bucket_in_brigade = 0;

    ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, f->c,
                  "brigade contains: bytes: %" APR_SIZE_T_FMT
                  ", non-file bytes: %" APR_SIZE_T_FMT
                  ", eor buckets: %d, morphing buckets: %d",
                  bytes_in_brigade, non_file_bytes_in_brigade,
                  eor_buckets_in_brigade, morphing_bucket_in_brigade);
/* Return non-zero if filter f should yield (stop producing and set data
 * aside) because a downstream filter still has buffered data. */
AP_DECLARE(int) ap_filter_should_yield(ap_filter_t *f)
    /*
     * Handle the AsyncFilter directive. We limit the filters that are
     * eligible for asynchronous handling here.
     */
    if (f->frec->ftype < f->c->async_filter) {

    /*
     * This function decides whether a filter should yield due to buffered
     * data in a downstream filter. If a downstream filter buffers we
     * must back off so we don't overwhelm the server. If this function
     * returns true, the filter should call ap_filter_setaside_brigade()
     * to save unprocessed buckets, and then reinstate those buckets on
     * the next call with ap_filter_reinstate_brigade() and continue
     * where it left off.
     *
     * If this function is forced to return zero, we return back to
     * synchronous filter behaviour.
     *
     * Subrequests present us with a problem - we don't know how much data
     * they will produce and therefore how much buffering we'll need, and
     * if a subrequest had to trigger buffering, but next subrequest wouldn't
     * know when the previous one had finished sending data and buckets
     * could be sent out of order.
     *
     * In the case of subrequests, deny the ability to yield. When the data
     * reaches the filters from the main request, they will be setaside
     * there in the right order and the request will be given the
     * opportunity to yield.
     */
    if (f->r && f->r->main) {

    /*
     * This is either a main request or internal redirect, or it is a
     * connection filter. Yield if there is any buffered data downstream
     */
        /* Any non-empty pending brigade downstream forces a yield. */
        struct ap_filter_private *fp = f->priv;
        if (fp->bb && !APR_BRIGADE_EMPTY(fp->bb)) {
/* Try to flush every output filter's pending (setaside) data, outer-most
 * filter first; recycles dead request filters before returning. */
AP_DECLARE_NONSTD(int) ap_filter_output_pending(conn_rec *c)
    struct ap_filter_conn_ctx *x = c->filter_conn_ctx;
    struct ap_filter_private *fp, *prev;
    apr_bucket_brigade *bb;

    /* Nothing pending at all on this connection. */
    if (!x || !x->pending_output_filters) {

    /* Flush outer most filters first for ap_filter_should_yield(f->next)
     * to be relevant in the previous ones (e.g. ap_request_core_filter()
     * won't pass its buckets if its next filters yield already).
     */
    bb = ap_acquire_brigade(c);
    for (fp = APR_RING_LAST(x->pending_output_filters);
         fp != APR_RING_SENTINEL(x->pending_output_filters,
                                 ap_filter_private, pending);
        /* If a filter removes itself from the filters stack (when run), it
         * also orphans itself from the ring, so save "prev" here to avoid
         * an infinite loop in this case.
         */
        prev = APR_RING_PREV(fp, pending);

        AP_DEBUG_ASSERT(fp->bb);
        if (!APR_BRIGADE_EMPTY(fp->bb)) {
            ap_filter_t *f = fp->f;

            rv = ap_pass_brigade(f, bb);
            apr_brigade_cleanup(bb);

            if (rv != APR_SUCCESS) {
                ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(00470)
                              "write failure in '%s' output filter", f->frec->name);

            /* Still buffered after the pass: data remains pending. */
            if (fp->bb && !APR_BRIGADE_EMPTY(fp->bb)) {

    ap_release_brigade(c, bb);

    /* All filters have returned, time to recycle/unleak ap_filter_t-s
     * before leaving (i.e. make them reusable).
     */
    recycle_dead_filters(c);
/* Report whether any input filter holds readable pending data (a leading
 * bucket of known length); recycles dead request filters on the way out. */
AP_DECLARE_NONSTD(int) ap_filter_input_pending(conn_rec *c)
    struct ap_filter_conn_ctx *x = c->filter_conn_ctx;
    struct ap_filter_private *fp;

    if (!x || !x->pending_input_filters) {

    /* Walk inner-most first (ring is kept in filter "next" order). */
    for (fp = APR_RING_LAST(x->pending_input_filters);
         fp != APR_RING_SENTINEL(x->pending_input_filters,
                                 ap_filter_private, pending);
         fp = APR_RING_PREV(fp, pending)) {

        /* if there is a leading non-morphing bucket
         * in place, then we have data pending
         */
        AP_DEBUG_ASSERT(fp->bb);
        e = APR_BRIGADE_FIRST(fp->bb);
        if (e != APR_BRIGADE_SENTINEL(fp->bb)
            && e->length != (apr_size_t)(-1)) {

    /* All filters have returned, time to recycle/unleak ap_filter_t-s
     * before leaving (i.e. make them reusable).
     */
    recycle_dead_filters(c);
/* Flush callback for apr_brigade_* writers: pass bb down the chain via
 * the filter stored in ctx, then empty the brigade.
 * NOTE(review): the local rv declaration and final return appear to be on
 * lines missing from this copy. */
AP_DECLARE_NONSTD(apr_status_t) ap_filter_flush(apr_bucket_brigade *bb,
    ap_filter_t *f = ctx;

    rv = ap_pass_brigade(f, bb);

    /* Before invocation of the flush callback, apr_brigade_write et
     * al may place transient buckets in the brigade, which will fall
     * out of scope after returning. Empty the brigade here, to avoid
     * issues with leaving such buckets in the brigade if some filter
     * fails and leaves a non-empty brigade. */
    apr_brigade_cleanup(bb);
1304 AP_DECLARE(apr_status_t) ap_fflush(ap_filter_t *f, apr_bucket_brigade *bb)
1308 b = apr_bucket_flush_create(f->c->bucket_alloc);
1309 APR_BRIGADE_INSERT_TAIL(bb, b);
1310 return ap_pass_brigade(f, bb);
1313 AP_DECLARE_NONSTD(apr_status_t) ap_fputstrs(ap_filter_t *f,
1314 apr_bucket_brigade *bb, ...)
1320 rv = apr_brigade_vputstrs(bb, ap_filter_flush, f, args);
1325 AP_DECLARE_NONSTD(apr_status_t) ap_fprintf(ap_filter_t *f,
1326 apr_bucket_brigade *bb,
1333 va_start(args, fmt);
1334 rv = apr_brigade_vprintf(bb, ap_filter_flush, f, fmt, args);
1339 AP_DECLARE(void) ap_filter_protocol(ap_filter_t *f, unsigned int flags)
1341 f->frec->proto_flags = flags ;