1 /* Licensed to the Apache Software Foundation (ASF) under one or more
2 * contributor license agreements. See the NOTICE file distributed with
3 * this work for additional information regarding copyright ownership.
4 * The ASF licenses this file to You under the Apache License, Version 2.0
5 * (the "License"); you may not use this file except in compliance with
6 * the License. You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * @file core_filters.c
19 * @brief Core input/output network filters.
23 #include "apr_strings.h"
25 #include "apr_fnmatch.h"
27 #include "apr_thread_proc.h" /* for RLIMIT stuff */
29 #define APR_WANT_IOVEC
30 #define APR_WANT_STRFUNC
31 #define APR_WANT_MEMFUNC
34 #include "ap_config.h"
36 #include "http_config.h"
37 #include "http_core.h"
38 #include "http_protocol.h" /* For index_of_response(). Grump. */
39 #include "http_request.h"
40 #include "http_vhost.h"
41 #include "http_main.h" /* For the default_handler below... */
44 #include "http_connection.h"
45 #include "apr_buckets.h"
46 #include "util_filter.h"
47 #include "util_ebcdic.h"
48 #include "mpm_common.h"
49 #include "scoreboard.h"
51 #include "ap_listen.h"
53 #include "mod_so.h" /* for ap_find_loaded_module_symbol */
/* Files smaller than this are sent with writev() rather than sendfile();
 * below this size the sendfile() syscall overhead is not worth it. */
55 #define AP_MIN_SENDFILE_BYTES (256)
58 * Remove all zero length buckets from the brigade.
/* NOTE(review): this listing is truncated — the do { opener, the inner
 * loop close and the trailing } while (0) of this macro are not visible
 * here. The visible body deletes zero-length non-metadata buckets while
 * walking brigade (b); confirm against the full source. */
60 #define BRIGADE_NORMALIZE(b) \
62 apr_bucket *e = APR_BRIGADE_FIRST(b); \
64 if (e->length == 0 && !APR_BUCKET_IS_METADATA(e)) { \
66 d = APR_BUCKET_NEXT(e); \
67 apr_bucket_delete(e); \
71 e = APR_BUCKET_NEXT(e); \
73 } while (!APR_BRIGADE_EMPTY(b) && (e != APR_BRIGADE_SENTINEL(b))); \
76 /* we know core's module_index is 0 */
77 #undef APLOG_MODULE_INDEX
78 #define APLOG_MODULE_INDEX AP_CORE_MODULE_INDEX
/* Per-connection state for ap_core_output_filter().
 * NOTE(review): closing brace/typedef not visible in this listing. */
80 struct core_output_filter_ctx {
/* Data that could not be written without blocking; set aside here
 * (see setaside_remaining_output) until write completion. */
81 apr_bucket_brigade *buffered_bb;
/* Scratch brigade used by apr_brigade_split_ex() when flushing a
 * leading segment of the pending output. */
82 apr_bucket_brigade *tmp_flush_bb;
/* Subpool (child of c->pool) holding setaside data; cleared once the
 * pipeline drains, so long-lived connections don't accumulate memory. */
83 apr_pool_t *deferred_write_pool;
/* Running total of bytes sent on this connection. */
84 apr_size_t bytes_written;
/* Per-connection state for ap_core_input_filter().
 * NOTE(review): closing brace/typedef not visible in this listing. */
87 struct core_filter_ctx {
/* Brigade holding unconsumed input; seeded with the socket bucket. */
88 apr_bucket_brigade *b;
/* Scratch brigade for splitting off data beyond the caller's request. */
89 apr_bucket_brigade *tmpbb;
/* Core network input filter: reads from the client socket bucket held in
 * ctx->b and satisfies the requested ap_input_mode_t (INIT, GETLINE,
 * EATCRLF, EXHAUSTIVE, READBYTES, SPECULATIVE). Delivers data into the
 * caller's brigade b; returns APR status (APR_EOF once the socket bucket
 * is exhausted).
 * NOTE(review): this listing is truncated — braces, returns and several
 * statements are missing between the visible lines. */
93 apr_status_t ap_core_input_filter(ap_filter_t *f, apr_bucket_brigade *b,
94 ap_input_mode_t mode, apr_read_type_e block,
98 core_net_rec *net = f->ctx;
99 core_ctx_t *ctx = net->in_ctx;
103 if (mode == AP_MODE_INIT) {
105 * this mode is for filters that might need to 'initialize'
106 * a connection before reading request data from a client.
107 * NNTP over SSL for example needs to handshake before the
108 * server sends the welcome message.
109 * such filters would have changed the mode before this point
110 * is reached. however, protocol modules such as NNTP should
111 * not need to know anything about SSL. given the example, if
112 * SSL is not in the filter chain, AP_MODE_INIT is a noop.
/* First call on this connection: allocate the input context from the
 * connection pool and seed ctx->b with the client socket bucket. */
119 net->in_ctx = ctx = apr_palloc(f->c->pool, sizeof(*ctx));
120 ctx->b = apr_brigade_create(f->c->pool, f->c->bucket_alloc);
121 ctx->tmpbb = apr_brigade_create(f->c->pool, f->c->bucket_alloc);
122 /* seed the brigade with the client socket. */
123 rv = ap_run_insert_network_bucket(f->c, ctx->b, net->client_socket);
124 if (rv != APR_SUCCESS)
127 else if (APR_BRIGADE_EMPTY(ctx->b)) {
131 /* ### This is bad. */
/* Drop zero-length buckets so the "is the brigade empty" checks and
 * the bucket reads below see only real data (or the socket bucket). */
132 BRIGADE_NORMALIZE(ctx->b);
134 /* check for empty brigade again *AFTER* BRIGADE_NORMALIZE()
135 * If we have lost our socket bucket (see above), we are EOF.
137 * Ideally, this should be returning SUCCESS with EOS bucket, but
138 * some higher-up APIs (spec. read_request_line via ap_rgetline)
139 * want an error code. */
140 if (APR_BRIGADE_EMPTY(ctx->b)) {
/* GETLINE: hand back one LF-terminated line (bounded by
 * HUGE_STRING_LEN), e.g. an HTTP request/header line. */
144 if (mode == AP_MODE_GETLINE) {
145 /* we are reading a single LF line, e.g. the HTTP headers */
146 rv = apr_brigade_split_line(b, ctx->b, block, HUGE_STRING_LEN);
147 /* We should treat EAGAIN here the same as we do for EOF (brigade is
148 * empty). We do this by returning whatever we have read. This may
149 * or may not be bogus, but is consistent (for now) with EOF logic.
151 if (APR_STATUS_IS_EAGAIN(rv) && block == APR_NONBLOCK_READ) {
157 /* ### AP_MODE_PEEK is a horrific name for this mode because we also
158 * eat any CRLFs that we see. That's not the obvious intention of
159 * this mode. Determine whether anyone actually uses this or not. */
160 if (mode == AP_MODE_EATCRLF) {
164 /* The purpose of this loop is to ignore any CRLF (or LF) at the end
165 * of a request. Many browsers send extra lines at the end of POST
166 * requests. We use the PEEK method to determine if there is more
167 * data on the socket, so that we know if we should delay sending the
168 * end of one request until we have served the second request in a
169 * pipelined situation. We don't want to actually delay sending a
170 * response if the server finds a CRLF (or LF), becuause that doesn't
171 * mean that there is another request, just a blank line.
174 if (APR_BRIGADE_EMPTY(ctx->b))
177 e = APR_BRIGADE_FIRST(ctx->b);
/* Non-blocking peek: never wait for data in EATCRLF mode. */
179 rv = apr_bucket_read(e, &str, &len, APR_NONBLOCK_READ);
181 if (rv != APR_SUCCESS)
185 while (c < str + len) {
186 if (*c == APR_ASCII_LF)
188 else if (*c == APR_ASCII_CR && *(c + 1) == APR_ASCII_LF)
194 /* If we reach here, we were a bucket just full of CRLFs, so
195 * just toss the bucket. */
196 /* FIXME: Is this the right thing to do in the core? */
197 apr_bucket_delete(e);
202 /* If mode is EXHAUSTIVE, we want to just read everything until the end
203 * of the brigade, which in this case means the end of the socket.
204 * To do this, we attach the brigade that has currently been setaside to
205 * the brigade that was passed down, and send that brigade back.
207 * NOTE: This is VERY dangerous to use, and should only be done with
208 * extreme caution. FWLIW, this would be needed by an MPM like Perchild;
209 * such an MPM can easily request the socket and all data that has been
210 * read, which means that it can pass it to the correct child process.
212 if (mode == AP_MODE_EXHAUSTIVE) {
215 /* Tack on any buckets that were set aside. */
216 APR_BRIGADE_CONCAT(b, ctx->b);
218 /* Since we've just added all potential buckets (which will most
219 * likely simply be the socket bucket) we know this is the end,
220 * so tack on an EOS too. */
221 /* We have read until the brigade was empty, so we know that we
223 e = apr_bucket_eos_create(f->c->bucket_alloc);
224 APR_BRIGADE_INSERT_TAIL(b, e);
228 /* read up to the amount they specified. */
/* READBYTES consumes data; SPECULATIVE returns copies and leaves
 * ctx->b intact so a later read sees the same bytes again. */
229 if (mode == AP_MODE_READBYTES || mode == AP_MODE_SPECULATIVE) {
232 AP_DEBUG_ASSERT(readbytes > 0);
234 e = APR_BRIGADE_FIRST(ctx->b);
235 rv = apr_bucket_read(e, &str, &len, block);
237 if (APR_STATUS_IS_EAGAIN(rv) && block == APR_NONBLOCK_READ) {
238 /* getting EAGAIN for a blocking read is an error; for a
239 * non-blocking read, return an empty brigade. */
242 else if (rv != APR_SUCCESS) {
245 else if (block == APR_BLOCK_READ && len == 0) {
246 /* We wanted to read some bytes in blocking mode. We read
247 * 0 bytes. Hence, we now assume we are EOS.
249 * When we are in normal mode, return an EOS bucket to the
251 * When we are in speculative mode, leave ctx->b empty, so
252 * that the next call returns an EOS bucket.
254 apr_bucket_delete(e);
256 if (mode == AP_MODE_READBYTES) {
257 e = apr_bucket_eos_create(f->c->bucket_alloc);
258 APR_BRIGADE_INSERT_TAIL(b, e);
263 /* Have we read as much data as we wanted (be greedy)? */
264 if (len < readbytes) {
265 apr_size_t bucket_len;
268 /* We already registered the data in e in len */
269 e = APR_BUCKET_NEXT(e);
270 while ((len < readbytes) && (rv == APR_SUCCESS)
271 && (e != APR_BRIGADE_SENTINEL(ctx->b))) {
272 /* Check for the availability of buckets with known length */
273 if (e->length != -1) {
275 e = APR_BUCKET_NEXT(e);
279 * Read from bucket, but non blocking. If there isn't any
280 * more data, well than this is fine as well, we will
281 * not wait for more since we already got some and we are
282 * only checking if there isn't more.
284 rv = apr_bucket_read(e, &str, &bucket_len,
286 if (rv == APR_SUCCESS) {
288 e = APR_BUCKET_NEXT(e);
294 /* We can only return at most what we read. */
295 if (len < readbytes) {
/* Split ctx->b exactly at readbytes; e becomes the first bucket of
 * the remainder, which is parked in ctx->tmpbb until after CONCAT. */
299 rv = apr_brigade_partition(ctx->b, readbytes, &e);
300 if (rv != APR_SUCCESS) {
304 /* Must do move before CONCAT */
305 ctx->tmpbb = apr_brigade_split_ex(ctx->b, e, ctx->tmpbb);
307 if (mode == AP_MODE_READBYTES) {
308 APR_BRIGADE_CONCAT(b, ctx->b);
310 else if (mode == AP_MODE_SPECULATIVE) {
311 apr_bucket *copy_bucket;
313 for (e = APR_BRIGADE_FIRST(ctx->b);
314 e != APR_BRIGADE_SENTINEL(ctx->b);
315 e = APR_BUCKET_NEXT(e))
/* NOTE(review): "©_bucket" below is a mojibake of "&copy_bucket"
 * (HTML entity &copy + ";") introduced by the extraction — the real
 * source passes the address of copy_bucket to apr_bucket_copy(). */
317 rv = apr_bucket_copy(e, &copy_bucket);
318 if (rv != APR_SUCCESS) {
321 APR_BRIGADE_INSERT_TAIL(b, copy_bucket);
325 /* Take what was originally there and place it back on ctx->b */
326 APR_BRIGADE_CONCAT(ctx->b, ctx->tmpbb);
/* Forward declarations for the static helpers of the core output filter
 * (definitions below). NOTE(review): some parameter lines of these
 * prototypes are missing from this truncated listing. */
331 static void setaside_remaining_output(ap_filter_t *f,
332 core_output_filter_ctx_t *ctx,
333 apr_bucket_brigade *bb,
336 static apr_status_t send_brigade_nonblocking(apr_socket_t *s,
337 apr_bucket_brigade *bb,
338 apr_size_t *bytes_written,
341 static void remove_empty_buckets(apr_bucket_brigade *bb);
343 static apr_status_t send_brigade_blocking(apr_socket_t *s,
344 apr_bucket_brigade *bb,
345 apr_size_t *bytes_written,
348 static apr_status_t writev_nonblocking(apr_socket_t *s,
349 struct iovec *vec, apr_size_t nvec,
350 apr_bucket_brigade *bb,
351 apr_size_t *cumulative_bytes_written,
355 static apr_status_t sendfile_nonblocking(apr_socket_t *s,
357 apr_size_t *cumulative_bytes_written,
361 /* XXX: Should these be configurable parameters? */
362 #define THRESHOLD_MIN_WRITE 4096
363 #define THRESHOLD_MAX_BUFFER 65536
364 #define MAX_REQUESTS_IN_PIPELINE 5
/* MIN_WRITE: below this many buffered bytes a nonblocking write is not
 * attempted; MAX_BUFFER: above this many non-file bytes a blocking flush
 * is forced; MAX_REQUESTS_IN_PIPELINE: cap on buffered EOR buckets (each
 * may pin an open fd) before forcing a flush. */
366 /* Optional function coming from mod_logio, used for logging of output
369 extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *ap__logio_add_bytes_out;
/* Core network output filter: merges new_bb with any previously buffered
 * output, decides (per the rules in the big comment below) how much must
 * be written blocking vs. nonblocking, writes it to the client socket,
 * and sets aside whatever could not be sent. new_bb == NULL means the
 * MPM is driving async write completion of already-buffered data.
 * NOTE(review): this listing is truncated — braces, returns and several
 * statements are missing between the visible lines. */
371 apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *new_bb)
374 core_net_rec *net = f->ctx;
375 core_output_filter_ctx_t *ctx = net->out_ctx;
376 apr_bucket_brigade *bb = NULL;
377 apr_bucket *bucket, *next, *flush_upto = NULL;
378 apr_size_t bytes_in_brigade, non_file_bytes_in_brigade;
379 int eor_buckets_in_brigade, morphing_bucket_in_brigade;
382 /* Fail quickly if the connection has already been aborted. */
384 if (new_bb != NULL) {
385 apr_brigade_cleanup(new_bb);
387 return APR_ECONNABORTED;
/* First call on this connection: create the output context and its
 * brigades from c->pool so their lifetime spans the connection. */
391 ctx = apr_pcalloc(c->pool, sizeof(*ctx));
392 net->out_ctx = (core_output_filter_ctx_t *)ctx;
394 * Need to create tmp brigade with correct lifetime. Passing
395 * NULL to apr_brigade_split_ex would result in a brigade
396 * allocated from bb->pool which might be wrong.
398 ctx->tmp_flush_bb = apr_brigade_create(c->pool, c->bucket_alloc);
399 /* same for buffered_bb and ap_save_brigade */
400 ctx->buffered_bb = apr_brigade_create(c->pool, c->bucket_alloc);
/* Prepend any output buffered from a previous call so it is sent
 * before the new data. */
406 if ((ctx->buffered_bb != NULL) &&
407 !APR_BRIGADE_EMPTY(ctx->buffered_bb)) {
408 if (new_bb != NULL) {
409 APR_BRIGADE_PREPEND(bb, ctx->buffered_bb);
412 bb = ctx->buffered_bb;
414 c->data_in_output_filters = 0;
416 else if (new_bb == NULL) {
420 /* Scan through the brigade and decide whether to attempt a write,
421 * and how much to write, based on the following rules:
423 * 1) The new_bb is null: Do a nonblocking write of as much as
424 * possible: do a nonblocking write of as much data as possible,
425 * then save the rest in ctx->buffered_bb. (If new_bb == NULL,
426 * it probably means that the MPM is doing asynchronous write
427 * completion and has just determined that this connection
430 * 2) Determine if and up to which bucket we need to do a blocking
433 * a) The brigade contains a flush bucket: Do a blocking write
434 * of everything up that point.
436 * b) The request is in CONN_STATE_HANDLER state, and the brigade
437 * contains at least THRESHOLD_MAX_BUFFER bytes in non-file
438 * buckets: Do blocking writes until the amount of data in the
439 * buffer is less than THRESHOLD_MAX_BUFFER. (The point of this
440 * rule is to provide flow control, in case a handler is
441 * streaming out lots of data faster than the data can be
442 * sent to the client.)
444 * c) The request is in CONN_STATE_HANDLER state, and the brigade
445 * contains at least MAX_REQUESTS_IN_PIPELINE EOR buckets:
446 * Do blocking writes until less than MAX_REQUESTS_IN_PIPELINE EOR
447 * buckets are left. (The point of this rule is to prevent too many
448 * FDs being kept open by pipelined requests, possibly allowing a
451 * d) The brigade contains a morphing bucket: If there was no other
452 * reason to do a blocking write yet, try reading the bucket. If its
453 * contents fit into memory before THRESHOLD_MAX_BUFFER is reached,
454 * everything is fine. Otherwise we need to do a blocking write the
455 * up to and including the morphing bucket, because ap_save_brigade()
456 * would read the whole bucket into memory later on.
458 * 3) Actually do the blocking write up to the last bucket determined
459 * by rules 2a-d. The point of doing only one flush is to make as
460 * few calls to writev() as possible.
462 * 4) If the brigade contains at least THRESHOLD_MIN_WRITE
463 * bytes: Do a nonblocking write of as much data as possible,
464 * then save the rest in ctx->buffered_bb.
467 if (new_bb == NULL) {
468 rv = send_brigade_nonblocking(net->client_socket, bb,
469 &(ctx->bytes_written), c);
470 if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv)) {
471 /* The client has aborted the connection */
472 ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c,
473 "core_output_filter: writing data to the network");
474 apr_brigade_cleanup(bb);
478 setaside_remaining_output(f, ctx, bb, c);
/* Rule 2: walk the brigade, tallying data/EOR/morphing buckets to
 * find flush_upto, the last bucket that must be written blocking. */
482 bytes_in_brigade = 0;
483 non_file_bytes_in_brigade = 0;
484 eor_buckets_in_brigade = 0;
485 morphing_bucket_in_brigade = 0;
487 for (bucket = APR_BRIGADE_FIRST(bb); bucket != APR_BRIGADE_SENTINEL(bb);
489 next = APR_BUCKET_NEXT(bucket);
491 if (!APR_BUCKET_IS_METADATA(bucket)) {
492 if (bucket->length == (apr_size_t)-1) {
494 * A setaside of morphing buckets would read everything into
495 * memory. Instead, we will flush everything up to and
496 * including this bucket.
498 morphing_bucket_in_brigade = 1;
501 bytes_in_brigade += bucket->length;
502 if (!APR_BUCKET_IS_FILE(bucket))
503 non_file_bytes_in_brigade += bucket->length;
506 else if (AP_BUCKET_IS_EOR(bucket)) {
507 eor_buckets_in_brigade++;
510 if (APR_BUCKET_IS_FLUSH(bucket)
511 || non_file_bytes_in_brigade >= THRESHOLD_MAX_BUFFER
512 || morphing_bucket_in_brigade
513 || eor_buckets_in_brigade > MAX_REQUESTS_IN_PIPELINE) {
514 /* this segment of the brigade MUST be sent before returning. */
516 if (APLOGctrace6(c)) {
517 char *reason = APR_BUCKET_IS_FLUSH(bucket) ?
519 (non_file_bytes_in_brigade >= THRESHOLD_MAX_BUFFER) ?
520 "THRESHOLD_MAX_BUFFER" :
521 morphing_bucket_in_brigade ? "morphing bucket" :
522 "MAX_REQUESTS_IN_PIPELINE";
523 ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, c,
524 "core_output_filter: flushing because of %s",
528 * Defer the actual blocking write to avoid doing many writes.
532 bytes_in_brigade = 0;
533 non_file_bytes_in_brigade = 0;
534 eor_buckets_in_brigade = 0;
535 morphing_bucket_in_brigade = 0;
/* Rule 3: one blocking write of everything up to flush_upto; the
 * tail is parked in tmp_flush_bb and re-joined afterwards. */
539 if (flush_upto != NULL) {
540 ctx->tmp_flush_bb = apr_brigade_split_ex(bb, flush_upto,
542 rv = send_brigade_blocking(net->client_socket, bb,
543 &(ctx->bytes_written), c);
544 if (rv != APR_SUCCESS) {
545 /* The client has aborted the connection */
546 ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c,
547 "core_output_filter: writing data to the network");
548 apr_brigade_cleanup(bb);
552 APR_BRIGADE_CONCAT(bb, ctx->tmp_flush_bb);
/* Rule 4: enough remains to be worth a syscall — nonblocking write,
 * then set aside whatever is left. */
555 if (bytes_in_brigade >= THRESHOLD_MIN_WRITE) {
556 rv = send_brigade_nonblocking(net->client_socket, bb,
557 &(ctx->bytes_written), c);
558 if ((rv != APR_SUCCESS) && (!APR_STATUS_IS_EAGAIN(rv))) {
559 /* The client has aborted the connection */
560 ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c,
561 "core_output_filter: writing data to the network");
562 apr_brigade_cleanup(bb);
568 setaside_remaining_output(f, ctx, bb, c);
/* Set aside whatever of bb could not be written: mark the connection as
 * having pending output and save the data into ctx->buffered_bb (via
 * ap_save_brigade on the deferred_write_pool subpool); when nothing is
 * pending, clear that subpool to release setaside memory.
 * NOTE(review): listing truncated — the original /* opener of the next
 * comment and some braces are not visible here. */
573 * This function assumes that either ctx->buffered_bb == NULL, or
574 * ctx->buffered_bb is empty, or ctx->buffered_bb == bb
576 static void setaside_remaining_output(ap_filter_t *f,
577 core_output_filter_ctx_t *ctx,
578 apr_bucket_brigade *bb,
584 remove_empty_buckets(bb);
585 if (!APR_BRIGADE_EMPTY(bb)) {
586 c->data_in_output_filters = 1;
587 if (bb != ctx->buffered_bb) {
/* Lazily create the subpool so connections that never buffer
 * pay nothing; tagged for pool debugging. */
588 if (!ctx->deferred_write_pool) {
589 apr_pool_create(&ctx->deferred_write_pool, c->pool);
590 apr_pool_tag(ctx->deferred_write_pool, "deferred_write");
592 ap_save_brigade(f, &(ctx->buffered_bb), &bb,
593 ctx->deferred_write_pool);
596 else if (ctx->deferred_write_pool) {
598 * There are no more requests in the pipeline. We can just clear the
601 apr_pool_clear(ctx->deferred_write_pool);
/* Cap on iovec entries passed to apr_socket_sendv(): 16, unless the
 * platform's APR_MAX_IOVEC_SIZE is smaller.
 * NOTE(review): the #else/#endif lines of this conditional are missing
 * from this truncated listing. */
605 #ifndef APR_MAX_IOVEC_SIZE
606 #define MAX_IOVEC_TO_WRITE 16
608 #if APR_MAX_IOVEC_SIZE > 16
609 #define MAX_IOVEC_TO_WRITE 16
611 #define MAX_IOVEC_TO_WRITE APR_MAX_IOVEC_SIZE
/* Write as much of bb to socket s as possible without blocking: data
 * buckets are batched into an iovec and sent with writev_nonblocking();
 * large file buckets go through sendfile_nonblocking() (bracketed by
 * TCP_NOPUSH to coalesce packets). *bytes_written is advanced; consumed
 * buckets are removed from bb. Returns APR_SUCCESS, APR_EAGAIN when the
 * socket would block, or a write error.
 * NOTE(review): listing truncated — braces/returns missing between the
 * visible lines. */
615 static apr_status_t send_brigade_nonblocking(apr_socket_t *s,
616 apr_bucket_brigade *bb,
617 apr_size_t *bytes_written,
620 apr_bucket *bucket, *next;
622 struct iovec vec[MAX_IOVEC_TO_WRITE];
625 remove_empty_buckets(bb);
627 for (bucket = APR_BRIGADE_FIRST(bb);
628 bucket != APR_BRIGADE_SENTINEL(bb);
630 next = APR_BUCKET_NEXT(bucket);
/* File bucket large enough for sendfile: flush the pending iovec
 * first so byte order on the wire is preserved. */
632 if (APR_BUCKET_IS_FILE(bucket)) {
633 apr_bucket_file *file_bucket = (apr_bucket_file *)(bucket->data);
634 apr_file_t *fd = file_bucket->fd;
635 /* Use sendfile to send this file unless:
636 * - the platform doesn't support sendfile,
637 * - the file is too small for sendfile to be useful, or
638 * - sendfile is disabled in the httpd config via "EnableSendfile off"
641 if ((apr_file_flags_get(fd) & APR_SENDFILE_ENABLED) &&
642 (bucket->length >= AP_MIN_SENDFILE_BYTES)) {
644 (void)apr_socket_opt_set(s, APR_TCP_NOPUSH, 1);
645 rv = writev_nonblocking(s, vec, nvec, bb, bytes_written, c);
646 if (rv != APR_SUCCESS) {
647 (void)apr_socket_opt_set(s, APR_TCP_NOPUSH, 0);
651 rv = sendfile_nonblocking(s, bucket, bytes_written, c);
653 (void)apr_socket_opt_set(s, APR_TCP_NOPUSH, 0);
656 if (rv != APR_SUCCESS) {
662 #endif /* APR_HAS_SENDFILE */
663 /* didn't sendfile */
664 if (!APR_BUCKET_IS_METADATA(bucket)) {
668 /* Non-blocking read first, in case this is a morphing
670 rv = apr_bucket_read(bucket, &data, &length, APR_NONBLOCK_READ);
671 if (APR_STATUS_IS_EAGAIN(rv)) {
672 /* Read would block; flush any pending data and retry. */
674 rv = writev_nonblocking(s, vec, nvec, bb, bytes_written, c);
/* Retry the same bucket blocking — only its *read* blocks
 * here, the socket write path stays nonblocking. */
681 rv = apr_bucket_read(bucket, &data, &length, APR_BLOCK_READ);
683 if (rv != APR_SUCCESS) {
687 /* reading may have split the bucket, so recompute next: */
688 next = APR_BUCKET_NEXT(bucket);
689 vec[nvec].iov_base = (char *)data;
690 vec[nvec].iov_len = length;
/* iovec full: flush it before accumulating more. */
692 if (nvec == MAX_IOVEC_TO_WRITE) {
693 rv = writev_nonblocking(s, vec, nvec, bb, bytes_written, c);
695 if (rv != APR_SUCCESS) {
704 rv = writev_nonblocking(s, vec, nvec, bb, bytes_written, c);
705 if (rv != APR_SUCCESS) {
710 remove_empty_buckets(bb);
/* Pop leading metadata and zero-length buckets off bb, so the brigade
 * either is empty or starts with real data.
 * NOTE(review): listing truncated — local declaration and closing braces
 * are not visible here. */
715 static void remove_empty_buckets(apr_bucket_brigade *bb)
718 while (((bucket = APR_BRIGADE_FIRST(bb)) != APR_BRIGADE_SENTINEL(bb)) &&
719 (APR_BUCKET_IS_METADATA(bucket) || (bucket->length == 0))) {
720 apr_bucket_delete(bucket);
/* Write ALL of bb to socket s: loop send_brigade_nonblocking(), and on
 * EAGAIN poll the socket for writability (using its configured timeout,
 * retrying on EINTR) before trying again. Returns APR_SUCCESS only when
 * the brigade has been fully sent, else the write/poll error.
 * NOTE(review): listing truncated — braces/returns missing between the
 * visible lines. */
724 static apr_status_t send_brigade_blocking(apr_socket_t *s,
725 apr_bucket_brigade *bb,
726 apr_size_t *bytes_written,
732 while (!APR_BRIGADE_EMPTY(bb)) {
733 rv = send_brigade_nonblocking(s, bb, bytes_written, c);
734 if (rv != APR_SUCCESS) {
735 if (APR_STATUS_IS_EAGAIN(rv)) {
736 /* Wait until we can send more data */
738 apr_interval_time_t timeout;
739 apr_pollfd_t pollset;
742 pollset.desc_type = APR_POLL_SOCKET;
743 pollset.reqevents = APR_POLLOUT;
/* Poll with the socket's own timeout so blocking semantics
 * (including Timeout directive behavior) are preserved. */
745 apr_socket_timeout_get(s, &timeout);
747 rv = apr_poll(&pollset, 1, &nsds, timeout);
748 } while (APR_STATUS_IS_EINTR(rv));
749 if (rv != APR_SUCCESS) {
/* Send the nvec iovec entries on socket s without blocking: temporarily
 * sets the socket timeout to 0, loops apr_socket_sendv(), deletes fully
 * written buckets from bb (splitting a partially written one), reports
 * bytes to mod_logio if present, adds to *cumulative_bytes_written, and
 * restores the original socket timeout before returning. Returns the
 * sendv error (e.g. EAGAIN) on a short write.
 * NOTE(review): listing truncated — braces and some statements missing
 * between the visible lines. */
761 static apr_status_t writev_nonblocking(apr_socket_t *s,
762 struct iovec *vec, apr_size_t nvec,
763 apr_bucket_brigade *bb,
764 apr_size_t *cumulative_bytes_written,
767 apr_status_t rv = APR_SUCCESS, arv;
768 apr_size_t bytes_written = 0, bytes_to_write = 0;
769 apr_size_t i, offset;
770 apr_interval_time_t old_timeout;
772 arv = apr_socket_timeout_get(s, &old_timeout);
773 if (arv != APR_SUCCESS) {
/* timeout 0 == nonblocking writes for the duration of this call. */
776 arv = apr_socket_timeout_set(s, 0);
777 if (arv != APR_SUCCESS) {
781 for (i = 0; i < nvec; i++) {
782 bytes_to_write += vec[i].iov_len;
/* After each sendv, retire n written bytes: metadata buckets are
 * free, whole iovec entries delete their bucket, and a partial
 * write splits the bucket and advances the iovec base pointer. */
785 while (bytes_written < bytes_to_write) {
787 rv = apr_socket_sendv(s, vec + offset, nvec - offset, &n);
790 for (i = offset; i < nvec; ) {
791 apr_bucket *bucket = APR_BRIGADE_FIRST(bb);
792 if (APR_BUCKET_IS_METADATA(bucket)) {
793 apr_bucket_delete(bucket);
795 else if (n >= vec[i].iov_len) {
796 apr_bucket_delete(bucket);
798 n -= vec[i++].iov_len;
801 apr_bucket_split(bucket, n);
802 apr_bucket_delete(bucket);
804 vec[i].iov_base = (char *) vec[i].iov_base + n;
809 if (rv != APR_SUCCESS) {
813 if ((ap__logio_add_bytes_out != NULL) && (bytes_written > 0)) {
814 ap__logio_add_bytes_out(c, bytes_written);
816 *cumulative_bytes_written += bytes_written;
/* Restore the caller's timeout; surface a restore failure only if
 * the write itself succeeded. */
818 arv = apr_socket_timeout_set(s, old_timeout);
819 if ((arv != APR_SUCCESS) && (rv == APR_SUCCESS)) {
/* Send (part of) a FILE bucket on socket s via apr_socket_sendfile()
 * with the socket timeout forced to 0 (nonblocking), then restore the
 * timeout, report bytes to mod_logio, and trim the bucket: split+delete
 * on a partial send, delete outright when fully sent.
 * NOTE(review): listing truncated — braces, returns and the function's
 * tail are not visible in this view. */
829 static apr_status_t sendfile_nonblocking(apr_socket_t *s,
831 apr_size_t *cumulative_bytes_written,
834 apr_status_t rv = APR_SUCCESS;
835 apr_bucket_file *file_bucket;
837 apr_size_t file_length;
838 apr_off_t file_offset;
839 apr_size_t bytes_written = 0;
/* Defensive check: callers must only pass FILE buckets here. */
841 if (!APR_BUCKET_IS_FILE(bucket)) {
842 ap_log_error(APLOG_MARK, APLOG_ERR, rv, c->base_server, APLOGNO(00006)
843 "core_filter: sendfile_nonblocking: "
844 "this should never happen");
847 file_bucket = (apr_bucket_file *)(bucket->data);
848 fd = file_bucket->fd;
849 file_length = bucket->length;
850 file_offset = bucket->start;
/* Nonblocking sendfile attempt: zero the timeout, send, restore. */
852 if (bytes_written < file_length) {
853 apr_size_t n = file_length - bytes_written;
855 apr_interval_time_t old_timeout;
857 arv = apr_socket_timeout_get(s, &old_timeout);
858 if (arv != APR_SUCCESS) {
861 arv = apr_socket_timeout_set(s, 0);
862 if (arv != APR_SUCCESS) {
865 rv = apr_socket_sendfile(s, fd, NULL, &file_offset, &n, 0);
866 if (rv == APR_SUCCESS) {
870 arv = apr_socket_timeout_set(s, old_timeout);
871 if ((arv != APR_SUCCESS) && (rv == APR_SUCCESS)) {
875 if ((ap__logio_add_bytes_out != NULL) && (bytes_written > 0)) {
876 ap__logio_add_bytes_out(c, bytes_written);
878 *cumulative_bytes_written += bytes_written;
/* Partial send: keep only the unsent remainder of the file bucket. */
879 if ((bytes_written < file_length) && (bytes_written > 0)) {
880 apr_bucket_split(bucket, bytes_written);
881 apr_bucket_delete(bucket);
883 else if (bytes_written == file_length) {
884 apr_bucket_delete(bucket);