/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include <apr_pools.h>
19 #include <apr_thread_mutex.h>
20 #include <apr_thread_cond.h>
23 #include <http_core.h>
25 #include <http_connection.h>
27 #include "h2_private.h"
31 #include "h2_response.h"
32 #include "h2_request.h"
36 h2_io *h2_io_create(int id, apr_pool_t *pool, const h2_request *request)
38 h2_io *io = apr_pcalloc(pool, sizeof(*io));
42 io->bucket_alloc = apr_bucket_alloc_create(pool);
43 io->request = h2_request_clone(pool, request);
48 static void check_bbin(h2_io *io)
51 io->bbin = apr_brigade_create(io->pool, io->bucket_alloc);
55 static void check_bbout(h2_io *io)
58 io->bbout = apr_brigade_create(io->pool, io->bucket_alloc);
62 static void check_bbtmp(h2_io *io)
65 io->bbtmp = apr_brigade_create(io->pool, io->bucket_alloc);
69 static void append_eos(h2_io *io, apr_bucket_brigade *bb)
71 APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(io->bucket_alloc));
74 void h2_io_redo(h2_io *io)
76 io->worker_started = 0;
80 apr_brigade_cleanup(io->bbin);
83 apr_brigade_cleanup(io->bbout);
86 apr_brigade_cleanup(io->bbtmp);
88 io->started_at = io->done_at = 0;
91 int h2_io_is_repeatable(h2_io *io) {
93 || io->input_consumed > 0
95 /* cannot repeat that. */
98 return (!strcmp("GET", io->request->method)
99 || !strcmp("HEAD", io->request->method)
100 || !strcmp("OPTIONS", io->request->method));
103 void h2_io_set_response(h2_io *io, h2_response *response)
105 AP_DEBUG_ASSERT(io->pool);
106 AP_DEBUG_ASSERT(response);
107 AP_DEBUG_ASSERT(!io->response);
108 io->response = h2_response_clone(io->pool, response);
109 if (response->rst_error) {
110 h2_io_rst(io, response->rst_error);
114 void h2_io_rst(h2_io *io, int error)
116 io->rst_error = error;
120 int h2_io_out_has_data(h2_io *io)
122 return io->bbout && h2_util_bb_has_data_or_eos(io->bbout);
125 apr_off_t h2_io_out_length(h2_io *io)
129 apr_brigade_length(io->bbout, 0, &len);
130 return (len > 0)? len : 0;
135 apr_status_t h2_io_in_shutdown(h2_io *io)
138 apr_off_t end_len = 0;
139 apr_brigade_length(io->bbin, 1, &end_len);
140 io->input_consumed += end_len;
141 apr_brigade_cleanup(io->bbin);
143 return h2_io_in_close(io);
147 void h2_io_signal_init(h2_io *io, h2_io_op op, apr_interval_time_t timeout,
148 apr_thread_cond_t *cond)
151 io->timed_cond = cond;
153 io->timeout_at = apr_time_now() + timeout;
160 void h2_io_signal_exit(h2_io *io)
162 io->timed_cond = NULL;
166 apr_status_t h2_io_signal_wait(h2_mplx *m, h2_io *io)
170 if (io->timeout_at != 0) {
171 status = apr_thread_cond_timedwait(io->timed_cond, m->lock, io->timeout_at);
172 if (APR_STATUS_IS_TIMEUP(status)) {
173 ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c, APLOGNO(03055)
174 "h2_mplx(%ld-%d): stream timeout expired: %s",
176 (io->timed_op == H2_IO_READ)? "read" : "write");
177 h2_io_rst(io, H2_ERR_CANCEL);
181 apr_thread_cond_wait(io->timed_cond, m->lock);
182 status = APR_SUCCESS;
184 if (io->orphaned && status == APR_SUCCESS) {
185 return APR_ECONNABORTED;
190 void h2_io_signal(h2_io *io, h2_io_op op)
192 if (io->timed_cond && (io->timed_op == op || H2_IO_ANY == op)) {
193 apr_thread_cond_signal(io->timed_cond);
197 void h2_io_make_orphaned(h2_io *io, int error)
201 h2_io_rst(io, error);
203 /* if someone is waiting, wake him up */
204 h2_io_signal(io, H2_IO_ANY);
207 static int add_trailer(void *ctx, const char *key, const char *value)
209 apr_bucket_brigade *bb = ctx;
212 status = apr_brigade_printf(bb, NULL, NULL, "%s: %s\r\n",
214 return (status == APR_SUCCESS);
217 static apr_status_t in_append_eos(h2_io *io, apr_bucket_brigade *bb,
218 apr_table_t *trailers)
220 apr_status_t status = APR_SUCCESS;
221 apr_table_t *t = io->request->trailers;
223 if (trailers && t && !apr_is_empty_table(trailers)) {
224 /* trailers passed in, transfer directly. */
225 apr_table_overlap(trailers, t, APR_OVERLAP_TABLES_SET);
229 if (io->request->chunked) {
230 if (t && !apr_is_empty_table(t)) {
231 /* no trailers passed in, transfer via chunked */
232 status = apr_brigade_puts(bb, NULL, NULL, "0\r\n");
233 apr_table_do(add_trailer, bb, t, NULL);
234 status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
237 status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
244 apr_status_t h2_io_in_read(h2_io *io, apr_bucket_brigade *bb,
245 apr_size_t maxlen, apr_table_t *trailers)
247 apr_off_t start_len = 0;
251 return APR_ECONNABORTED;
254 if (!io->bbin || APR_BRIGADE_EMPTY(io->bbin)) {
256 if (!io->eos_in_written) {
257 status = in_append_eos(io, bb, trailers);
258 io->eos_in_written = 1;
266 if (io->request->chunked) {
267 /* the reader expects HTTP/1.1 chunked encoding */
269 status = h2_util_move(io->bbtmp, io->bbin, maxlen, NULL, "h2_io_in_read_chunk");
270 if (status == APR_SUCCESS) {
271 apr_off_t tmp_len = 0;
273 apr_brigade_length(io->bbtmp, 1, &tmp_len);
275 io->input_consumed += tmp_len;
276 status = apr_brigade_printf(bb, NULL, NULL, "%lx\r\n",
277 (unsigned long)tmp_len);
278 if (status == APR_SUCCESS) {
279 status = h2_util_move(bb, io->bbtmp, -1, NULL, "h2_io_in_read_tmp1");
280 if (status == APR_SUCCESS) {
281 status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
286 status = h2_util_move(bb, io->bbtmp, -1, NULL, "h2_io_in_read_tmp2");
288 apr_brigade_cleanup(io->bbtmp);
292 apr_brigade_length(bb, 1, &start_len);
294 status = h2_util_move(bb, io->bbin, maxlen, NULL, "h2_io_in_read");
295 if (status == APR_SUCCESS) {
296 apr_off_t end_len = 0;
297 apr_brigade_length(bb, 1, &end_len);
298 io->input_consumed += (end_len - start_len);
302 if (status == APR_SUCCESS && (!io->bbin || APR_BRIGADE_EMPTY(io->bbin))) {
304 if (!io->eos_in_written) {
305 status = in_append_eos(io, bb, trailers);
306 io->eos_in_written = 1;
311 if (status == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)) {
317 apr_status_t h2_io_in_write(h2_io *io, const char *d, apr_size_t len, int eos)
320 return APR_ECONNABORTED;
331 return apr_brigade_write(io->bbin, NULL, NULL, d, len);
336 apr_status_t h2_io_in_close(h2_io *io)
339 return APR_ECONNABORTED;
346 static int is_out_readable(h2_io *io, apr_off_t *plen, int *peos,
350 *ps = APR_ECONNABORTED;
353 if (io->eos_out_read) {
359 else if (!io->bbout) {
368 apr_status_t h2_io_out_readx(h2_io *io,
369 h2_io_data_cb *cb, void *ctx,
370 apr_off_t *plen, int *peos)
373 if (!is_out_readable(io, plen, peos, &status)) {
377 /* just checking length available */
378 status = h2_util_bb_avail(io->bbout, plen, peos);
381 status = h2_util_bb_readx(io->bbout, cb, ctx, plen, peos);
382 if (status == APR_SUCCESS) {
383 io->eos_out_read = *peos;
384 io->output_consumed += *plen;
390 apr_status_t h2_io_out_read_to(h2_io *io, apr_bucket_brigade *bb,
391 apr_off_t *plen, int *peos)
394 if (!is_out_readable(io, plen, peos, &status)) {
397 io->eos_out_read = *peos = h2_util_has_eos(io->bbout, *plen);
398 status = h2_util_move(bb, io->bbout, *plen, NULL, "h2_io_read_to");
399 io->output_consumed += *plen;
403 static void process_trailers(h2_io *io, apr_table_t *trailers)
405 if (trailers && io->response) {
406 h2_response_set_trailers(io->response,
407 apr_table_clone(io->pool, trailers));
411 apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb,
412 apr_size_t maxlen, apr_table_t *trailers,
413 apr_size_t *pfile_buckets_allowed)
419 return APR_ECONNABORTED;
424 /* We have already delivered an EOS bucket to a reader, no
425 * sense in storing anything more here.
427 status = apr_brigade_length(bb, 1, &len);
428 if (status == APR_SUCCESS) {
430 /* someone tries to write real data after EOS, that
431 * does not look right. */
434 /* cleanup, as if we had moved the data */
435 apr_brigade_cleanup(bb);
440 process_trailers(io, trailers);
442 /* Let's move the buckets from the request processing in here, so
443 * that the main thread can read them when it has time/capacity.
445 * Move at most "maxlen" memory bytes. If buckets remain, it is
446 * the caller's responsibility to take care of this.
448 * We allow passing of file buckets as long as we do not have too
449 * many open files already buffered. Otherwise we will run out of
453 start_allowed = *pfile_buckets_allowed;
454 status = h2_util_move(io->bbout, bb, maxlen, pfile_buckets_allowed,
456 /* track # file buckets moved into our pool */
457 if (start_allowed != *pfile_buckets_allowed) {
458 io->files_handles_owned += (start_allowed - *pfile_buckets_allowed);
464 apr_status_t h2_io_out_close(h2_io *io, apr_table_t *trailers)
467 return APR_ECONNABORTED;
469 if (!io->eos_out_read) { /* EOS has not been read yet */
470 process_trailers(io, trailers);
474 if (!h2_util_has_eos(io->bbout, -1)) {
475 append_eos(io, io->bbout);