1 /* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
18 #include <apr_pools.h>
19 #include <apr_thread_mutex.h>
20 #include <apr_thread_cond.h>
23 #include <http_core.h>
25 #include <http_connection.h>
26 #include <http_request.h>
28 #include "h2_private.h"
32 #include "h2_response.h"
33 #include "h2_request.h"
/* Create the per-stream I/O handle for stream `id`.
 * The h2_io is allocated from `pool` via apr_pcalloc, so every field
 * starts out zeroed. The bucket allocator is stored for later brigade
 * creation, and the request is cloned into `pool` so its lifetime is
 * tied to this io.
 * NOTE(review): this excerpt is line-sampled — the id/pool field
 * assignments and the return statement are elided here. */
37 h2_io *h2_io_create(int id, apr_pool_t *pool,
38 apr_bucket_alloc_t *bucket_alloc,
39 const h2_request *request)
41 h2_io *io = apr_pcalloc(pool, sizeof(*io));
45 io->bucket_alloc = bucket_alloc;
46 io->request = h2_request_clone(pool, request);
/* Lazily create the input brigade io->bbin on first use
 * (the guarding `if (!io->bbin)` line is elided in this excerpt). */
51 static void check_bbin(h2_io *io)
54 io->bbin = apr_brigade_create(io->pool, io->bucket_alloc);
/* Lazily create the output brigade io->bbout on first use
 * (the guarding `if (!io->bbout)` line is elided in this excerpt). */
58 static void check_bbout(h2_io *io)
61 io->bbout = apr_brigade_create(io->pool, io->bucket_alloc);
/* Lazily create the scratch brigade io->bbtmp on first use
 * (the guarding `if (!io->bbtmp)` line is elided in this excerpt). */
65 static void check_bbtmp(h2_io *io)
68 io->bbtmp = apr_brigade_create(io->pool, io->bucket_alloc);
/* Append an EOS bucket (created from this io's bucket allocator) to
 * the tail of brigade `bb`, marking end-of-stream for readers. */
72 static void append_eos(h2_io *io, apr_bucket_brigade *bb)
74 APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(io->bucket_alloc));
/* Reset the io so the request can be processed again from scratch:
 * clear the worker-started flag, drop any buffered input, output and
 * scratch data, and zero the start/done timestamps.
 * NOTE(review): excerpt is line-sampled — the NULL guards around each
 * apr_brigade_cleanup (and any other flag resets) are elided. */
77 void h2_io_redo(h2_io *io)
79 io->worker_started = 0;
83 apr_brigade_cleanup(io->bbin);
86 apr_brigade_cleanup(io->bbout);
89 apr_brigade_cleanup(io->bbtmp);
91 io->started_at = io->done_at = 0;
/* Return non-zero iff this request may safely be submitted again.
 * A request is never repeatable once any of its input was consumed
 * (visible `input_consumed > 0` disjunct; the other disjuncts in the
 * condition are elided in this excerpt) — such a request "cannot
 * repeat". Otherwise only safe methods qualify: GET, HEAD, OPTIONS. */
94 int h2_io_is_repeatable(h2_io *io) {
96 || io->input_consumed > 0
98 /* cannot repeat that. */
101 return (!strcmp("GET", io->request->method)
102 || !strcmp("HEAD", io->request->method)
103 || !strcmp("OPTIONS", io->request->method));
/* Install the response for this stream. The response is cloned into
 * io->pool so it outlives the caller's copy. Debug asserts enforce
 * that the pool exists, the response is non-NULL, and that no response
 * was set before (set-once contract). If the response carries an RST
 * error code, the stream is reset immediately with that code. */
106 void h2_io_set_response(h2_io *io, h2_response *response)
108 AP_DEBUG_ASSERT(io->pool);
109 AP_DEBUG_ASSERT(response);
110 AP_DEBUG_ASSERT(!io->response);
111 io->response = h2_response_clone(io->pool, response);
112 if (response->rst_error) {
113 h2_io_rst(io, response->rst_error);
/* Mark the stream as reset with the given HTTP/2 error code.
 * NOTE(review): additional state changes (e.g. closing input/output
 * flags) appear to be elided from this excerpt — confirm in full file. */
117 void h2_io_rst(h2_io *io, int error)
119 io->rst_error = error;
/* Non-zero iff the output brigade exists and currently holds data
 * (or an EOS marker) for the main connection to read. */
123 int h2_io_out_has_data(h2_io *io)
125 return io->bbout && h2_util_bb_has_data_or_eos(io->bbout);
/* Number of output bytes currently buffered in io->bbout.
 * apr_brigade_length is called with read_all=0, so it may report -1
 * for unknown-length buckets; negative results are clamped to 0.
 * NOTE(review): the `if (io->bbout)` guard and the fallback return for
 * a missing brigade are elided in this excerpt. */
128 apr_off_t h2_io_out_length(h2_io *io)
132 apr_brigade_length(io->bbout, 0, &len);
133 return (len > 0)? len : 0;
/* Discard any unread request input: measure what is still buffered in
 * io->bbin (read_all=1 forces a full count), account it as consumed in
 * input_consumed, empty the brigade, then close the input side via
 * h2_io_in_close(). Returns the close status.
 * NOTE(review): the `if (io->bbin)` guard around the drain appears to
 * be elided in this excerpt. */
138 apr_status_t h2_io_in_shutdown(h2_io *io)
141 apr_off_t end_len = 0;
142 apr_brigade_length(io->bbin, 1, &end_len);
143 io->input_consumed += end_len;
144 apr_brigade_cleanup(io->bbin);
146 return h2_io_in_close(io);
/* Register for a blocking wait on this io: remember the condition
 * variable and convert the relative `timeout` into an absolute
 * deadline (io->timeout_at).
 * NOTE(review): the recording of `op` into io->timed_op and the
 * timeout<=0 ("no deadline") branch are elided in this excerpt. */
150 void h2_io_signal_init(h2_io *io, h2_io_op op, apr_interval_time_t timeout,
151 apr_thread_cond_t *cond)
154 io->timed_cond = cond;
156 io->timeout_at = apr_time_now() + timeout;
/* Done waiting: clear the registered condition variable so
 * h2_io_signal() no longer tries to wake anyone. */
163 void h2_io_signal_exit(h2_io *io)
165 io->timed_cond = NULL;
/* Block the caller (holding m->lock) until h2_io_signal() wakes it or
 * the deadline passes. With a deadline set (timeout_at != 0) a timed
 * wait is used; on timeout the event is logged — naming the waited-for
 * op as "read" or "write" — and the stream is reset with
 * H2_ERR_CANCEL. Without a deadline an untimed wait is used and the
 * status forced to APR_SUCCESS. If the stream was orphaned while
 * waiting, APR_ECONNABORTED is returned instead of success.
 * NOTE(review): per pthread/APR cond semantics the caller must hold
 * m->lock; spurious wakeups are possible — callers presumably re-check
 * their condition in a loop (not visible here). */
169 apr_status_t h2_io_signal_wait(h2_mplx *m, h2_io *io)
173 if (io->timeout_at != 0) {
174 status = apr_thread_cond_timedwait(io->timed_cond, m->lock, io->timeout_at);
175 if (APR_STATUS_IS_TIMEUP(status)) {
176 ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c, APLOGNO(03055)
177 "h2_mplx(%ld-%d): stream timeout expired: %s",
179 (io->timed_op == H2_IO_READ)? "read" : "write");
180 h2_io_rst(io, H2_ERR_CANCEL);
184 apr_thread_cond_wait(io->timed_cond, m->lock);
185 status = APR_SUCCESS;
187 if (io->orphaned && status == APR_SUCCESS) {
188 return APR_ECONNABORTED;
/* Wake a thread waiting on this io, but only if a waiter is registered
 * (timed_cond set) and it is waiting for this op — or op is H2_IO_ANY,
 * which matches any registered wait. */
193 void h2_io_signal(h2_io *io, h2_io_op op)
195 if (io->timed_cond && (io->timed_op == op || H2_IO_ANY == op)) {
196 apr_thread_cond_signal(io->timed_cond);
/* Detach the stream from its session: reset it with `error` and wake
 * any thread blocked in h2_io_signal_wait() so it can observe the
 * orphaned state and bail out.
 * NOTE(review): the `io->orphaned = 1` assignment itself is elided in
 * this excerpt — confirm in full file. */
200 void h2_io_make_orphaned(h2_io *io, int error)
204 h2_io_rst(io, error);
206 /* if someone is waiting, wake him up */
207 h2_io_signal(io, H2_IO_ANY);
/* apr_table_do() callback: append one "key: value\r\n" trailer line to
 * the brigade passed as ctx. Returning non-zero continues iteration,
 * so iteration stops on the first failed write.
 * (The key/value arguments of the printf call are on an elided
 * continuation line in this excerpt.) */
210 static int add_trailer(void *ctx, const char *key, const char *value)
212 apr_bucket_brigade *bb = ctx;
215 status = apr_brigade_printf(bb, NULL, NULL, "%s: %s\r\n",
217 return (status == APR_SUCCESS);
/* Terminate the request input stream in `bb`.
 * If the caller supplied a non-empty `trailers` table and the request
 * declared trailers, the declared trailers are merged into the
 * caller's table directly (apr_table_overlap with SET semantics).
 * For requests the reader consumes as HTTP/1.1 chunked, the
 * terminating zero-length chunk is written: "0\r\n", then any declared
 * trailers (via add_trailer), then the final "\r\n" — or the compact
 * "0\r\n\r\n" when there are no trailers.
 * NOTE(review): the appending of the actual EOS bucket and the final
 * return are elided in this excerpt. */
220 static apr_status_t in_append_eos(h2_io *io, apr_bucket_brigade *bb,
221 apr_table_t *trailers)
223 apr_status_t status = APR_SUCCESS;
224 apr_table_t *t = io->request->trailers;
226 if (trailers && t && !apr_is_empty_table(trailers)) {
227 /* trailers passed in, transfer directly. */
228 apr_table_overlap(trailers, t, APR_OVERLAP_TABLES_SET);
232 if (io->request->chunked) {
233 if (t && !apr_is_empty_table(t)) {
234 /* no trailers passed in, transfer via chunked */
235 status = apr_brigade_puts(bb, NULL, NULL, "0\r\n");
236 apr_table_do(add_trailer, bb, t, NULL);
237 status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
240 status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
/* Move up to `maxlen` bytes of buffered request input from io->bbin
 * into the caller's brigade `bb`.
 *
 * Flow (elided guards noted inline):
 *  - An (elided) guard returns APR_ECONNABORTED for dead streams.
 *  - Empty input buffer: if input has (presumably) ended, append the
 *    EOS framing exactly once — eos_in_written latches it.
 *  - Chunked requests: data is staged through io->bbtmp so its exact
 *    length is known, accounted in input_consumed, then re-framed as
 *    an HTTP/1.1 chunk: "<hex-len>\r\n" + data + "\r\n". On a printf
 *    failure the staged data is still moved to bb unframed
 *    (h2_io_in_read_tmp2 path); bbtmp is cleaned up either way.
 *  - Non-chunked: bytes are moved directly and the brigade-length
 *    delta is accounted in input_consumed.
 *  - After a successful move, an empty bbin again triggers the
 *    once-only EOS append.
 *  - If nothing at all was produced, the (elided) tail presumably
 *    returns an EAGAIN-style status — confirm in full file. */
247 apr_status_t h2_io_in_read(h2_io *io, apr_bucket_brigade *bb,
248 apr_size_t maxlen, apr_table_t *trailers)
250 apr_off_t start_len = 0;
254 return APR_ECONNABORTED;
257 if (!io->bbin || APR_BRIGADE_EMPTY(io->bbin)) {
259 if (!io->eos_in_written) {
260 status = in_append_eos(io, bb, trailers);
261 io->eos_in_written = 1;
269 if (io->request->chunked) {
270 /* the reader expects HTTP/1.1 chunked encoding */
272 status = h2_util_move(io->bbtmp, io->bbin, maxlen, NULL, "h2_io_in_read_chunk");
273 if (status == APR_SUCCESS) {
274 apr_off_t tmp_len = 0;
276 apr_brigade_length(io->bbtmp, 1, &tmp_len);
278 io->input_consumed += tmp_len;
279 status = apr_brigade_printf(bb, NULL, NULL, "%lx\r\n",
280 (unsigned long)tmp_len);
281 if (status == APR_SUCCESS) {
282 status = h2_util_move(bb, io->bbtmp, -1, NULL, "h2_io_in_read_tmp1");
283 if (status == APR_SUCCESS) {
284 status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
289 status = h2_util_move(bb, io->bbtmp, -1, NULL, "h2_io_in_read_tmp2");
291 apr_brigade_cleanup(io->bbtmp);
295 apr_brigade_length(bb, 1, &start_len);
297 status = h2_util_move(bb, io->bbin, maxlen, NULL, "h2_io_in_read");
298 if (status == APR_SUCCESS) {
299 apr_off_t end_len = 0;
300 apr_brigade_length(bb, 1, &end_len);
301 io->input_consumed += (end_len - start_len);
305 if (status == APR_SUCCESS && (!io->bbin || APR_BRIGADE_EMPTY(io->bbin))) {
307 if (!io->eos_in_written) {
308 status = in_append_eos(io, bb, trailers);
309 io->eos_in_written = 1;
314 if (status == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)) {
/* Append `len` bytes of request body data `d` to the input brigade.
 * Dead streams are rejected with APR_ECONNABORTED (guard condition
 * elided in this excerpt). Returns the brigade-write status.
 * NOTE(review): handling of the `eos` flag and the check_bbin() call
 * that creates io->bbin are elided here — confirm in full file. */
320 apr_status_t h2_io_in_write(h2_io *io, const char *d, apr_size_t len, int eos)
323 return APR_ECONNABORTED;
334 return apr_brigade_write(io->bbin, NULL, NULL, d, len);
/* Close the input side of the stream. Dead streams are rejected with
 * APR_ECONNABORTED (guard condition elided). The actual eos flagging
 * and success return are elided in this excerpt. */
339 apr_status_t h2_io_in_close(h2_io *io)
342 return APR_ECONNABORTED;
/* Common guard for output reads: decides whether reading from
 * io->bbout may proceed, reporting the status through *ps. Visible
 * cases: a dead stream yields APR_ECONNABORTED; EOS already read and a
 * missing output brigade are handled separately (their bodies are
 * elided in this excerpt, presumably setting *plen/*peos and a
 * non-proceed result — confirm in full file). Returns non-zero iff the
 * caller should go ahead and read. */
349 static int is_out_readable(h2_io *io, apr_off_t *plen, int *peos,
353 *ps = APR_ECONNABORTED;
356 if (io->eos_out_read) {
362 else if (!io->bbout) {
/* Read response output via callback. After the common
 * is_out_readable() guard: with no callback (elided `if (!cb)` branch)
 * only the available length/eos is probed via h2_util_bb_avail;
 * otherwise up to *plen bytes are delivered through `cb` with
 * h2_util_bb_readx. On success the eos flag is latched into
 * eos_out_read and the delivered byte count added to output_consumed.
 * *plen/*peos are in-out: capacity in, actual amount/eos out. */
371 apr_status_t h2_io_out_readx(h2_io *io,
372 h2_io_data_cb *cb, void *ctx,
373 apr_off_t *plen, int *peos)
376 if (!is_out_readable(io, plen, peos, &status)) {
380 /* just checking length available */
381 status = h2_util_bb_avail(io->bbout, plen, peos);
384 status = h2_util_bb_readx(io->bbout, cb, ctx, plen, peos);
385 if (status == APR_SUCCESS) {
386 io->eos_out_read = *peos;
387 io->output_consumed += *plen;
/* Move up to *plen bytes of response output from io->bbout into the
 * caller's brigade `bb` (after the common is_out_readable() guard).
 * When the output side has ended (eos_out) and the brigade drained
 * empty, eos is latched (eos_out_read) and reported through *peos.
 * The moved byte count is added to output_consumed. */
393 apr_status_t h2_io_out_read_to(h2_io *io, apr_bucket_brigade *bb,
394 apr_off_t *plen, int *peos)
397 if (!is_out_readable(io, plen, peos, &status)) {
400 status = h2_util_move(bb, io->bbout, *plen, NULL, "h2_io_read_to");
401 if (status == APR_SUCCESS && io->eos_out && APR_BRIGADE_EMPTY(io->bbout)) {
402 io->eos_out_read = *peos = 1;
404 io->output_consumed += *plen;
/* Attach response trailers: if both a trailer table and a response
 * exist, store a clone of the trailers (allocated from io->pool) on
 * the response. No-op otherwise. */
408 static void process_trailers(h2_io *io, apr_table_t *trailers)
410 if (trailers && io->response) {
411 h2_response_set_trailers(io->response,
412 apr_table_clone(io->pool, trailers));
/* Accept response output from the worker thread into io->bbout so the
 * main connection can read it later.
 *
 * Steps (elided guards noted inline):
 *  - An (elided) guard rejects dead streams with APR_ECONNABORTED.
 *  - The EOR (end-of-request) bucket is filtered out of `bb` and set
 *    aside: request teardown is deferred until the whole h2 stream is
 *    done. EOS buckets are also handled specially (branch body elided).
 *  - Trailers, if any, are attached to the response.
 *  - At most `maxlen` memory bytes are moved; leftover buckets stay in
 *    `bb` for the caller. File buckets may pass through as long as the
 *    *pfile_buckets_allowed budget permits — the before/after delta of
 *    that budget is charged to files_handles_owned so the io knows how
 *    many open file handles it holds.
 * NOTE(review): the loop body removes the EOR bucket inside the
 * iteration; the elided lines presumably re-fetch the next bucket
 * safely — confirm in full file. */
416 apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb,
417 apr_size_t maxlen, apr_table_t *trailers,
418 apr_size_t *pfile_buckets_allowed)
425 return APR_ECONNABORTED;
428 /* Filter the EOR bucket and set it aside. We prefer to tear down
429 * the request when the whole h2 stream is done */
430 for (b = APR_BRIGADE_FIRST(bb);
431 b != APR_BRIGADE_SENTINEL(bb);
432 b = APR_BUCKET_NEXT(b))
434 if (AP_BUCKET_IS_EOR(b)) {
435 APR_BUCKET_REMOVE(b);
439 else if (APR_BUCKET_IS_EOS(b)) {
445 process_trailers(io, trailers);
447 /* Let's move the buckets from the request processing in here, so
448 * that the main thread can read them when it has time/capacity.
450 * Move at most "maxlen" memory bytes. If buckets remain, it is
451 * the caller's responsibility to take care of this.
453 * We allow passing of file buckets as long as we do not have too
454 * many open files already buffered. Otherwise we will run out of
458 start_allowed = *pfile_buckets_allowed;
459 status = h2_util_move(io->bbout, bb, maxlen, pfile_buckets_allowed,
461 /* track # file buckets moved into our pool */
462 if (start_allowed != *pfile_buckets_allowed) {
463 io->files_handles_owned += (start_allowed - *pfile_buckets_allowed);
469 apr_status_t h2_io_out_close(h2_io *io, apr_table_t *trailers)
472 return APR_ECONNABORTED;
474 if (!io->eos_out_read) { /* EOS has not been read yet */
475 process_trailers(io, trailers);
479 if (!h2_util_has_eos(io->bbout, -1)) {
480 append_eos(io, io->bbout);