From: Jim Jagielski Date: Tue, 30 Jun 2015 15:52:41 +0000 (+0000) Subject: Fold in latest changes. X-Git-Tag: 2.5.0-alpha~3029 X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=4e8eff891085245d26490b0ad4a48d9fa37eac4b;p=apache Fold in latest changes. git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1688475 13f79535-47bb-0310-9956-ffa450edef68 --- diff --git a/modules/http2/ChangeLog b/modules/http2/ChangeLog index e7b03edfc2..0b71d8672b 100644 --- a/modules/http2/ChangeLog +++ b/modules/http2/ChangeLog @@ -1,3 +1,21 @@ +v0.8.0 +-------------------------------------------------------------------------------- + * when serving static files, these are kept open longer (and buffer copied + less). Use config "H2SessionExtraFiles" to change the value. The more you + can give per connection, the better. But you may run out of open file + handles, if not careful. Default value is 5, which seems to work on my + config with mpms worker, event and prefork. Basically, each httpd process + needs at least #connections + #workers + (#conn * #extrafiles) available. + * main connection buffering, buffer sizes and write sizes are configurable, + see README.md for the list of options. buffer sizes and write sizes influence + the TLS performance. Defaults seem reasonable, but ymmv. + * general performance improved due to better handling of stream close and + resource cleanup + * prefork mpm confirmed working. Unless configured otherwise, each session + will only have 1 worker thread. 
Raise this with "H2MaxWorkers" + * changed sandbox cipher config to mozilla recommendation (thanks Lucas) + * sandbox update to nghttp2 1.0.5 + v0.7.3 -------------------------------------------------------------------------------- * sandbox update to nghttp2 1.0.4 diff --git a/modules/http2/README.md b/modules/http2/README.md index b6441c87f5..7bb0f39798 100644 --- a/modules/http2/README.md +++ b/modules/http2/README.md @@ -57,6 +57,10 @@ such as: * H2SerializeHeaders (on/off), "off" serialize/parse request+response headers for streams, as if they arrived in HTTP/1 format. When off, certain parts of httpd core filters are disabled/replaced to allow for a more efficient handling. * H2HackMpmEvent (on/off), "on" performs a hack on internal connection in order to make mpm_event working, has no effect on other mpm modules * H2Direct (on/off), "on" to enable h2c direct mode on a non-TLS host, default: off +* H2BufferOutput (on/off), if output data shall be buffered. "on" for TLS connections, "off" otherwise +* H2BufferSize n size of output buffer (if enabled), defaults to 64k +* H2BufferWriteMax n max. number of bytes in a single write when buffering output, defaults to 16k +* H2SessionExtraFiles n number of extra file handles a session might keep open to improve performance, depends on mpm module used and ulimit of processes, defaults to 5 All these configuration parameters can be set on servers/virtual hosts and are not available on directory level. 
Note that Worker configuration is diff --git a/modules/http2/configure.ac b/modules/http2/configure.ac index 39c7b57b2a..ecb0aa69e8 100644 --- a/modules/http2/configure.ac +++ b/modules/http2/configure.ac @@ -14,7 +14,7 @@ # AC_PREREQ([2.69]) -AC_INIT([mod_h2], [0.7.3], [stefan.eissing@greenbytes.de]) +AC_INIT([mod_h2], [0.8.0], [stefan.eissing@greenbytes.de]) LT_PREREQ([2.2.6]) LT_INIT() @@ -123,9 +123,6 @@ if test x"$build_mode" = "xsandbox"; then # In sandbox build, we just define it therefore, as a quick workaround CPPFLAGS="$CPPFLAGS -DPATH_MAX=4096" - # we use a new nghttp2 in the sandbox which has these features - NGHTTP2_HAS_DATA_CB=1 - else # production, we need to find where the apxs is. which then # can tell us the various directories we need. @@ -153,9 +150,6 @@ else AC_CHECK_LIB([nghttp2], [nghttp2_session_server_new2], , [AC_MSG_ERROR("library nghttp2 not found")]) - AC_CHECK_LIB([nghttp2], [nghttp2_session_callbacks_set_send_data_callback], - [NGHTTP2_HAS_DATA_CB=1], [NGHTTP2_HAS_DATA_CB=0]) - fi # Checks for header files. 
@@ -180,7 +174,6 @@ AC_CHECK_PROG([A2ENMOD],[a2enmod]) AC_SUBST(BUILD_SUBDIRS) AC_SUBST(SYSCONF_DIR) AC_SUBST(LIBEXEC_DIR) -AC_SUBST(NGHTTP2_HAS_DATA_CB) AC_CONFIG_FILES([ Makefile diff --git a/modules/http2/mod_h2/h2_config.c b/modules/http2/mod_h2/h2_config.c index 01b6fb98cd..62c96634e4 100644 --- a/modules/http2/mod_h2/h2_config.c +++ b/modules/http2/mod_h2/h2_config.c @@ -49,6 +49,10 @@ static h2_config defconf = { 0, /* serialize headers */ 1, /* hack mpm event */ 1, /* h2 direct mode */ + -1, /* buffer output, by default only for TLS */ + 64*1024, /* buffer size */ + 16*1024, /* out write max */ + 5, /* # session extra files */ }; static void *h2_config_create(apr_pool_t *pool, @@ -63,19 +67,24 @@ static void *h2_config_create(apr_pool_t *pool, strcat(name, s); strcat(name, "]"); - conf->name = name; - conf->h2_enabled = DEF_VAL; - conf->h2_max_streams = DEF_VAL; - conf->h2_max_hl_size = DEF_VAL; - conf->h2_window_size = DEF_VAL; - conf->min_workers = DEF_VAL; - conf->max_workers = DEF_VAL; + conf->name = name; + conf->h2_enabled = DEF_VAL; + conf->h2_max_streams = DEF_VAL; + conf->h2_max_hl_size = DEF_VAL; + conf->h2_window_size = DEF_VAL; + conf->min_workers = DEF_VAL; + conf->max_workers = DEF_VAL; conf->max_worker_idle_secs = DEF_VAL; - conf->stream_max_mem_size = DEF_VAL; - conf->alt_svc_max_age = DEF_VAL; - conf->serialize_headers = DEF_VAL; - conf->hack_mpm_event = DEF_VAL; - conf->h2_direct = DEF_VAL; + conf->stream_max_mem_size = DEF_VAL; + conf->alt_svc_max_age = DEF_VAL; + conf->serialize_headers = DEF_VAL; + conf->hack_mpm_event = DEF_VAL; + conf->h2_direct = DEF_VAL; + conf->buffer_output = DEF_VAL; + conf->buffer_output = DEF_VAL; + conf->buffer_size = DEF_VAL; + conf->write_max = DEF_VAL; + conf->session_extra_files = DEF_VAL; return conf; } @@ -117,6 +126,10 @@ void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv) n->serialize_headers = H2_CONFIG_GET(add, base, serialize_headers); n->hack_mpm_event = H2_CONFIG_GET(add, base, 
hack_mpm_event); n->h2_direct = H2_CONFIG_GET(add, base, h2_direct); + n->buffer_output = H2_CONFIG_GET(add, base, buffer_output); + n->buffer_size = H2_CONFIG_GET(add, base, buffer_size); + n->write_max = H2_CONFIG_GET(add, base, write_max); + n->session_extra_files = H2_CONFIG_GET(add, base, session_extra_files); return n; } @@ -148,6 +161,14 @@ int h2_config_geti(h2_config *conf, h2_config_var_t var) return H2_CONFIG_GET(conf, &defconf, hack_mpm_event); case H2_CONF_DIRECT: return H2_CONFIG_GET(conf, &defconf, h2_direct); + case H2_CONF_BUFFER_OUTPUT: + return H2_CONFIG_GET(conf, &defconf, buffer_output); + case H2_CONF_BUFFER_SIZE: + return H2_CONFIG_GET(conf, &defconf, buffer_size); + case H2_CONF_WRITE_MAX: + return H2_CONFIG_GET(conf, &defconf, write_max); + case H2_CONF_SESSION_FILES: + return H2_CONFIG_GET(conf, &defconf, session_extra_files); default: return DEF_VAL; } @@ -185,6 +206,9 @@ static const char *h2_conf_set_max_streams(cmd_parms *parms, h2_config *cfg = h2_config_sget(parms->server); cfg->h2_max_streams = (int)apr_atoi64(value); (void)arg; + if (cfg->h2_max_streams < 1) { + return "value must be > 0"; + } return NULL; } @@ -194,6 +218,9 @@ static const char *h2_conf_set_window_size(cmd_parms *parms, h2_config *cfg = h2_config_sget(parms->server); cfg->h2_window_size = (int)apr_atoi64(value); (void)arg; + if (cfg->h2_window_size < 1024) { + return "value must be > 1k"; + } return NULL; } @@ -203,6 +230,9 @@ static const char *h2_conf_set_max_hl_size(cmd_parms *parms, h2_config *cfg = h2_config_sget(parms->server); cfg->h2_max_hl_size = (int)apr_atoi64(value); (void)arg; + if (cfg->h2_max_hl_size < 1024) { + return "value must be > 1k"; + } return NULL; } @@ -212,6 +242,9 @@ static const char *h2_conf_set_min_workers(cmd_parms *parms, h2_config *cfg = h2_config_sget(parms->server); cfg->min_workers = (int)apr_atoi64(value); (void)arg; + if (cfg->min_workers < 1) { + return "value must be > 1"; + } return NULL; } @@ -221,6 +254,9 @@ static const 
char *h2_conf_set_max_workers(cmd_parms *parms, h2_config *cfg = h2_config_sget(parms->server); cfg->max_workers = (int)apr_atoi64(value); (void)arg; + if (cfg->max_workers < 1) { + return "value must be > 1"; + } return NULL; } @@ -230,6 +266,9 @@ static const char *h2_conf_set_max_worker_idle_secs(cmd_parms *parms, h2_config *cfg = h2_config_sget(parms->server); cfg->max_worker_idle_secs = (int)apr_atoi64(value); (void)arg; + if (cfg->max_worker_idle_secs < 1) { + return "value must be > 1"; + } return NULL; } @@ -241,6 +280,9 @@ static const char *h2_conf_set_stream_max_mem_size(cmd_parms *parms, cfg->stream_max_mem_size = (int)apr_atoi64(value); (void)arg; + if (cfg->stream_max_mem_size < 1024) { + return "value must be > 1k"; + } return NULL; } @@ -271,31 +313,114 @@ static const char *h2_conf_set_alt_svc_max_age(cmd_parms *parms, return NULL; } +static const char *h2_conf_set_buffer_size(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = h2_config_sget(parms->server); + apr_int64_t len = (int)apr_atoi64(value); + if (len < (16*1024)) { + return "value must be a positive number, at least 16k"; + } + cfg->buffer_size = (int)len; + (void)arg; + return NULL; +} + +static const char *h2_conf_set_write_max(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = h2_config_sget(parms->server); + apr_int64_t max = (int)apr_atoi64(value); + if (max <= 0) { + return "value must be a positive number"; + } + else if (max > cfg->buffer_size) { + return "value must be less than H2BufferSize"; + } + cfg->write_max = (int)max; + (void)arg; + return NULL; +} + +static const char *h2_conf_set_session_extra_files(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = h2_config_sget(parms->server); + apr_int64_t max = (int)apr_atoi64(value); + if (max <= 0) { + return "value must be a positive number"; + } + cfg->session_extra_files = (int)max; + (void)arg; + return NULL; +} + static const char 
*h2_conf_set_serialize_headers(cmd_parms *parms, void *arg, const char *value) { h2_config *cfg = h2_config_sget(parms->server); - cfg->serialize_headers = !apr_strnatcasecmp(value, "On"); + if (!strcasecmp(value, "On")) { + cfg->serialize_headers = 1; + return NULL; + } + else if (!strcasecmp(value, "Off")) { + cfg->serialize_headers = 0; + return NULL; + } + (void)arg; - return NULL; + return "value must be On or Off"; } static const char *h2_conf_set_hack_mpm_event(cmd_parms *parms, void *arg, const char *value) { h2_config *cfg = h2_config_sget(parms->server); - cfg->hack_mpm_event = !apr_strnatcasecmp(value, "On"); + if (!strcasecmp(value, "On")) { + cfg->hack_mpm_event = 1; + return NULL; + } + else if (!strcasecmp(value, "Off")) { + cfg->hack_mpm_event = 0; + return NULL; + } + (void)arg; - return NULL; + return "value must be On or Off"; } static const char *h2_conf_set_direct(cmd_parms *parms, void *arg, const char *value) { h2_config *cfg = h2_config_sget(parms->server); - cfg->h2_direct = !apr_strnatcasecmp(value, "On"); + if (!strcasecmp(value, "On")) { + cfg->h2_direct = 1; + return NULL; + } + else if (!strcasecmp(value, "Off")) { + cfg->h2_direct = 0; + return NULL; + } + (void)arg; - return NULL; + return "value must be On or Off"; +} + +static const char *h2_conf_set_buffer_output(cmd_parms *parms, + void *arg, const char *value) +{ + h2_config *cfg = h2_config_sget(parms->server); + if (!strcasecmp(value, "On")) { + cfg->buffer_output = 1; + return NULL; + } + else if (!strcasecmp(value, "Off")) { + cfg->buffer_output = 0; + return NULL; + } + + (void)arg; + return "value must be On or Off"; } #pragma GCC diagnostic ignored "-Wmissing-braces" @@ -325,7 +450,15 @@ const command_rec h2_cmds[] = { AP_INIT_TAKE1("H2HackMpmEvent", h2_conf_set_hack_mpm_event, NULL, RSRC_CONF, "on to enable a hack that makes mpm_event working with mod_h2"), AP_INIT_TAKE1("H2Direct", h2_conf_set_direct, NULL, - RSRC_CONF, "on to enable direct HTTP/2 mode on non-TLS"), + 
RSRC_CONF, "on to enable direct HTTP/2 mode"), + AP_INIT_TAKE1("H2BufferOutput", h2_conf_set_buffer_output, NULL, + RSRC_CONF, "on to enable output buffering, default for TLS"), + AP_INIT_TAKE1("H2BufferSize", h2_conf_set_buffer_size, NULL, + RSRC_CONF, "size of outgoing buffer in bytes"), + AP_INIT_TAKE1("H2BufferWriteMax", h2_conf_set_write_max, NULL, + RSRC_CONF, "maximum number of bytes in a outgoing write"), + AP_INIT_TAKE1("H2SessionExtraFiles", h2_conf_set_session_extra_files, NULL, + RSRC_CONF, "number of extra file a session might keep open"), { NULL, NULL, NULL, 0, 0, NULL } }; diff --git a/modules/http2/mod_h2/h2_config.h b/modules/http2/mod_h2/h2_config.h index a379475f5c..ba882a865c 100644 --- a/modules/http2/mod_h2/h2_config.h +++ b/modules/http2/mod_h2/h2_config.h @@ -37,6 +37,10 @@ typedef enum { H2_CONF_SER_HEADERS, H2_CONF_HACK_MPM_EVENT, H2_CONF_DIRECT, + H2_CONF_BUFFER_OUTPUT, + H2_CONF_BUFFER_SIZE, + H2_CONF_WRITE_MAX, + H2_CONF_SESSION_FILES, } h2_config_var_t; /* Apache httpd module configuration for h2. */ @@ -57,6 +61,10 @@ typedef struct h2_config { int hack_mpm_event; /* If mpm_event is detected, perform a hack on stream connections to make it work */ int h2_direct; /* if mod_h2 is active on non-TLS directly */ + int buffer_output; /* if output buffering shall be used */ + int buffer_size; /* size of buffer for outgoing data */ + int write_max; /* max number of bytes for a write op */ + int session_extra_files; /* # of extra files a session may keep open */ } h2_config; diff --git a/modules/http2/mod_h2/h2_conn.c b/modules/http2/mod_h2/h2_conn.c index 3639d7eef6..242ed107a9 100644 --- a/modules/http2/mod_h2/h2_conn.c +++ b/modules/http2/mod_h2/h2_conn.c @@ -106,10 +106,6 @@ apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s) else if (!strcmp("prefork.c", m->name)) { mpm_type = H2_MPM_PREFORK; mpm_module = m; - /* prefork reports 1 thread per child, also as max */ - if (maxw == 1) { - maxw = 8; /* number of cores maybe? 
*/ - } } else if (!strcmp("mod_ssl.c", m->name)) { ssl_module = m; diff --git a/modules/http2/mod_h2/h2_conn_io.c b/modules/http2/mod_h2/h2_conn_io.c index f29bbe379b..8cf2058f28 100644 --- a/modules/http2/mod_h2/h2_conn_io.c +++ b/modules/http2/mod_h2/h2_conn_io.c @@ -23,28 +23,35 @@ #include #include "h2_private.h" +#include "h2_config.h" #include "h2_conn_io.h" #include "h2_h2.h" #include "h2_util.h" -/* If we write directly to our brigade or use a char buffer to collect - * out data. - */ - -#define H2_CONN_IO_BUF_SIZE (64 * 1024) -#define H2_CONN_IO_SSL_WRITE_SIZE (16 * 1024) - - apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c) { + h2_config *cfg = h2_config_get(c); + io->connection = c; io->input = apr_brigade_create(c->pool, c->bucket_alloc); io->output = apr_brigade_create(c->pool, c->bucket_alloc); - io->buffer_output = h2_h2_is_tls(c); io->buflen = 0; + io->max_write_size = h2_config_geti(cfg, H2_CONF_WRITE_MAX); + io->buffer_output = h2_config_geti(cfg, H2_CONF_BUFFER_OUTPUT); + if (io->buffer_output < 0) { + io->buffer_output = h2_h2_is_tls(c); + } + + /* Currently we buffer only for TLS output. The reason this gives + * improved performance is that buckets send to the mod_ssl network + * filter will be encrypted in chunks. There is a special filter + * that tries to aggregate data, but that does not work well when + * bucket sizes alternate between tiny frame headers and large data + * chunks. 
+ */ if (io->buffer_output) { - io->bufsize = H2_CONN_IO_BUF_SIZE; + io->bufsize = h2_config_geti(cfg, H2_CONF_BUFFER_SIZE); io->buffer = apr_pcalloc(c->pool, io->bufsize); } else { @@ -178,15 +185,15 @@ static apr_status_t flush_out(apr_bucket_brigade *bb, void *ctx) static apr_status_t bucketeer_buffer(h2_conn_io *io) { const char *data = io->buffer; apr_size_t remaining = io->buflen; - int bcount = (int)(remaining / H2_CONN_IO_SSL_WRITE_SIZE); + int bcount = (int)(remaining / io->max_write_size); apr_bucket *b; for (int i = 0; i < bcount; ++i) { - b = apr_bucket_transient_create(data, H2_CONN_IO_SSL_WRITE_SIZE, + b = apr_bucket_transient_create(data, io->max_write_size, io->output->bucket_alloc); APR_BRIGADE_INSERT_TAIL(io->output, b); - data += H2_CONN_IO_SSL_WRITE_SIZE; - remaining -= H2_CONN_IO_SSL_WRITE_SIZE; + data += io->max_write_size; + remaining -= io->max_write_size; } if (remaining > 0) { diff --git a/modules/http2/mod_h2/h2_conn_io.h b/modules/http2/mod_h2/h2_conn_io.h index ed2ab4c8f6..9d1db6c9b4 100644 --- a/modules/http2/mod_h2/h2_conn_io.h +++ b/modules/http2/mod_h2/h2_conn_io.h @@ -27,6 +27,7 @@ typedef struct { apr_bucket_brigade *input; apr_bucket_brigade *output; int buffer_output; + int max_write_size; char *buffer; apr_size_t buflen; diff --git a/modules/http2/mod_h2/h2_io.c b/modules/http2/mod_h2/h2_io.c index b44e384ee5..aa0619e230 100644 --- a/modules/http2/mod_h2/h2_io.c +++ b/modules/http2/mod_h2/h2_io.c @@ -81,7 +81,7 @@ apr_status_t h2_io_in_read(h2_io *io, apr_bucket_brigade *bb, apr_brigade_length(bb, 1, &start_len); apr_bucket *last = APR_BRIGADE_LAST(bb); apr_status_t status = h2_util_move(bb, io->bbin, maxlen, 0, - NULL, "h2_io_in_read"); + "h2_io_in_read"); if (status == APR_SUCCESS) { apr_bucket *nlast = APR_BRIGADE_LAST(bb); apr_off_t end_len = 0; @@ -105,7 +105,7 @@ apr_status_t h2_io_in_write(h2_io *io, apr_bucket_brigade *bb) io->bbin = apr_brigade_create(io->bbout->p, io->bbout->bucket_alloc); } - return 
h2_util_move(io->bbin, bb, 0, 0, NULL, "h2_io_in_write"); + return h2_util_move(io->bbin, bb, 0, 0, "h2_io_in_write"); } return APR_SUCCESS; } @@ -120,17 +120,6 @@ apr_status_t h2_io_in_close(h2_io *io) return APR_SUCCESS; } -apr_status_t h2_io_out_read(h2_io *io, char *buffer, - apr_size_t *plen, int *peos) -{ - if (buffer == NULL) { - /* just checking length available */ - return h2_util_bb_avail(io->bbout, plen, peos); - } - - return h2_util_bb_read(io->bbout, buffer, plen, peos); -} - apr_status_t h2_io_out_readx(h2_io *io, h2_io_data_cb *cb, void *ctx, apr_size_t *plen, int *peos) @@ -143,9 +132,27 @@ apr_status_t h2_io_out_readx(h2_io *io, } apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb, - apr_size_t maxlen) + apr_size_t maxlen, int *pfile_handles_allowed) { - return h2_util_move(io->bbout, bb, maxlen, 0, NULL, "h2_io_out_write"); + /* Let's move the buckets from the request processing in here, so + * that the main thread can read them when it has time/capacity. + * + * Move at most "maxlen" memory bytes. If buckets remain, it is + * the caller's responsibility to take care of this. + * + * We allow passing of file buckets as long as we do not have too + * many open files already buffered. Otherwise we will run out of + * file handles. 
+ */ + int start_allowed = *pfile_handles_allowed; + apr_status_t status; + status = h2_util_move(io->bbout, bb, maxlen, pfile_handles_allowed, + "h2_io_out_write"); + /* track # file buckets moved into our pool */ + if (start_allowed != *pfile_handles_allowed) { + io->files_handles_owned += (start_allowed - *pfile_handles_allowed); + } + return status; } diff --git a/modules/http2/mod_h2/h2_io.h b/modules/http2/mod_h2/h2_io.h index 119cd8a2b6..46606ea5e0 100644 --- a/modules/http2/mod_h2/h2_io.h +++ b/modules/http2/mod_h2/h2_io.h @@ -40,6 +40,7 @@ struct h2_io { struct apr_thread_cond_t *output_drained; /* block on writing */ struct h2_response *response;/* submittable response created */ + int files_handles_owned; }; /******************************************************************************* @@ -101,15 +102,12 @@ apr_status_t h2_io_in_close(h2_io *io); * @param plen the requested max len, set to amount of data on return * @param peos != 0 iff the end of stream has been reached */ -apr_status_t h2_io_out_read(h2_io *io, char *buffer, - apr_size_t *plen, int *peos); - apr_status_t h2_io_out_readx(h2_io *io, h2_io_data_cb *cb, void *ctx, apr_size_t *plen, int *peos); apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb, - apr_size_t maxlen); + apr_size_t maxlen, int *pfile_buckets_allowed); /** * Closes the input. After existing data has been read, APR_EOF will diff --git a/modules/http2/mod_h2/h2_io_set.c b/modules/http2/mod_h2/h2_io_set.c index d87574caa3..40a7ef351e 100644 --- a/modules/http2/mod_h2/h2_io_set.c +++ b/modules/http2/mod_h2/h2_io_set.c @@ -66,7 +66,7 @@ h2_io *h2_io_set_get(h2_io_set *sp, int stream_id) /* we keep the array sorted by id, so lookup can be done * by bsearch. 
*/ - h2_io key = { stream_id, NULL, 0, 0, 0, NULL, NULL, NULL, NULL }; + h2_io key = { stream_id, NULL, 0, 0, 0, NULL, NULL, NULL, NULL, 0 }; h2_io *pkey = &key; h2_io **ps = bsearch(&pkey, sp->list->elts, sp->list->nelts, sp->list->elt_size, h2_stream_id_cmp); diff --git a/modules/http2/mod_h2/h2_mplx.c b/modules/http2/mod_h2/h2_mplx.c index be477e1c78..147789cf08 100644 --- a/modules/http2/mod_h2/h2_mplx.c +++ b/modules/http2/mod_h2/h2_mplx.c @@ -130,12 +130,12 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, h2_workers *workers) m->closed = h2_stream_set_create(m->pool); m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM); m->workers = workers; + + m->file_handles_allowed = h2_config_geti(conf, H2_CONF_SESSION_FILES); } return m; } -#define REF_COUNT_ATOMIC 1 - static void reference(h2_mplx *m) { apr_atomic_inc32(&m->refs); @@ -152,29 +152,11 @@ static void release(h2_mplx *m) void h2_mplx_reference(h2_mplx *m) { - if (REF_COUNT_ATOMIC) { - reference(m); - } - else { - apr_status_t status = apr_thread_mutex_lock(m->lock); - if (APR_SUCCESS == status) { - reference(m); - apr_thread_mutex_unlock(m->lock); - } - } + reference(m); } void h2_mplx_release(h2_mplx *m) { - if (REF_COUNT_ATOMIC) { - release(m); - } - else { - apr_status_t status = apr_thread_mutex_lock(m->lock); - if (APR_SUCCESS == status) { - release(m); - apr_thread_mutex_unlock(m->lock); - } - } + release(m); } static void workers_register(h2_mplx *m) { @@ -287,6 +269,10 @@ static void stream_destroy(h2_mplx *m, h2_stream *stream, h2_io *io) } h2_stream_destroy(stream); if (io) { + /* The pool is cleared/destroyed which also closes all + * allocated file handles. Give this count back to our + * file handle pool. 
*/ + m->file_handles_allowed += io->files_handles_owned; h2_io_set_remove(m->stream_ios, io); h2_io_destroy(io); } @@ -450,30 +436,6 @@ apr_status_t h2_mplx_in_update_windows(h2_mplx *m, return status; } -apr_status_t h2_mplx_out_read(h2_mplx *m, int stream_id, - char *buffer, apr_size_t *plen, int *peos) -{ - AP_DEBUG_ASSERT(m); - if (m->aborted) { - return APR_ECONNABORTED; - } - apr_status_t status = apr_thread_mutex_lock(m->lock); - if (APR_SUCCESS == status) { - h2_io *io = h2_io_set_get(m->stream_ios, stream_id); - if (io) { - status = h2_io_out_read(io, buffer, plen, peos); - if (status == APR_SUCCESS && io->output_drained) { - apr_thread_cond_signal(io->output_drained); - } - } - else { - status = APR_ECONNABORTED; - } - apr_thread_mutex_unlock(m->lock); - } - return status; -} - apr_status_t h2_mplx_out_readx(h2_mplx *m, int stream_id, h2_io_data_cb *cb, void *ctx, apr_size_t *plen, int *peos) @@ -545,7 +507,8 @@ static apr_status_t out_write(h2_mplx *m, h2_io *io, && (status == APR_SUCCESS) && !is_aborted(m, &status)) { - status = h2_io_out_write(io, bb, m->stream_max_mem); + status = h2_io_out_write(io, bb, m->stream_max_mem, + &m->file_handles_allowed); /* Wait for data to drain until there is room again */ while (!APR_BRIGADE_EMPTY(bb) diff --git a/modules/http2/mod_h2/h2_mplx.h b/modules/http2/mod_h2/h2_mplx.h index 35c41a6be8..0c52ed8bcb 100644 --- a/modules/http2/mod_h2/h2_mplx.h +++ b/modules/http2/mod_h2/h2_mplx.h @@ -73,6 +73,7 @@ struct h2_mplx { apr_pool_t *spare_pool; /* spare pool, ready for next stream */ struct h2_stream_set *closed; /* streams closed, but task ongoing */ struct h2_workers *workers; + int file_handles_allowed; }; /******************************************************************************* @@ -214,9 +215,6 @@ struct h2_stream *h2_mplx_next_submit(h2_mplx *m, * Reads output data from the given stream. Will never block, but * return APR_EAGAIN until data arrives or the stream is closed. 
*/ -apr_status_t h2_mplx_out_read(h2_mplx *mplx, int stream_id, - char *buffer, apr_size_t *plen, int *peos); - apr_status_t h2_mplx_out_readx(h2_mplx *mplx, int stream_id, h2_io_data_cb *cb, void *ctx, apr_size_t *plen, int *peos); diff --git a/modules/http2/mod_h2/h2_session.c b/modules/http2/mod_h2/h2_session.c index 06f8503b77..cb70dd1604 100644 --- a/modules/http2/mod_h2/h2_session.c +++ b/modules/http2/mod_h2/h2_session.c @@ -412,32 +412,28 @@ static apr_status_t send_data(h2_session *session, const char *data, return h2_conn_io_write(&session->io, data, length); } -#if NGHTTP2_HAS_DATA_CB - static apr_status_t pass_data(void *ctx, const char *data, apr_size_t length) { return send_data((h2_session*)ctx, data, length); } -int on_send_data_cb(nghttp2_session *ngh2, - nghttp2_frame *frame, - const uint8_t *framehd, - size_t length, - nghttp2_data_source *source, - void *userp) +static int on_send_data_cb(nghttp2_session *ngh2, + nghttp2_frame *frame, + const uint8_t *framehd, + size_t length, + nghttp2_data_source *source, + void *userp) { apr_status_t status = APR_SUCCESS; h2_session *session = (h2_session *)userp; int stream_id = (int)frame->hd.stream_id; const unsigned char padlen = frame->data.padlen; - apr_size_t frame_len = 9 + (padlen? 
1 : 0) + length + padlen; - apr_size_t data_len = length; - apr_size_t avail, chunk_len; - int rv = 0; int eos; h2_stream *stream; + (void)ngh2; + (void)source; if (session->aborted) { return NGHTTP2_ERR_CALLBACK_FAILURE; } @@ -458,7 +454,6 @@ int on_send_data_cb(nghttp2_session *ngh2, if (status == APR_SUCCESS) { apr_size_t len = length; - int eos = 0; status = h2_stream_readx(stream, pass_data, session, &len, &eos); if (status == APR_SUCCESS && len != length) { @@ -488,8 +483,6 @@ int on_send_data_cb(nghttp2_session *ngh2, return h2_session_status_from_apr_status(status); } -#endif - #define NGH2_SET_CALLBACK(callbacks, name, fn)\ nghttp2_session_callbacks_set_##name##_callback(callbacks, fn) @@ -514,9 +507,7 @@ static apr_status_t init_callbacks(conn_rec *c, nghttp2_session_callbacks **pcb) NGH2_SET_CALLBACK(*pcb, on_stream_close, on_stream_close_cb); NGH2_SET_CALLBACK(*pcb, on_begin_headers, on_begin_headers_cb); NGH2_SET_CALLBACK(*pcb, on_header, on_header_cb); -#if NGHTTP2_HAS_DATA_CB NGH2_SET_CALLBACK(*pcb, send_data, on_send_data_cb); -#endif return APR_SUCCESS; } @@ -990,7 +981,7 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s, { h2_session *session = (h2_session *)puser; AP_DEBUG_ASSERT(session); - (void)ng2s;(void)source; + (void)ng2s;(void)source;(void)buf; h2_stream *stream = h2_stream_set_get(session->streams, stream_id); if (!stream) { @@ -1004,17 +995,10 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s, apr_size_t nread = length; int eos = 0; -#if NGHTTP2_HAS_DATA_CB apr_status_t status = h2_stream_prep_read(stream, &nread, &eos); if (nread) { *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY; } -#else - /* Try to pop data buckets from our queue for this stream - * until we see EOS or the buffer is full. 
- */ - apr_status_t status = h2_stream_read(stream, (char*)buf, &nread, &eos); -#endif switch (status) { case APR_SUCCESS: diff --git a/modules/http2/mod_h2/h2_stream.c b/modules/http2/mod_h2/h2_stream.c index 29add7bfe5..975d6cd805 100644 --- a/modules/http2/mod_h2/h2_stream.c +++ b/modules/http2/mod_h2/h2_stream.c @@ -113,7 +113,7 @@ apr_status_t h2_stream_set_response(h2_stream *stream, h2_response *response, stream->bbout = apr_brigade_create(stream->pool, stream->m->c->bucket_alloc); } - return h2_util_move(stream->bbout, bb, 16 * 1024, 1, NULL, + return h2_util_move(stream->bbout, bb, 16 * 1024, NULL, "h2_stream_set_response"); } return APR_SUCCESS; @@ -263,26 +263,6 @@ apr_status_t h2_stream_readx(h2_stream *stream, } -apr_status_t h2_stream_read(h2_stream *stream, char *buffer, - apr_size_t *plen, int *peos) -{ - apr_status_t status = APR_SUCCESS; - const char *src; - if (stream->bbout && !APR_BRIGADE_EMPTY(stream->bbout)) { - src = "stream"; - status = h2_util_bb_read(stream->bbout, buffer, plen, peos); - } - else { - src = "mplx"; - status = h2_mplx_out_read(stream->m, stream->id, buffer, plen, peos); - } - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->m->c, - "h2_stream(%ld-%d): read %s, len=%ld eos=%d", - stream->m->id, stream->id, - src, (long)*plen, *peos); - return status; -} - void h2_stream_set_suspended(h2_stream *stream, int suspended) { AP_DEBUG_ASSERT(stream); diff --git a/modules/http2/mod_h2/h2_stream.h b/modules/http2/mod_h2/h2_stream.h index 9df2886834..f6bd71a5f8 100644 --- a/modules/http2/mod_h2/h2_stream.h +++ b/modules/http2/mod_h2/h2_stream.h @@ -96,9 +96,6 @@ apr_status_t h2_stream_set_response(h2_stream *stream, struct h2_response *response, apr_bucket_brigade *bb); -apr_status_t h2_stream_read(h2_stream *stream, char *buffer, - apr_size_t *plen, int *peos); - apr_status_t h2_stream_prep_read(h2_stream *stream, apr_size_t *plen, int *peos); diff --git a/modules/http2/mod_h2/h2_task_input.c 
b/modules/http2/mod_h2/h2_task_input.c index 54a46dcccf..6ea1a3f847 100644 --- a/modules/http2/mod_h2/h2_task_input.c +++ b/modules/http2/mod_h2/h2_task_input.c @@ -171,11 +171,11 @@ apr_status_t h2_task_input_read(h2_task_input *input, if (mode == AP_MODE_EXHAUSTIVE) { /* return all we have */ return h2_util_move(bb, input->bb, readbytes, 0, - NULL, "task_input_read(exhaustive)"); + "task_input_read(exhaustive)"); } else if (mode == AP_MODE_READBYTES) { return h2_util_move(bb, input->bb, readbytes, 0, - NULL, "task_input_read(readbytes)"); + "task_input_read(readbytes)"); } else if (mode == AP_MODE_SPECULATIVE) { /* return not more than was asked for */ diff --git a/modules/http2/mod_h2/h2_util.c b/modules/http2/mod_h2/h2_util.c index 1b08165878..33f9d81dc4 100644 --- a/modules/http2/mod_h2/h2_util.c +++ b/modules/http2/mod_h2/h2_util.c @@ -204,14 +204,17 @@ const char *h2_util_first_token_match(apr_pool_t *pool, const char *s, * still needed. */ static const int DEEP_COPY = 1; -static const int FILE_MOVE = 0; +static const int FILE_MOVE = 1; static apr_status_t last_not_included(apr_bucket_brigade *bb, - apr_size_t maxlen, int count_virtual, + apr_size_t maxlen, + int same_alloc, + int *pfile_buckets_allowed, apr_bucket **pend) { apr_bucket *b; apr_status_t status = APR_SUCCESS; + int files_allowed = pfile_buckets_allowed? 
*pfile_buckets_allowed : 0; if (maxlen > 0) { /* Find the bucket, up to which we reach maxlen/mem bytes */ @@ -237,10 +240,14 @@ static apr_status_t last_not_included(apr_bucket_brigade *bb, } } - if (!count_virtual && FILE_MOVE && APR_BUCKET_IS_FILE(b)) { + if (same_alloc && APR_BUCKET_IS_FILE(b)) { + /* we like it move it, always */ + } + else if (files_allowed > 0 && APR_BUCKET_IS_FILE(b)) { /* this has no memory footprint really unless * it is read, disregard it in length count, - * unless we count the virtual buckets */ + * unless we do not move the file buckets */ + --files_allowed; } else if (maxlen < b->length) { apr_bucket_split(b, maxlen); @@ -256,23 +263,28 @@ static apr_status_t last_not_included(apr_bucket_brigade *bb, return status; } -#define LOG_LEVEL APLOG_TRACE2 +#define LOG_BUCKETS 0 +#define LOG_LEVEL APLOG_INFO apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, - apr_size_t maxlen, int count_virtual, - apr_file_t **pfile, const char *msg) + apr_size_t maxlen, int *pfile_handles_allowed, + const char *msg) { apr_status_t status = APR_SUCCESS; AP_DEBUG_ASSERT(to); AP_DEBUG_ASSERT(from); int same_alloc = (to->bucket_alloc == from->bucket_alloc); + + if (!FILE_MOVE) { + pfile_handles_allowed = NULL; + } if (!APR_BRIGADE_EMPTY(from)) { apr_bucket *b, *end; - status = last_not_included(from, maxlen, - (count_virtual || !FILE_MOVE), &end); + status = last_not_included(from, maxlen, same_alloc, + pfile_handles_allowed, &end); if (status != APR_SUCCESS) { return status; } @@ -289,6 +301,7 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, * directly. */ APR_BUCKET_REMOVE(b); APR_BRIGADE_INSERT_TAIL(to, b); +#if LOG_BUCKETS ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, "h2_util_move: %s, passed bucket(same bucket_alloc) " "%ld-%ld, type=%s", @@ -297,6 +310,7 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, (APR_BUCKET_IS_EOS(b)? "EOS": (APR_BUCKET_IS_FLUSH(b)? 
"FLUSH" : "META")) : (APR_BUCKET_IS_FILE(b)? "FILE" : "DATA")); +#endif } else if (DEEP_COPY) { /* we have not managed the magic of passing buckets from @@ -306,19 +320,17 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, if (APR_BUCKET_IS_METADATA(b)) { if (APR_BUCKET_IS_EOS(b)) { APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc)); - ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, - "h2_util_move: %s, copied EOS bucket", msg); } else if (APR_BUCKET_IS_FLUSH(b)) { APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to->bucket_alloc)); - ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, - "h2_util_move: %s, copied FLUSH bucket", msg); } else { /* ignore */ } } - else if (pfile && FILE_MOVE && APR_BUCKET_IS_FILE(b)) { + else if (pfile_handles_allowed + && *pfile_handles_allowed > 0 + && APR_BUCKET_IS_FILE(b)) { /* We do not want to read files when passing buckets, if * we can avoid it. However, what we've come up so far * is not working corrently, resulting either in crashes or @@ -327,15 +339,16 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, apr_bucket_file *f = (apr_bucket_file *)b->data; apr_file_t *fd = f->fd; int setaside = (f->readpool != to->p); +#if LOG_BUCKETS ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, "h2_util_move: %s, moving FILE bucket %ld-%ld " "from=%lx(p=%lx) to=%lx(p=%lx), setaside=%d", msg, (long)b->start, (long)b->length, (long)from, (long)from->p, (long)to, (long)to->p, setaside); +#endif if (setaside) { status = apr_file_setaside(&fd, fd, to->p); - *pfile = fd; if (status != APR_SUCCESS) { ap_log_perror(APLOG_MARK, APLOG_ERR, status, to->p, "h2_util: %s, setaside FILE", msg); @@ -344,6 +357,7 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, } apr_brigade_insert_file(to, fd, b->start, b->length, to->p); + --(*pfile_handles_allowed); } else { const char *data; @@ -351,12 +365,14 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, 
apr_bucket_brigade *from, status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); if (status == APR_SUCCESS && len > 0) { status = apr_brigade_write(to, NULL, NULL, data, len); +#if LOG_BUCKETS ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, "h2_util_move: %s, copied bucket %ld-%ld " "from=%lx(p=%lx) to=%lx(p=%lx)", msg, (long)b->start, (long)b->length, (long)from, (long)from->p, (long)to, (long)to->p); +#endif } } apr_bucket_delete(b); @@ -365,12 +381,14 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, apr_bucket_setaside(b, to->p); APR_BUCKET_REMOVE(b); APR_BRIGADE_INSERT_TAIL(to, b); +#if LOG_BUCKETS ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, "h2_util_move: %s, passed setaside bucket %ld-%ld " "from=%lx(p=%lx) to=%lx(p=%lx)", msg, (long)b->start, (long)b->length, (long)from, (long)from->p, (long)to, (long)to->p); +#endif } } } @@ -378,59 +396,11 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, return status; } -apr_status_t h2_util_pass(apr_bucket_brigade *to, apr_bucket_brigade *from, - apr_size_t maxlen, int count_virtual, - const char *msg) -{ - apr_status_t status = APR_SUCCESS; - - AP_DEBUG_ASSERT(to); - AP_DEBUG_ASSERT(from); - - if (!APR_BRIGADE_EMPTY(from)) { - apr_bucket *b, *end; - - status = last_not_included(from, maxlen, count_virtual, &end); - if (status != APR_SUCCESS) { - return status; - } - - while (!APR_BRIGADE_EMPTY(from) && status == APR_SUCCESS) { - b = APR_BRIGADE_FIRST(from); - if (b == end) { - break; - } - - APR_BUCKET_REMOVE(b); - if (APR_BUCKET_IS_METADATA(b)) { - if (!APR_BUCKET_IS_EOS(b) && !APR_BUCKET_IS_FLUSH(b)) { - apr_bucket_delete(b); - continue; - } - } - else if (b->length == 0) { - apr_bucket_delete(b); - continue; - } - - APR_BRIGADE_INSERT_TAIL(to, b); - ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, - "h2_util_pass: %s, passed bucket %ld-%ld, type=%s", - msg, (long)b->start, (long)b->length, - APR_BUCKET_IS_METADATA(b)? - (APR_BUCKET_IS_EOS(b)? 
"EOS": - (APR_BUCKET_IS_FLUSH(b)? "FLUSH" : "META")) : - (APR_BUCKET_IS_FILE(b)? "FILE" : "DATA")); - } - } - - return status; -} - apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from, apr_size_t maxlen, const char *msg) { apr_status_t status = APR_SUCCESS; + (void)msg; AP_DEBUG_ASSERT(to); AP_DEBUG_ASSERT(from); @@ -439,7 +409,7 @@ apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from, if (!APR_BRIGADE_EMPTY(from)) { apr_bucket *b, *end, *cpy; - status = last_not_included(from, maxlen, 1, &end); + status = last_not_included(from, maxlen, 0, 0, &end); if (status != APR_SUCCESS) { return status; } @@ -459,13 +429,9 @@ apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from, if (APR_BUCKET_IS_METADATA(b)) { if (APR_BUCKET_IS_EOS(b)) { APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc)); - ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, - "h2_util_copy: %s, copied EOS bucket", msg); } else if (APR_BUCKET_IS_FLUSH(b)) { APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to->bucket_alloc)); - ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, - "h2_util_copy: %s, copied FLUSH bucket", msg); } else { /* ignore */ @@ -477,12 +443,14 @@ apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from, status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); if (status == APR_SUCCESS && len > 0) { status = apr_brigade_write(to, NULL, NULL, data, len); +#if LOG_BUCKETS ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, "h2_util_copy: %s, copied bucket %ld-%ld " "from=%lx(p=%lx) to=%lx(p=%lx)", msg, (long)b->start, (long)b->length, (long)from, (long)from->p, (long)to, (long)to->p); +#endif } } } @@ -508,7 +476,7 @@ int h2_util_has_eos(apr_bucket_brigade *bb, apr_size_t len) { apr_bucket *b, *end; - apr_status_t status = last_not_included(bb, len, 1, &end); + apr_status_t status = last_not_included(bb, len, 0, 0, &end); if (status != APR_SUCCESS) { return status; } @@ -571,70 +539,15 @@ 
apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb, return status; } -apr_status_t h2_util_bb_read(apr_bucket_brigade *bb, char *buffer, - apr_size_t *plen, int *peos) -{ - apr_status_t status = APR_SUCCESS; - apr_size_t avail = *plen; - apr_size_t written = 0; - - /* Copy data in our brigade into the buffer until it is filled or - * we encounter an EOS. - */ - *peos = 0; - while ((status == APR_SUCCESS) && !APR_BRIGADE_EMPTY(bb)) { - - apr_bucket *b = APR_BRIGADE_FIRST(bb); - if (APR_BUCKET_IS_METADATA(b)) { - if (APR_BUCKET_IS_EOS(b)) { - *peos = 1; - } - else { - /* ignore */ - } - } - else if (avail <= 0) { - break; - } - else { - const char *data; - apr_size_t data_len; - - if (b->length != ((apr_size_t)-1) && b->length > avail) { - apr_bucket_split(b, avail); - } - status = apr_bucket_read(b, &data, &data_len, - APR_NONBLOCK_READ); - if (status == APR_SUCCESS && data_len > 0) { - if (data_len > avail) { - apr_bucket_split(b, avail); - data_len = avail; - } - memcpy(buffer, data, data_len); - avail -= data_len; - buffer += data_len; - written += data_len; - } - } - apr_bucket_delete(b); - } - - *plen = written; - if (status == APR_SUCCESS && !*peos && !*plen) { - return APR_EAGAIN; - } - return status; -} - apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb, h2_util_pass_cb *cb, void *ctx, apr_size_t *plen, int *peos) { apr_status_t status = APR_SUCCESS; - apr_size_t avail = *plen; - apr_size_t written = 0; - apr_bucket *next, *b = APR_BRIGADE_FIRST(bb); int consume = (cb != NULL); + apr_size_t written = 0; + apr_size_t avail = *plen; + apr_bucket *next, *b; /* Pass data in our brigade through the callback until the length * is satisfied or we encounter an EOS. 
@@ -659,7 +572,8 @@ apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb, const char *data = NULL; apr_size_t data_len; - if (b->length != ((apr_size_t)-1)) { + if (b->length == ((apr_size_t)-1)) { + /* read to determine length */ status = apr_bucket_read(b, &data, &data_len, APR_NONBLOCK_READ); } diff --git a/modules/http2/mod_h2/h2_util.h b/modules/http2/mod_h2/h2_util.h index a1438cca4f..e4c68dca9f 100644 --- a/modules/http2/mod_h2/h2_util.h +++ b/modules/http2/mod_h2/h2_util.h @@ -74,12 +74,13 @@ apr_size_t h2_util_base64url_decode(unsigned char **decoded, * @param to the brigade to move the data to * @param from the brigade to get the data from * @param maxlen of bytes to move, 0 for all - * @param count_virtual if virtual buckets like FILE do count against maxlen + * @param pfile_buckets_allowed how many file buckets may be moved, + * may be 0 or NULL * @param msg message for use in logging */ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, - apr_size_t maxlen, int count_virtual, - apr_file_t **pfile, const char *msg); + apr_size_t maxlen, int *pfile_buckets_allowed, + const char *msg); /** * Copies buckets from one brigade into another. If maxlen > 0, it only @@ -93,20 +94,6 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from, apr_size_t maxlen, const char *msg); -/** - * Pass the buckets from one brigade into another, without any setaside. - * Only recommended if both brigades use the same bucket alloc or if - * you really know what you are doing. 
- * @param to the brigade to pass the buckets to - * @param from the brigade to get the buckets from - * @param maxlen of bucket bytes to copy, 0 for all - * @param count_virtual if virtual buckets like FILE do count against maxlen - * @param msg message for use in logging - */ -apr_status_t h2_util_pass(apr_bucket_brigade *to, apr_bucket_brigade *from, - apr_size_t maxlen, int count_virtual, - const char *msg); - /** * Return != 0 iff there is a FLUSH or EOS bucket in the brigade. * @param bb the brigade to check on @@ -127,9 +114,6 @@ int h2_util_bb_has_data_or_eos(apr_bucket_brigade *bb); apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb, apr_size_t *plen, int *peos); -apr_status_t h2_util_bb_read(apr_bucket_brigade *bb, char *buffer, - apr_size_t *plen, int *peos); - typedef apr_status_t h2_util_pass_cb(void *ctx, const char *data, apr_size_t len); diff --git a/modules/http2/mod_h2/h2_version.h.in b/modules/http2/mod_h2/h2_version.h.in index 049a13f36a..cbef45a802 100644 --- a/modules/http2/mod_h2/h2_version.h.in +++ b/modules/http2/mod_h2/h2_version.h.in @@ -31,12 +31,4 @@ #define MOD_H2_VERSION_NUM @PACKAGE_VERSION_NUM@ -/** - * @macro - * != 0 iff the nghttp2 library supports data callbacks. - * Disabled for now since it lowers performance. - */ -#define NGHTTP2_HAS_DATA_CBXXX @NGHTTP2_HAS_DATA_CB@ -#define NGHTTP2_HAS_DATA_CB 0 - #endif /* mod_h2_h2_version_h */ diff --git a/modules/http2/mod_h2/h2_workers.c b/modules/http2/mod_h2/h2_workers.c index 94c3ccde59..6bdf965e80 100644 --- a/modules/http2/mod_h2/h2_workers.c +++ b/modules/http2/mod_h2/h2_workers.c @@ -43,6 +43,13 @@ static int in_list(h2_workers *workers, h2_mplx *m) } +/** + * Get the next task for the given worker. Will block until a task arrives + * or the max_wait timer expires and more than min workers exist. + * The previous h2_mplx instance might be passed in and will be served + * with preference, since we can ask it for the next task without aquiring + * the h2_workers lock. 
+ */ static apr_status_t get_mplx_next(h2_worker *worker, h2_mplx **pm, h2_task **ptask, void *ctx) { @@ -71,7 +78,7 @@ static apr_status_t get_mplx_next(h2_worker *worker, h2_mplx **pm, } if (!ptask) { - /* of the worker does not want a next task, we're done. + /* the worker does not want a next task, we're done. */ return APR_SUCCESS; } @@ -82,7 +89,6 @@ static apr_status_t get_mplx_next(h2_worker *worker, h2_mplx **pm, status = apr_thread_mutex_lock(workers->lock); if (status == APR_SUCCESS) { ++workers->idle_worker_count; - ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, workers->s, "h2_worker(%d): looking for work", h2_worker_get_id(worker)); diff --git a/modules/http2/mod_h2/mod_h2.c b/modules/http2/mod_h2/mod_h2.c index 2e5f9223c3..b3c6b187d1 100644 --- a/modules/http2/mod_h2/mod_h2.c +++ b/modules/http2/mod_h2/mod_h2.c @@ -90,12 +90,7 @@ static int h2_post_config(apr_pool_t *p, apr_pool_t *plog, /* all fine, we know these ones */ break; case H2_MPM_PREFORK: - ap_log_error( APLOG_MARK, APLOG_WARNING, 0, s, - "This httpd uses mpm_prefork for multiprocessing. " - "Please take notice that mod_h2 always with run " - "requests in a multi-threaded environment. If you " - "use prefork for single-thread connection handling, " - " mod_h2 might pose problems."); + /* ok, we now know how to handle that one */ break; case H2_MPM_UNKNOWN: /* ??? */ diff --git a/modules/http2/sandbox/nghttp2/Makefile b/modules/http2/sandbox/nghttp2/Makefile index 8cfd700435..387f4cd64d 100644 --- a/modules/http2/sandbox/nghttp2/Makefile +++ b/modules/http2/sandbox/nghttp2/Makefile @@ -13,7 +13,7 @@ # limitations under the License. 
# -NGHTTP2_VERSION = 1.0.4 +NGHTTP2_VERSION = 1.0.5 NGHTTP2_DIR = nghttp2-$(NGHTTP2_VERSION) NGHTTP2_TAR = $(NGHTTP2_DIR).tar.gz NGHTTP2_URL = https://github.com/tatsuhiro-t/nghttp2/releases/download/v$(NGHTTP2_VERSION)/$(NGHTTP2_TAR) diff --git a/modules/http2/sandbox/test/bin/testrun b/modules/http2/sandbox/test/bin/testrun index 5da81f1c2f..23227340a4 100644 --- a/modules/http2/sandbox/test/bin/testrun +++ b/modules/http2/sandbox/test/bin/testrun @@ -5,17 +5,22 @@ SCHEME=https -HOST=test.example.org PORT=12346 +HOST=test.example.org URL=$SCHEME://$HOST:$PORT WRK=../wrk/wrk H2LOAD=${H2LOAD:-sandbox/install/bin/h2load} +RUNS=${RUNS:-200000} +CONNECTIONS=${CONNECTIONS:-100} +THREADS=${THREADS:-8} +RUNWRK=0 + run_wrk() { path=$1 - $WRK -c100 -t${THREADS:-8} -d30s $URL$path >/tmp/$$.out 2>&1 + $WRK -c${CONNECTIONS} -t${THREADS} -d15s $URL$path >/tmp/$$.out 2>&1 stat=$( fgrep 'Requests/sec: ' /tmp/$$.out ) reqs=${stat##Requests/sec: } @@ -23,7 +28,7 @@ run_wrk() { run_load() { path=$1 - $H2LOAD -c 100 -t ${THREADS:-8} -m ${MAX_STREAMS:-1} -n 500000 $URL$path > /tmp/$$.out 2>&1 + $H2LOAD -c ${CONNECTIONS} -t ${THREADS} -m ${MAX_STREAMS:-1} -n ${RUNS} $URL$path > /tmp/$$.out 2>&1 fin=$( fgrep 'finished in ' /tmp/$$.out ) stat=$( fgrep 'requests: ' /tmp/$$.out ) @@ -57,7 +62,8 @@ run_iter() { path=$1 max=$2 iterations=$3 - echo -n "m, wrk" + echo -n "m" + test "$RUNWRK" != 0 && echo -n ", wrk" for i in $iterations; do echo -n ", $i" done @@ -65,15 +71,19 @@ run_iter() { i=1 while [ $i -le $max ]; do echo -n "$i" - run_wrk $path - echo -n ", $reqs" + if test "$RUNWRK" != 0; then + run_wrk $path + echo -n ", $reqs" + fi run_m $path "$iterations" echo "" i=$[ i + 1 ] done } -URL_PATH=$1 - -echo "$URL_PATH, 500k requests, req/s" -run_iter $URL_PATH 10 "1 2 3 4 5 6 7 8 9 10 15 20 40 100" +for i in "$@"; do + URL_PATH=$i + echo "$URL_PATH, $RUNS requests, ${THREADS} threads, ${CONNECTIONS} connections, req/s" + run_iter $URL_PATH 5 "1 2 3 4 5 6 7 8 9 10 15 20 40 100" + echo 
+done diff --git a/modules/http2/sandbox/test/conf/httpd.conf b/modules/http2/sandbox/test/conf/httpd.conf index 746bd652ed..8086605b52 100644 --- a/modules/http2/sandbox/test/conf/httpd.conf +++ b/modules/http2/sandbox/test/conf/httpd.conf @@ -96,6 +96,19 @@ CoreDumpDirectory "/tmp" # Defaults to "On" #H2HackMpmEvent On + # If output data should be buffered or written directly. + # Defaults to "on" for TLS connections, "off" otherwise. + # H2BufferOutput On + + # The size of the output buffer per connection. + # Defaults to 64k + # H2BufferSize + + # The maximum number of bytes for a write operation from the buffer. This + # determines the maximum size of a TLS chunk, when buffering is used. + # Defaults to 16k + # H2BufferMaxWrite + @@ -103,7 +116,7 @@ CoreDumpDirectory "/tmp" # SSL Setup ################################################################################ - SSLCipherSuite HIGH:!aNULL:!MD5 + SSLCipherSuite ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!3DES:!MD5:!PSK SSLProtocol All -SSLv2 -SSLv3 SSLPassPhraseDialog builtin SSLSessionCache "shmcb:SUBST_SERVER_ROOT_SUBST/logs/ssl_scache(512000)"