+v0.8.0
+--------------------------------------------------------------------------------
+ * when serving static files, these are kept open longer (and buffer copied
+ less). Use config "H2SessionExtraFiles" to change the value. The more you
+ can give per connection, the better. But you may run out of open file
+ handles, if not careful. Default value is 5, which seems to work on my
+ config with mpms worker, event and prefork. Basically, each httpd process
+ needs at least #connections + #workers + (#conn * #extrafiles) available.
+ * main connection buffering, buffer sizes and write sizes are configurable,
+ see README.md for the list of options. Buffer sizes and write sizes influence
+ the TLS performance. Defaults seem reasonable, but ymmv.
+ * general performance improved due to better handling of stream close and
+ resource cleanup
+ * prefork mpm confirmed working. Unless configured otherwise, each session
+ will only have 1 worker thread. Raise this with "H2MaxWorkers"
+ * changed sandbox cipher config to mozilla recommendation (thanks Lucas)
+ * sandbox update to nghttp2 1.0.5
+
v0.7.3
--------------------------------------------------------------------------------
* sandbox update to nghttp2 1.0.4
* H2SerializeHeaders (on/off), "off" serialize/parse request+response headers for streams, as if they arrived in HTTP/1 format. When off, certain parts of httpd core filters are disabled/replaced to allow for a more efficient handling.
* H2HackMpmEvent (on/off), "on" performs a hack on internal connection in order to make mpm_event working, has no effect on other mpm modules
* H2Direct (on/off), "on" to enable h2c direct mode on a non-TLS host, default: off
+* H2BufferOutput (on/off), if output data shall be buffered. "on" for TLS connections, "off" otherwise
+* H2BufferSize n size of output buffer (if enabled), defaults to 64k
+* H2BufferWriteMax n max. number of bytes in a single write when buffering output, defaults to 16k
+* H2SessionExtraFiles n number of extra file handles a session might keep open to improve performance, depends on mpm module used and ulimit of processes, defaults to 5
All these configuration parameters can be set on servers/virtual hosts and
are not available on directory level. Note that Worker configuration is
#
AC_PREREQ([2.69])
-AC_INIT([mod_h2], [0.7.3], [stefan.eissing@greenbytes.de])
+AC_INIT([mod_h2], [0.8.0], [stefan.eissing@greenbytes.de])
LT_PREREQ([2.2.6])
LT_INIT()
# In sandbox build, we just define it therefore, as a quick workaround
CPPFLAGS="$CPPFLAGS -DPATH_MAX=4096"
- # we use a new nghttp2 in the sandbox which has these features
- NGHTTP2_HAS_DATA_CB=1
-
else
# production, we need to find where the apxs is. which then
# can tell us the various directories we need.
AC_CHECK_LIB([nghttp2], [nghttp2_session_server_new2], ,
[AC_MSG_ERROR("library nghttp2 not found")])
- AC_CHECK_LIB([nghttp2], [nghttp2_session_callbacks_set_send_data_callback],
- [NGHTTP2_HAS_DATA_CB=1], [NGHTTP2_HAS_DATA_CB=0])
-
fi
# Checks for header files.
AC_SUBST(BUILD_SUBDIRS)
AC_SUBST(SYSCONF_DIR)
AC_SUBST(LIBEXEC_DIR)
-AC_SUBST(NGHTTP2_HAS_DATA_CB)
AC_CONFIG_FILES([
Makefile
0, /* serialize headers */
1, /* hack mpm event */
1, /* h2 direct mode */
+ -1, /* buffer output, by default only for TLS */
+ 64*1024, /* buffer size */
+ 16*1024, /* out write max */
+ 5, /* # session extra files */
};
static void *h2_config_create(apr_pool_t *pool,
strcat(name, s);
strcat(name, "]");
- conf->name = name;
- conf->h2_enabled = DEF_VAL;
- conf->h2_max_streams = DEF_VAL;
- conf->h2_max_hl_size = DEF_VAL;
- conf->h2_window_size = DEF_VAL;
- conf->min_workers = DEF_VAL;
- conf->max_workers = DEF_VAL;
+ conf->name = name;
+ conf->h2_enabled = DEF_VAL;
+ conf->h2_max_streams = DEF_VAL;
+ conf->h2_max_hl_size = DEF_VAL;
+ conf->h2_window_size = DEF_VAL;
+ conf->min_workers = DEF_VAL;
+ conf->max_workers = DEF_VAL;
conf->max_worker_idle_secs = DEF_VAL;
- conf->stream_max_mem_size = DEF_VAL;
- conf->alt_svc_max_age = DEF_VAL;
- conf->serialize_headers = DEF_VAL;
- conf->hack_mpm_event = DEF_VAL;
- conf->h2_direct = DEF_VAL;
+ conf->stream_max_mem_size = DEF_VAL;
+ conf->alt_svc_max_age = DEF_VAL;
+ conf->serialize_headers = DEF_VAL;
+ conf->hack_mpm_event = DEF_VAL;
+ conf->h2_direct = DEF_VAL;
+ conf->buffer_output = DEF_VAL;
+ conf->buffer_output = DEF_VAL;
+ conf->buffer_size = DEF_VAL;
+ conf->write_max = DEF_VAL;
+ conf->session_extra_files = DEF_VAL;
return conf;
}
n->serialize_headers = H2_CONFIG_GET(add, base, serialize_headers);
n->hack_mpm_event = H2_CONFIG_GET(add, base, hack_mpm_event);
n->h2_direct = H2_CONFIG_GET(add, base, h2_direct);
+ n->buffer_output = H2_CONFIG_GET(add, base, buffer_output);
+ n->buffer_size = H2_CONFIG_GET(add, base, buffer_size);
+ n->write_max = H2_CONFIG_GET(add, base, write_max);
+ n->session_extra_files = H2_CONFIG_GET(add, base, session_extra_files);
return n;
}
return H2_CONFIG_GET(conf, &defconf, hack_mpm_event);
case H2_CONF_DIRECT:
return H2_CONFIG_GET(conf, &defconf, h2_direct);
+ case H2_CONF_BUFFER_OUTPUT:
+ return H2_CONFIG_GET(conf, &defconf, buffer_output);
+ case H2_CONF_BUFFER_SIZE:
+ return H2_CONFIG_GET(conf, &defconf, buffer_size);
+ case H2_CONF_WRITE_MAX:
+ return H2_CONFIG_GET(conf, &defconf, write_max);
+ case H2_CONF_SESSION_FILES:
+ return H2_CONFIG_GET(conf, &defconf, session_extra_files);
default:
return DEF_VAL;
}
h2_config *cfg = h2_config_sget(parms->server);
cfg->h2_max_streams = (int)apr_atoi64(value);
(void)arg;
+ if (cfg->h2_max_streams < 1) {
+ return "value must be > 0";
+ }
return NULL;
}
h2_config *cfg = h2_config_sget(parms->server);
cfg->h2_window_size = (int)apr_atoi64(value);
(void)arg;
+ if (cfg->h2_window_size < 1024) {
+ return "value must be > 1k";
+ }
return NULL;
}
h2_config *cfg = h2_config_sget(parms->server);
cfg->h2_max_hl_size = (int)apr_atoi64(value);
(void)arg;
+ if (cfg->h2_max_hl_size < 1024) {
+ return "value must be > 1k";
+ }
return NULL;
}
h2_config *cfg = h2_config_sget(parms->server);
cfg->min_workers = (int)apr_atoi64(value);
(void)arg;
+ if (cfg->min_workers < 1) {
+ return "value must be > 1";
+ }
return NULL;
}
h2_config *cfg = h2_config_sget(parms->server);
cfg->max_workers = (int)apr_atoi64(value);
(void)arg;
+ if (cfg->max_workers < 1) {
+ return "value must be > 1";
+ }
return NULL;
}
h2_config *cfg = h2_config_sget(parms->server);
cfg->max_worker_idle_secs = (int)apr_atoi64(value);
(void)arg;
+ if (cfg->max_worker_idle_secs < 1) {
+ return "value must be > 1";
+ }
return NULL;
}
cfg->stream_max_mem_size = (int)apr_atoi64(value);
(void)arg;
+ if (cfg->stream_max_mem_size < 1024) {
+ return "value must be > 1k";
+ }
return NULL;
}
return NULL;
}
+/* "H2BufferSize n": size in bytes of the output buffer used when output
+ * buffering is enabled; values below 16k are rejected.
+ * NOTE(review): apr_atoi64's result is cast to (int) *before* the range
+ * check, so inputs > INT_MAX wrap around first -- confirm intended. */
+static const char *h2_conf_set_buffer_size(cmd_parms *parms,
+                                           void *arg, const char *value)
+{
+    h2_config *cfg = h2_config_sget(parms->server);
+    apr_int64_t len = (int)apr_atoi64(value);
+    if (len < (16*1024)) {
+        return "value must be a positive number, at least 16k";
+    }
+    cfg->buffer_size = (int)len;
+    (void)arg;
+    return NULL;
+}
+
+/* "H2BufferWriteMax n": maximum number of bytes placed into a single
+ * write of the output buffer; must be positive and no larger than the
+ * configured buffer size.
+ * NOTE(review): cfg->buffer_size may still hold its unset default when
+ * this directive precedes H2BufferSize in the config -- verify that
+ * directive ordering cannot make this check misfire. */
+static const char *h2_conf_set_write_max(cmd_parms *parms,
+                                         void *arg, const char *value)
+{
+    h2_config *cfg = h2_config_sget(parms->server);
+    apr_int64_t max = (int)apr_atoi64(value);
+    if (max <= 0) {
+        return "value must be a positive number";
+    }
+    else if (max > cfg->buffer_size) {
+        return "value must be less than H2BufferSize";
+    }
+    cfg->write_max = (int)max;
+    (void)arg;
+    return NULL;
+}
+
+/* "H2SessionExtraFiles n": number of extra file handles a session may
+ * keep open for passed-through file buckets; must be positive.
+ * NOTE(review): same (int)-before-check truncation pattern as the other
+ * numeric setters -- confirm acceptable for very large inputs. */
+static const char *h2_conf_set_session_extra_files(cmd_parms *parms,
+                                                   void *arg, const char *value)
+{
+    h2_config *cfg = h2_config_sget(parms->server);
+    apr_int64_t max = (int)apr_atoi64(value);
+    if (max <= 0) {
+        return "value must be a positive number";
+    }
+    cfg->session_extra_files = (int)max;
+    (void)arg;
+    return NULL;
+}
+
+/* "H2SerializeHeaders on|off": strict case-insensitive On/Off parsing;
+ * any other value is now reported as a configuration error instead of
+ * being silently treated as Off (old apr_strnatcasecmp behavior). */
static const char *h2_conf_set_serialize_headers(cmd_parms *parms,
                                                 void *arg, const char *value)
{
    h2_config *cfg = h2_config_sget(parms->server);
-    cfg->serialize_headers = !apr_strnatcasecmp(value, "On");
+    if (!strcasecmp(value, "On")) {
+        cfg->serialize_headers = 1;
+        return NULL;
+    }
+    else if (!strcasecmp(value, "Off")) {
+        cfg->serialize_headers = 0;
+        return NULL;
+    }
+
    (void)arg;
-    return NULL;
+    return "value must be On or Off";
}
+/* "H2HackMpmEvent on|off": strict On/Off parsing; other values are
+ * rejected as a configuration error (previously treated as Off). */
static const char *h2_conf_set_hack_mpm_event(cmd_parms *parms,
                                              void *arg, const char *value)
{
    h2_config *cfg = h2_config_sget(parms->server);
-    cfg->hack_mpm_event = !apr_strnatcasecmp(value, "On");
+    if (!strcasecmp(value, "On")) {
+        cfg->hack_mpm_event = 1;
+        return NULL;
+    }
+    else if (!strcasecmp(value, "Off")) {
+        cfg->hack_mpm_event = 0;
+        return NULL;
+    }
+
    (void)arg;
-    return NULL;
+    return "value must be On or Off";
}
+/* "H2Direct on|off": enable h2c direct mode; strict On/Off parsing,
+ * other values are rejected as a configuration error (previously any
+ * non-"On" value silently meant Off). */
static const char *h2_conf_set_direct(cmd_parms *parms,
                                      void *arg, const char *value)
{
    h2_config *cfg = h2_config_sget(parms->server);
-    cfg->h2_direct = !apr_strnatcasecmp(value, "On");
+    if (!strcasecmp(value, "On")) {
+        cfg->h2_direct = 1;
+        return NULL;
+    }
+    else if (!strcasecmp(value, "Off")) {
+        cfg->h2_direct = 0;
+        return NULL;
+    }
+
    (void)arg;
-    return NULL;
+    return "value must be On or Off";
}
+
+/* "H2BufferOutput on|off": force output buffering on or off.  When the
+ * directive is absent, buffer_output keeps its default value, which
+ * h2_conn_io_init interprets as "buffer only for TLS connections". */
+static const char *h2_conf_set_buffer_output(cmd_parms *parms,
+                                             void *arg, const char *value)
+{
+    h2_config *cfg = h2_config_sget(parms->server);
+    if (!strcasecmp(value, "On")) {
+        cfg->buffer_output = 1;
+        return NULL;
+    }
+    else if (!strcasecmp(value, "Off")) {
+        cfg->buffer_output = 0;
+        return NULL;
+    }
+
+    (void)arg;
+    return "value must be On or Off";
}
#pragma GCC diagnostic ignored "-Wmissing-braces"
AP_INIT_TAKE1("H2HackMpmEvent", h2_conf_set_hack_mpm_event, NULL,
RSRC_CONF, "on to enable a hack that makes mpm_event working with mod_h2"),
AP_INIT_TAKE1("H2Direct", h2_conf_set_direct, NULL,
- RSRC_CONF, "on to enable direct HTTP/2 mode on non-TLS"),
+ RSRC_CONF, "on to enable direct HTTP/2 mode"),
+ AP_INIT_TAKE1("H2BufferOutput", h2_conf_set_buffer_output, NULL,
+ RSRC_CONF, "on to enable output buffering, default for TLS"),
+ AP_INIT_TAKE1("H2BufferSize", h2_conf_set_buffer_size, NULL,
+ RSRC_CONF, "size of outgoing buffer in bytes"),
+ AP_INIT_TAKE1("H2BufferWriteMax", h2_conf_set_write_max, NULL,
+ RSRC_CONF, "maximum number of bytes in a outgoing write"),
+ AP_INIT_TAKE1("H2SessionExtraFiles", h2_conf_set_session_extra_files, NULL,
+ RSRC_CONF, "number of extra file a session might keep open"),
{ NULL, NULL, NULL, 0, 0, NULL }
};
H2_CONF_SER_HEADERS,
H2_CONF_HACK_MPM_EVENT,
H2_CONF_DIRECT,
+ H2_CONF_BUFFER_OUTPUT,
+ H2_CONF_BUFFER_SIZE,
+ H2_CONF_WRITE_MAX,
+ H2_CONF_SESSION_FILES,
} h2_config_var_t;
/* Apache httpd module configuration for h2. */
int hack_mpm_event; /* If mpm_event is detected, perform a hack
on stream connections to make it work */
int h2_direct; /* if mod_h2 is active on non-TLS directly */
+ int buffer_output; /* if output buffering shall be used */
+ int buffer_size; /* size of buffer for outgoing data */
+ int write_max; /* max number of bytes for a write op */
+ int session_extra_files; /* # of extra files a session may keep open */
} h2_config;
else if (!strcmp("prefork.c", m->name)) {
mpm_type = H2_MPM_PREFORK;
mpm_module = m;
- /* prefork reports 1 thread per child, also as max */
- if (maxw == 1) {
- maxw = 8; /* number of cores maybe? */
- }
}
else if (!strcmp("mod_ssl.c", m->name)) {
ssl_module = m;
#include <http_connection.h>
#include "h2_private.h"
+#include "h2_config.h"
#include "h2_conn_io.h"
#include "h2_h2.h"
#include "h2_util.h"
-/* If we write directly to our brigade or use a char buffer to collect
- * out data.
- */
-
-#define H2_CONN_IO_BUF_SIZE (64 * 1024)
-#define H2_CONN_IO_SSL_WRITE_SIZE (16 * 1024)
-
-
apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c)
{
+ h2_config *cfg = h2_config_get(c);
+
io->connection = c;
io->input = apr_brigade_create(c->pool, c->bucket_alloc);
io->output = apr_brigade_create(c->pool, c->bucket_alloc);
- io->buffer_output = h2_h2_is_tls(c);
io->buflen = 0;
+ io->max_write_size = h2_config_geti(cfg, H2_CONF_WRITE_MAX);
+ io->buffer_output = h2_config_geti(cfg, H2_CONF_BUFFER_OUTPUT);
+ if (io->buffer_output < 0) {
+ io->buffer_output = h2_h2_is_tls(c);
+ }
+
+ /* Currently we buffer only for TLS output. The reason this gives
+ * improved performance is that buckets send to the mod_ssl network
+ * filter will be encrypted in chunks. There is a special filter
+ * that tries to aggregate data, but that does not work well when
+ * bucket sizes alternate between tiny frame headers and large data
+ * chunks.
+ */
if (io->buffer_output) {
- io->bufsize = H2_CONN_IO_BUF_SIZE;
+ io->bufsize = h2_config_geti(cfg, H2_CONF_BUFFER_SIZE);
io->buffer = apr_pcalloc(c->pool, io->bufsize);
}
else {
static apr_status_t bucketeer_buffer(h2_conn_io *io) {
const char *data = io->buffer;
apr_size_t remaining = io->buflen;
- int bcount = (int)(remaining / H2_CONN_IO_SSL_WRITE_SIZE);
+ int bcount = (int)(remaining / io->max_write_size);
apr_bucket *b;
for (int i = 0; i < bcount; ++i) {
- b = apr_bucket_transient_create(data, H2_CONN_IO_SSL_WRITE_SIZE,
+ b = apr_bucket_transient_create(data, io->max_write_size,
io->output->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(io->output, b);
- data += H2_CONN_IO_SSL_WRITE_SIZE;
- remaining -= H2_CONN_IO_SSL_WRITE_SIZE;
+ data += io->max_write_size;
+ remaining -= io->max_write_size;
}
if (remaining > 0) {
apr_bucket_brigade *input;
apr_bucket_brigade *output;
int buffer_output;
+ int max_write_size;
char *buffer;
apr_size_t buflen;
apr_brigade_length(bb, 1, &start_len);
apr_bucket *last = APR_BRIGADE_LAST(bb);
apr_status_t status = h2_util_move(bb, io->bbin, maxlen, 0,
- NULL, "h2_io_in_read");
+ "h2_io_in_read");
if (status == APR_SUCCESS) {
apr_bucket *nlast = APR_BRIGADE_LAST(bb);
apr_off_t end_len = 0;
io->bbin = apr_brigade_create(io->bbout->p,
io->bbout->bucket_alloc);
}
- return h2_util_move(io->bbin, bb, 0, 0, NULL, "h2_io_in_write");
+ return h2_util_move(io->bbin, bb, 0, 0, "h2_io_in_write");
}
return APR_SUCCESS;
}
return APR_SUCCESS;
}
-apr_status_t h2_io_out_read(h2_io *io, char *buffer,
- apr_size_t *plen, int *peos)
-{
- if (buffer == NULL) {
- /* just checking length available */
- return h2_util_bb_avail(io->bbout, plen, peos);
- }
-
- return h2_util_bb_read(io->bbout, buffer, plen, peos);
-}
-
apr_status_t h2_io_out_readx(h2_io *io,
h2_io_data_cb *cb, void *ctx,
apr_size_t *plen, int *peos)
}
apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb,
- apr_size_t maxlen)
+ apr_size_t maxlen, int *pfile_handles_allowed)
{
- return h2_util_move(io->bbout, bb, maxlen, 0, NULL, "h2_io_out_write");
+ /* Let's move the buckets from the request processing in here, so
+ * that the main thread can read them when it has time/capacity.
+ *
+ * Move at most "maxlen" memory bytes. If buckets remain, it is
+ * the caller's responsibility to take care of this.
+ *
+ * We allow passing of file buckets as long as we do not have too
+ * many open files already buffered. Otherwise we will run out of
+ * file handles.
+ */
+ int start_allowed = *pfile_handles_allowed;
+ apr_status_t status;
+ status = h2_util_move(io->bbout, bb, maxlen, pfile_handles_allowed,
+ "h2_io_out_write");
+ /* track # file buckets moved into our pool */
+ if (start_allowed != *pfile_handles_allowed) {
+ io->files_handles_owned += (start_allowed - *pfile_handles_allowed);
+ }
+ return status;
}
struct apr_thread_cond_t *output_drained; /* block on writing */
struct h2_response *response;/* submittable response created */
+ int files_handles_owned;
};
/*******************************************************************************
* @param plen the requested max len, set to amount of data on return
* @param peos != 0 iff the end of stream has been reached
*/
-apr_status_t h2_io_out_read(h2_io *io, char *buffer,
- apr_size_t *plen, int *peos);
-
apr_status_t h2_io_out_readx(h2_io *io,
h2_io_data_cb *cb, void *ctx,
apr_size_t *plen, int *peos);
apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb,
- apr_size_t maxlen);
+ apr_size_t maxlen, int *pfile_buckets_allowed);
/**
* Closes the input. After existing data has been read, APR_EOF will
/* we keep the array sorted by id, so lookup can be done
* by bsearch.
*/
- h2_io key = { stream_id, NULL, 0, 0, 0, NULL, NULL, NULL, NULL };
+ h2_io key = { stream_id, NULL, 0, 0, 0, NULL, NULL, NULL, NULL, 0 };
h2_io *pkey = &key;
h2_io **ps = bsearch(&pkey, sp->list->elts, sp->list->nelts,
sp->list->elt_size, h2_stream_id_cmp);
m->closed = h2_stream_set_create(m->pool);
m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
m->workers = workers;
+
+ m->file_handles_allowed = h2_config_geti(conf, H2_CONF_SESSION_FILES);
}
return m;
}
-#define REF_COUNT_ATOMIC 1
-
static void reference(h2_mplx *m)
{
apr_atomic_inc32(&m->refs);
void h2_mplx_reference(h2_mplx *m)
{
- if (REF_COUNT_ATOMIC) {
- reference(m);
- }
- else {
- apr_status_t status = apr_thread_mutex_lock(m->lock);
- if (APR_SUCCESS == status) {
- reference(m);
- apr_thread_mutex_unlock(m->lock);
- }
- }
+ reference(m);
}
void h2_mplx_release(h2_mplx *m)
{
- if (REF_COUNT_ATOMIC) {
- release(m);
- }
- else {
- apr_status_t status = apr_thread_mutex_lock(m->lock);
- if (APR_SUCCESS == status) {
- release(m);
- apr_thread_mutex_unlock(m->lock);
- }
- }
+ release(m);
}
static void workers_register(h2_mplx *m) {
}
h2_stream_destroy(stream);
if (io) {
+ /* The pool is cleared/destroyed which also closes all
+ * allocated file handles. Give this count back to our
+ * file handle pool. */
+ m->file_handles_allowed += io->files_handles_owned;
h2_io_set_remove(m->stream_ios, io);
h2_io_destroy(io);
}
return status;
}
-apr_status_t h2_mplx_out_read(h2_mplx *m, int stream_id,
- char *buffer, apr_size_t *plen, int *peos)
-{
- AP_DEBUG_ASSERT(m);
- if (m->aborted) {
- return APR_ECONNABORTED;
- }
- apr_status_t status = apr_thread_mutex_lock(m->lock);
- if (APR_SUCCESS == status) {
- h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
- if (io) {
- status = h2_io_out_read(io, buffer, plen, peos);
- if (status == APR_SUCCESS && io->output_drained) {
- apr_thread_cond_signal(io->output_drained);
- }
- }
- else {
- status = APR_ECONNABORTED;
- }
- apr_thread_mutex_unlock(m->lock);
- }
- return status;
-}
-
apr_status_t h2_mplx_out_readx(h2_mplx *m, int stream_id,
h2_io_data_cb *cb, void *ctx,
apr_size_t *plen, int *peos)
&& (status == APR_SUCCESS)
&& !is_aborted(m, &status)) {
- status = h2_io_out_write(io, bb, m->stream_max_mem);
+ status = h2_io_out_write(io, bb, m->stream_max_mem,
+ &m->file_handles_allowed);
/* Wait for data to drain until there is room again */
while (!APR_BRIGADE_EMPTY(bb)
apr_pool_t *spare_pool; /* spare pool, ready for next stream */
struct h2_stream_set *closed; /* streams closed, but task ongoing */
struct h2_workers *workers;
+ int file_handles_allowed;
};
/*******************************************************************************
* Reads output data from the given stream. Will never block, but
* return APR_EAGAIN until data arrives or the stream is closed.
*/
-apr_status_t h2_mplx_out_read(h2_mplx *mplx, int stream_id,
- char *buffer, apr_size_t *plen, int *peos);
-
apr_status_t h2_mplx_out_readx(h2_mplx *mplx, int stream_id,
h2_io_data_cb *cb, void *ctx,
apr_size_t *plen, int *peos);
return h2_conn_io_write(&session->io, data, length);
}
-#if NGHTTP2_HAS_DATA_CB
-
static apr_status_t pass_data(void *ctx,
const char *data, apr_size_t length)
{
return send_data((h2_session*)ctx, data, length);
}
-int on_send_data_cb(nghttp2_session *ngh2,
- nghttp2_frame *frame,
- const uint8_t *framehd,
- size_t length,
- nghttp2_data_source *source,
- void *userp)
+static int on_send_data_cb(nghttp2_session *ngh2,
+ nghttp2_frame *frame,
+ const uint8_t *framehd,
+ size_t length,
+ nghttp2_data_source *source,
+ void *userp)
{
apr_status_t status = APR_SUCCESS;
h2_session *session = (h2_session *)userp;
int stream_id = (int)frame->hd.stream_id;
const unsigned char padlen = frame->data.padlen;
- apr_size_t frame_len = 9 + (padlen? 1 : 0) + length + padlen;
- apr_size_t data_len = length;
- apr_size_t avail, chunk_len;
- int rv = 0;
int eos;
h2_stream *stream;
+ (void)ngh2;
+ (void)source;
if (session->aborted) {
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
if (status == APR_SUCCESS) {
apr_size_t len = length;
- int eos = 0;
status = h2_stream_readx(stream, pass_data, session,
&len, &eos);
if (status == APR_SUCCESS && len != length) {
return h2_session_status_from_apr_status(status);
}
-#endif
-
#define NGH2_SET_CALLBACK(callbacks, name, fn)\
nghttp2_session_callbacks_set_##name##_callback(callbacks, fn)
NGH2_SET_CALLBACK(*pcb, on_stream_close, on_stream_close_cb);
NGH2_SET_CALLBACK(*pcb, on_begin_headers, on_begin_headers_cb);
NGH2_SET_CALLBACK(*pcb, on_header, on_header_cb);
-#if NGHTTP2_HAS_DATA_CB
NGH2_SET_CALLBACK(*pcb, send_data, on_send_data_cb);
-#endif
return APR_SUCCESS;
}
{
h2_session *session = (h2_session *)puser;
AP_DEBUG_ASSERT(session);
- (void)ng2s;(void)source;
+ (void)ng2s;(void)source;(void)buf;
h2_stream *stream = h2_stream_set_get(session->streams, stream_id);
if (!stream) {
apr_size_t nread = length;
int eos = 0;
-#if NGHTTP2_HAS_DATA_CB
apr_status_t status = h2_stream_prep_read(stream, &nread, &eos);
if (nread) {
*data_flags |= NGHTTP2_DATA_FLAG_NO_COPY;
}
-#else
- /* Try to pop data buckets from our queue for this stream
- * until we see EOS or the buffer is full.
- */
- apr_status_t status = h2_stream_read(stream, (char*)buf, &nread, &eos);
-#endif
switch (status) {
case APR_SUCCESS:
stream->bbout = apr_brigade_create(stream->pool,
stream->m->c->bucket_alloc);
}
- return h2_util_move(stream->bbout, bb, 16 * 1024, 1, NULL,
+ return h2_util_move(stream->bbout, bb, 16 * 1024, NULL,
"h2_stream_set_response");
}
return APR_SUCCESS;
}
-apr_status_t h2_stream_read(h2_stream *stream, char *buffer,
- apr_size_t *plen, int *peos)
-{
- apr_status_t status = APR_SUCCESS;
- const char *src;
- if (stream->bbout && !APR_BRIGADE_EMPTY(stream->bbout)) {
- src = "stream";
- status = h2_util_bb_read(stream->bbout, buffer, plen, peos);
- }
- else {
- src = "mplx";
- status = h2_mplx_out_read(stream->m, stream->id, buffer, plen, peos);
- }
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->m->c,
- "h2_stream(%ld-%d): read %s, len=%ld eos=%d",
- stream->m->id, stream->id,
- src, (long)*plen, *peos);
- return status;
-}
-
void h2_stream_set_suspended(h2_stream *stream, int suspended)
{
AP_DEBUG_ASSERT(stream);
struct h2_response *response,
apr_bucket_brigade *bb);
-apr_status_t h2_stream_read(h2_stream *stream, char *buffer,
- apr_size_t *plen, int *peos);
-
apr_status_t h2_stream_prep_read(h2_stream *stream,
apr_size_t *plen, int *peos);
if (mode == AP_MODE_EXHAUSTIVE) {
/* return all we have */
return h2_util_move(bb, input->bb, readbytes, 0,
- NULL, "task_input_read(exhaustive)");
+ "task_input_read(exhaustive)");
}
else if (mode == AP_MODE_READBYTES) {
return h2_util_move(bb, input->bb, readbytes, 0,
- NULL, "task_input_read(readbytes)");
+ "task_input_read(readbytes)");
}
else if (mode == AP_MODE_SPECULATIVE) {
/* return not more than was asked for */
* still needed.
*/
static const int DEEP_COPY = 1;
-static const int FILE_MOVE = 0;
+static const int FILE_MOVE = 1;
static apr_status_t last_not_included(apr_bucket_brigade *bb,
- apr_size_t maxlen, int count_virtual,
+ apr_size_t maxlen,
+ int same_alloc,
+ int *pfile_buckets_allowed,
apr_bucket **pend)
{
apr_bucket *b;
apr_status_t status = APR_SUCCESS;
+ int files_allowed = pfile_buckets_allowed? *pfile_buckets_allowed : 0;
if (maxlen > 0) {
/* Find the bucket, up to which we reach maxlen/mem bytes */
}
}
- if (!count_virtual && FILE_MOVE && APR_BUCKET_IS_FILE(b)) {
+ if (same_alloc && APR_BUCKET_IS_FILE(b)) {
+ /* we like it move it, always */
+ }
+ else if (files_allowed > 0 && APR_BUCKET_IS_FILE(b)) {
/* this has no memory footprint really unless
* it is read, disregard it in length count,
- * unless we count the virtual buckets */
+ * unless we do not move the file buckets */
+ --files_allowed;
}
else if (maxlen < b->length) {
apr_bucket_split(b, maxlen);
return status;
}
-#define LOG_LEVEL APLOG_TRACE2
+#define LOG_BUCKETS 0
+#define LOG_LEVEL APLOG_INFO
apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from,
- apr_size_t maxlen, int count_virtual,
- apr_file_t **pfile, const char *msg)
+ apr_size_t maxlen, int *pfile_handles_allowed,
+ const char *msg)
{
apr_status_t status = APR_SUCCESS;
AP_DEBUG_ASSERT(to);
AP_DEBUG_ASSERT(from);
int same_alloc = (to->bucket_alloc == from->bucket_alloc);
+
+ if (!FILE_MOVE) {
+ pfile_handles_allowed = NULL;
+ }
if (!APR_BRIGADE_EMPTY(from)) {
apr_bucket *b, *end;
- status = last_not_included(from, maxlen,
- (count_virtual || !FILE_MOVE), &end);
+ status = last_not_included(from, maxlen, same_alloc,
+ pfile_handles_allowed, &end);
if (status != APR_SUCCESS) {
return status;
}
* directly. */
APR_BUCKET_REMOVE(b);
APR_BRIGADE_INSERT_TAIL(to, b);
+#if LOG_BUCKETS
ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
"h2_util_move: %s, passed bucket(same bucket_alloc) "
"%ld-%ld, type=%s",
(APR_BUCKET_IS_EOS(b)? "EOS":
(APR_BUCKET_IS_FLUSH(b)? "FLUSH" : "META")) :
(APR_BUCKET_IS_FILE(b)? "FILE" : "DATA"));
+#endif
}
else if (DEEP_COPY) {
/* we have not managed the magic of passing buckets from
if (APR_BUCKET_IS_METADATA(b)) {
if (APR_BUCKET_IS_EOS(b)) {
APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc));
- ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
- "h2_util_move: %s, copied EOS bucket", msg);
}
else if (APR_BUCKET_IS_FLUSH(b)) {
APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to->bucket_alloc));
- ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
- "h2_util_move: %s, copied FLUSH bucket", msg);
}
else {
/* ignore */
}
}
- else if (pfile && FILE_MOVE && APR_BUCKET_IS_FILE(b)) {
+ else if (pfile_handles_allowed
+ && *pfile_handles_allowed > 0
+ && APR_BUCKET_IS_FILE(b)) {
/* We do not want to read files when passing buckets, if
* we can avoid it. However, what we've come up so far
* is not working corrently, resulting either in crashes or
apr_bucket_file *f = (apr_bucket_file *)b->data;
apr_file_t *fd = f->fd;
int setaside = (f->readpool != to->p);
+#if LOG_BUCKETS
ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
"h2_util_move: %s, moving FILE bucket %ld-%ld "
"from=%lx(p=%lx) to=%lx(p=%lx), setaside=%d",
msg, (long)b->start, (long)b->length,
(long)from, (long)from->p,
(long)to, (long)to->p, setaside);
+#endif
if (setaside) {
status = apr_file_setaside(&fd, fd, to->p);
- *pfile = fd;
if (status != APR_SUCCESS) {
ap_log_perror(APLOG_MARK, APLOG_ERR, status, to->p,
"h2_util: %s, setaside FILE", msg);
}
apr_brigade_insert_file(to, fd, b->start, b->length,
to->p);
+ --(*pfile_handles_allowed);
}
else {
const char *data;
status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
if (status == APR_SUCCESS && len > 0) {
status = apr_brigade_write(to, NULL, NULL, data, len);
+#if LOG_BUCKETS
ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
"h2_util_move: %s, copied bucket %ld-%ld "
"from=%lx(p=%lx) to=%lx(p=%lx)",
msg, (long)b->start, (long)b->length,
(long)from, (long)from->p,
(long)to, (long)to->p);
+#endif
}
}
apr_bucket_delete(b);
apr_bucket_setaside(b, to->p);
APR_BUCKET_REMOVE(b);
APR_BRIGADE_INSERT_TAIL(to, b);
+#if LOG_BUCKETS
ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
"h2_util_move: %s, passed setaside bucket %ld-%ld "
"from=%lx(p=%lx) to=%lx(p=%lx)",
msg, (long)b->start, (long)b->length,
(long)from, (long)from->p,
(long)to, (long)to->p);
+#endif
}
}
}
return status;
}
-apr_status_t h2_util_pass(apr_bucket_brigade *to, apr_bucket_brigade *from,
- apr_size_t maxlen, int count_virtual,
- const char *msg)
-{
- apr_status_t status = APR_SUCCESS;
-
- AP_DEBUG_ASSERT(to);
- AP_DEBUG_ASSERT(from);
-
- if (!APR_BRIGADE_EMPTY(from)) {
- apr_bucket *b, *end;
-
- status = last_not_included(from, maxlen, count_virtual, &end);
- if (status != APR_SUCCESS) {
- return status;
- }
-
- while (!APR_BRIGADE_EMPTY(from) && status == APR_SUCCESS) {
- b = APR_BRIGADE_FIRST(from);
- if (b == end) {
- break;
- }
-
- APR_BUCKET_REMOVE(b);
- if (APR_BUCKET_IS_METADATA(b)) {
- if (!APR_BUCKET_IS_EOS(b) && !APR_BUCKET_IS_FLUSH(b)) {
- apr_bucket_delete(b);
- continue;
- }
- }
- else if (b->length == 0) {
- apr_bucket_delete(b);
- continue;
- }
-
- APR_BRIGADE_INSERT_TAIL(to, b);
- ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
- "h2_util_pass: %s, passed bucket %ld-%ld, type=%s",
- msg, (long)b->start, (long)b->length,
- APR_BUCKET_IS_METADATA(b)?
- (APR_BUCKET_IS_EOS(b)? "EOS":
- (APR_BUCKET_IS_FLUSH(b)? "FLUSH" : "META")) :
- (APR_BUCKET_IS_FILE(b)? "FILE" : "DATA"));
- }
- }
-
- return status;
-}
-
apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from,
apr_size_t maxlen, const char *msg)
{
apr_status_t status = APR_SUCCESS;
+ (void)msg;
AP_DEBUG_ASSERT(to);
AP_DEBUG_ASSERT(from);
if (!APR_BRIGADE_EMPTY(from)) {
apr_bucket *b, *end, *cpy;
- status = last_not_included(from, maxlen, 1, &end);
+ status = last_not_included(from, maxlen, 0, 0, &end);
if (status != APR_SUCCESS) {
return status;
}
if (APR_BUCKET_IS_METADATA(b)) {
if (APR_BUCKET_IS_EOS(b)) {
APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc));
- ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
- "h2_util_copy: %s, copied EOS bucket", msg);
}
else if (APR_BUCKET_IS_FLUSH(b)) {
APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to->bucket_alloc));
- ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
- "h2_util_copy: %s, copied FLUSH bucket", msg);
}
else {
/* ignore */
status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
if (status == APR_SUCCESS && len > 0) {
status = apr_brigade_write(to, NULL, NULL, data, len);
+#if LOG_BUCKETS
ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
"h2_util_copy: %s, copied bucket %ld-%ld "
"from=%lx(p=%lx) to=%lx(p=%lx)",
msg, (long)b->start, (long)b->length,
(long)from, (long)from->p,
(long)to, (long)to->p);
+#endif
}
}
}
{
apr_bucket *b, *end;
- apr_status_t status = last_not_included(bb, len, 1, &end);
+ apr_status_t status = last_not_included(bb, len, 0, 0, &end);
if (status != APR_SUCCESS) {
return status;
}
return status;
}
-apr_status_t h2_util_bb_read(apr_bucket_brigade *bb, char *buffer,
- apr_size_t *plen, int *peos)
-{
- apr_status_t status = APR_SUCCESS;
- apr_size_t avail = *plen;
- apr_size_t written = 0;
-
- /* Copy data in our brigade into the buffer until it is filled or
- * we encounter an EOS.
- */
- *peos = 0;
- while ((status == APR_SUCCESS) && !APR_BRIGADE_EMPTY(bb)) {
-
- apr_bucket *b = APR_BRIGADE_FIRST(bb);
- if (APR_BUCKET_IS_METADATA(b)) {
- if (APR_BUCKET_IS_EOS(b)) {
- *peos = 1;
- }
- else {
- /* ignore */
- }
- }
- else if (avail <= 0) {
- break;
- }
- else {
- const char *data;
- apr_size_t data_len;
-
- if (b->length != ((apr_size_t)-1) && b->length > avail) {
- apr_bucket_split(b, avail);
- }
- status = apr_bucket_read(b, &data, &data_len,
- APR_NONBLOCK_READ);
- if (status == APR_SUCCESS && data_len > 0) {
- if (data_len > avail) {
- apr_bucket_split(b, avail);
- data_len = avail;
- }
- memcpy(buffer, data, data_len);
- avail -= data_len;
- buffer += data_len;
- written += data_len;
- }
- }
- apr_bucket_delete(b);
- }
-
- *plen = written;
- if (status == APR_SUCCESS && !*peos && !*plen) {
- return APR_EAGAIN;
- }
- return status;
-}
-
apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb,
h2_util_pass_cb *cb, void *ctx,
apr_size_t *plen, int *peos)
{
apr_status_t status = APR_SUCCESS;
- apr_size_t avail = *plen;
- apr_size_t written = 0;
- apr_bucket *next, *b = APR_BRIGADE_FIRST(bb);
int consume = (cb != NULL);
+ apr_size_t written = 0;
+ apr_size_t avail = *plen;
+ apr_bucket *next, *b;
/* Pass data in our brigade through the callback until the length
* is satisfied or we encounter an EOS.
const char *data = NULL;
apr_size_t data_len;
- if (b->length != ((apr_size_t)-1)) {
+ if (b->length == ((apr_size_t)-1)) {
+ /* read to determine length */
status = apr_bucket_read(b, &data, &data_len,
APR_NONBLOCK_READ);
}
* @param to the brigade to move the data to
* @param from the brigade to get the data from
* @param maxlen of bytes to move, 0 for all
- * @param count_virtual if virtual buckets like FILE do count against maxlen
+ * @param pfile_buckets_allowed how many file buckets may be moved,
+ * may be 0 or NULL
* @param msg message for use in logging
*/
apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from,
- apr_size_t maxlen, int count_virtual,
- apr_file_t **pfile, const char *msg);
+ apr_size_t maxlen, int *pfile_buckets_allowed,
+ const char *msg);
/**
* Copies buckets from one brigade into another. If maxlen > 0, it only
apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from,
apr_size_t maxlen, const char *msg);
-/**
- * Pass the buckets from one brigade into another, without any setaside.
- * Only recommended if both brigades use the same bucket alloc or if
- * you really know what you are doing.
- * @param to the brigade to pass the buckets to
- * @param from the brigade to get the buckets from
- * @param maxlen of bucket bytes to copy, 0 for all
- * @param count_virtual if virtual buckets like FILE do count against maxlen
- * @param msg message for use in logging
- */
-apr_status_t h2_util_pass(apr_bucket_brigade *to, apr_bucket_brigade *from,
- apr_size_t maxlen, int count_virtual,
- const char *msg);
-
/**
* Return != 0 iff there is a FLUSH or EOS bucket in the brigade.
* @param bb the brigade to check on
apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb,
apr_size_t *plen, int *peos);
-apr_status_t h2_util_bb_read(apr_bucket_brigade *bb, char *buffer,
- apr_size_t *plen, int *peos);
-
typedef apr_status_t h2_util_pass_cb(void *ctx,
const char *data, apr_size_t len);
#define MOD_H2_VERSION_NUM @PACKAGE_VERSION_NUM@
-/**
- * @macro
- * != 0 iff the nghttp2 library supports data callbacks.
- * Disabled for now since it lowers performance.
- */
-#define NGHTTP2_HAS_DATA_CBXXX @NGHTTP2_HAS_DATA_CB@
-#define NGHTTP2_HAS_DATA_CB 0
-
#endif /* mod_h2_h2_version_h */
}
+/**
+ * Get the next task for the given worker. Will block until a task arrives
+ * or the max_wait timer expires and more than min workers exist.
+ * The previous h2_mplx instance might be passed in and will be served
+ * with preference, since we can ask it for the next task without acquiring
+ * the h2_workers lock.
+ */
static apr_status_t get_mplx_next(h2_worker *worker, h2_mplx **pm,
h2_task **ptask, void *ctx)
{
}
if (!ptask) {
- /* of the worker does not want a next task, we're done.
+ /* the worker does not want a next task, we're done.
*/
return APR_SUCCESS;
}
status = apr_thread_mutex_lock(workers->lock);
if (status == APR_SUCCESS) {
++workers->idle_worker_count;
-
ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, workers->s,
"h2_worker(%d): looking for work", h2_worker_get_id(worker));
/* all fine, we know these ones */
break;
case H2_MPM_PREFORK:
- ap_log_error( APLOG_MARK, APLOG_WARNING, 0, s,
- "This httpd uses mpm_prefork for multiprocessing. "
- "Please take notice that mod_h2 always with run "
- "requests in a multi-threaded environment. If you "
- "use prefork for single-thread connection handling, "
- " mod_h2 might pose problems.");
+ /* ok, we now know how to handle that one */
break;
case H2_MPM_UNKNOWN:
/* ??? */
# limitations under the License.
#
-NGHTTP2_VERSION = 1.0.4
+NGHTTP2_VERSION = 1.0.5
NGHTTP2_DIR = nghttp2-$(NGHTTP2_VERSION)
NGHTTP2_TAR = $(NGHTTP2_DIR).tar.gz
NGHTTP2_URL = https://github.com/tatsuhiro-t/nghttp2/releases/download/v$(NGHTTP2_VERSION)/$(NGHTTP2_TAR)
SCHEME=https
-HOST=test.example.org
PORT=12346
+HOST=test.example.org
URL=$SCHEME://$HOST:$PORT
WRK=../wrk/wrk
H2LOAD=${H2LOAD:-sandbox/install/bin/h2load}
+RUNS=${RUNS:-200000}
+CONNECTIONS=${CONNECTIONS:-100}
+THREADS=${THREADS:-8}
+RUNWRK=0
+
run_wrk() {
path=$1
- $WRK -c100 -t${THREADS:-8} -d30s $URL$path >/tmp/$$.out 2>&1
+ $WRK -c${CONNECTIONS} -t${THREADS} -d15s $URL$path >/tmp/$$.out 2>&1
stat=$( fgrep 'Requests/sec: ' /tmp/$$.out )
reqs=${stat##Requests/sec: }
run_load() {
path=$1
- $H2LOAD -c 100 -t ${THREADS:-8} -m ${MAX_STREAMS:-1} -n 500000 $URL$path > /tmp/$$.out 2>&1
+ $H2LOAD -c ${CONNECTIONS} -t ${THREADS} -m ${MAX_STREAMS:-1} -n ${RUNS} $URL$path > /tmp/$$.out 2>&1
fin=$( fgrep 'finished in ' /tmp/$$.out )
stat=$( fgrep 'requests: ' /tmp/$$.out )
path=$1
max=$2
iterations=$3
- echo -n "m, wrk"
+ echo -n "m"
+ test "$RUNWRK" != 0 && echo -n ", wrk"
for i in $iterations; do
echo -n ", $i"
done
i=1
while [ $i -le $max ]; do
echo -n "$i"
- run_wrk $path
- echo -n ", $reqs"
+ if test "$RUNWRK" != 0; then
+ run_wrk $path
+ echo -n ", $reqs"
+ fi
run_m $path "$iterations"
echo ""
i=$[ i + 1 ]
done
}
-URL_PATH=$1
-
-echo "$URL_PATH, 500k requests, req/s"
-run_iter $URL_PATH 10 "1 2 3 4 5 6 7 8 9 10 15 20 40 100"
+for i in "$@"; do
+ URL_PATH=$i
+ echo "$URL_PATH, $RUNS requests, ${THREADS} threads, ${CONNECTIONS} connections, req/s"
+ run_iter $URL_PATH 5 "1 2 3 4 5 6 7 8 9 10 15 20 40 100"
+ echo
+done
# Defaults to "On"
#H2HackMpmEvent On
+ # If output data should be buffered or written directly.
+ # Defaults to "on" for TLS connections, "off" otherwise.
+ # H2BufferOutput On
+
+ # The size of the output buffer per connection.
+ # Defaults to 64k
+ # H2BufferSize
+
+ # The maximum number of bytes for a write operation from the buffer. This
+ # determines the maximum size of a TLS chunk, when buffering is used.
+ # Defaults to 16k
+ # H2BufferWriteMax
+
</IfModule>
# SSL Setup
################################################################################
<IfModule ssl_module>
- SSLCipherSuite HIGH:!aNULL:!MD5
+ SSLCipherSuite ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!3DES:!MD5:!PSK
SSLProtocol All -SSLv2 -SSLv3
SSLPassPhraseDialog builtin
SSLSessionCache "shmcb:SUBST_SERVER_ROOT_SUBST/logs/ssl_scache(512000)"