From 409b44d15896c26b801f440ca376d5191aa9de26 Mon Sep 17 00:00:00 2001
From: Graham Leggett
Date: Sat, 16 Nov 2013 20:05:42 +0000
Subject: [PATCH] event: Use skiplist and fold in performance tweeks from trunk

trunk patch: http://svn.apache.org/viewvc?view=revision&revision=1451706
             http://svn.apache.org/viewvc?view=revision&revision=1517365
             http://svn.apache.org/viewvc?view=revision&revision=1529442
2.4.x patch: http://people.apache.org/~jim/patches/httpd-2.4-event-v3.patch
Submitted by: jim
Reviewed by: rjung, minfrin

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1542560 13f79535-47bb-0310-9956-ffa450edef68
---
 CHANGES                    |   3 +
 STATUS                     |  13 ----
 server/mpm/config.m4       |  11 ++++
 server/mpm/event/config.m4 |   2 +
 server/mpm/event/event.c   | 127 ++++++++++++++++++++++---------------
 5 files changed, 92 insertions(+), 64 deletions(-)

diff --git a/CHANGES b/CHANGES
index 7c9aab9683..29286d236b 100644
--- a/CHANGES
+++ b/CHANGES
@@ -2,6 +2,9 @@
 
 Changes with Apache 2.4.7
 
+  *) event: Use skiplist data structure (requires APR 1.5).
+     [Jim Jagielski]
+
   *) mpm_unix: Add ap_mpm_podx_* implementation to avoid code
      duplication and align w/ trunk. [Jim Jagielski]
 
diff --git a/STATUS b/STATUS
index aba4d528b6..d513fef924 100644
--- a/STATUS
+++ b/STATUS
@@ -97,19 +97,6 @@ RELEASE SHOWSTOPPERS:
 PATCHES ACCEPTED TO BACKPORT FROM TRUNK:
   [ start all new proposals below, under PATCHES PROPOSED. ]
 
-  * event: Use skiplist and fold in performance tweeks from trunk
-    trunk patch: http://svn.apache.org/viewvc?view=revision&revision=1451706
-                 http://svn.apache.org/viewvc?view=revision&revision=1517365
-                 http://svn.apache.org/viewvc?view=revision&revision=1529442
-    2.4.x patch: http://people.apache.org/~jim/patches/httpd-2.4-event-v3.patch
-    +1: jim, rjung, minfrin
-    trawick: needs something like r1529442 to disable event if APR 1.4.x is being used;
-      (since eventopt skiplist changes aren't proposed, r1529442 would need to
-       be tweaked)
-      jj: Done in v3 of patch
-    jj: Reminder: this patch has been running for a LONG time
-        on ASF infra...
-
 
 PATCHES PROPOSED TO BACKPORT FROM TRUNK:
 
diff --git a/server/mpm/config.m4 b/server/mpm/config.m4
index 0b263cbc56..07118bd904 100644
--- a/server/mpm/config.m4
+++ b/server/mpm/config.m4
@@ -38,6 +38,17 @@ AC_CACHE_CHECK([whether APR supports thread-safe pollsets], [ac_cv_have_threadsa
   fi
 ])
 
+dnl See if APR has skiplist
+dnl The base httpd prereq is APR 1.4.x, so we don't have to consider
+dnl earlier versions.
+case $APR_VERSION in
+  1.4*)
+    apr_has_skiplist=no
+    ;;
+  *)
+    apr_has_skiplist=yes
+esac
+
 dnl See if this is a forking platform w.r.t. MPMs
 case $host in
     *mingw32* | *os2-emx*)
diff --git a/server/mpm/event/config.m4 b/server/mpm/event/config.m4
index 351f1acf4b..c891c75805 100644
--- a/server/mpm/event/config.m4
+++ b/server/mpm/event/config.m4
@@ -7,6 +7,8 @@ elif test $have_threaded_sig_graceful != yes; then
     AC_MSG_RESULT(no - SIG_GRACEFUL cannot be used with a threaded MPM)
 elif test $ac_cv_have_threadsafe_pollset != yes; then
     AC_MSG_RESULT(no - APR_POLLSET_THREADSAFE is not supported)
+elif test $apr_has_skiplist != yes; then
+    AC_MSG_RESULT(no - APR skiplist is not available, need APR 1.5.x or later)
 else
     AC_MSG_RESULT(yes)
     APACHE_MPM_SUPPORTED(event, yes, yes)
diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
index 77a64ac29e..306837dbd1 100644
--- a/server/mpm/event/event.c
+++ b/server/mpm/event/event.c
@@ -91,6 +91,7 @@
 #include "mpm_default.h"
 #include "http_vhost.h"
 #include "unixd.h"
+#include "apr_skiplist.h"
 
 #include <signal.h>
 #include <limits.h>             /* for INT_MAX */
@@ -1025,8 +1026,6 @@ read_request:
         return;
     }
     else if (cs->pub.state == CONN_STATE_CHECK_REQUEST_LINE_READABLE) {
-        apr_status_t rc;
-
         /* It greatly simplifies the logic to use a single timeout value here
          * because the new element can just be added to the end of the list and
          * it will stay sorted in expiration time sequence. If brand new
@@ -1063,7 +1062,6 @@ read_request:
          * or timeout.
          */
         c->sbh = NULL;
-
         return;
     }
 
@@ -1227,34 +1225,44 @@ static void get_worker(int *have_idle_worker_p, int blocking, int *all_busy)
     }
 }
 
-/* XXXXXX: Convert to skiplist or other better data structure
- * (yes, this is VERY VERY VERY VERY BAD)
- */
-
 /* Structures to reuse */
 static APR_RING_HEAD(timer_free_ring_t, timer_event_t) timer_free_ring;
-/* Active timers */
-static APR_RING_HEAD(timer_ring_t, timer_event_t) timer_ring;
 
-static apr_thread_mutex_t *g_timer_ring_mtx;
+static apr_skiplist *timer_skiplist;
+
+static int indexing_comp(void *a, void *b)
+{
+    apr_time_t t1 = (apr_time_t) (((timer_event_t *) a)->when);
+    apr_time_t t2 = (apr_time_t) (((timer_event_t *) b)->when);
+    AP_DEBUG_ASSERT(t1);
+    AP_DEBUG_ASSERT(t2);
+    return ((t1 < t2) ? -1 : ((t1 > t2) ? 1 : 0));
+}
+
+static int indexing_compk(void *ac, void *b)
+{
+    apr_time_t *t1 = (apr_time_t *) ac;
+    apr_time_t t2 = (apr_time_t) (((timer_event_t *) b)->when);
+    AP_DEBUG_ASSERT(t2);
+    return ((*t1 < t2) ? -1 : ((*t1 > t2) ? 1 : 0));
+}
+
+static apr_thread_mutex_t *g_timer_skiplist_mtx;
 
 static apr_status_t event_register_timed_callback(apr_time_t t,
                                                   ap_mpm_callback_fn_t *cbfn,
                                                   void *baton)
 {
-    int inserted = 0;
-    timer_event_t *ep;
     timer_event_t *te;
     /* oh yeah, and make locking smarter/fine grained. */
-    apr_thread_mutex_lock(g_timer_ring_mtx);
+    apr_thread_mutex_lock(g_timer_skiplist_mtx);
 
     if (!APR_RING_EMPTY(&timer_free_ring, timer_event_t, link)) {
         te = APR_RING_FIRST(&timer_free_ring);
         APR_RING_REMOVE(te, link);
     }
     else {
-        /* XXXXX: lol, pool allocation without a context from any thread. Yeah. Right. MPMs Suck. */
-        te = ap_malloc(sizeof(timer_event_t));
+        te = apr_skiplist_alloc(timer_skiplist, sizeof(timer_event_t));
         APR_RING_ELEM_INIT(te, link);
     }
 
@@ -1264,27 +1272,14 @@ static apr_status_t event_register_timed_callback(apr_time_t t,
     te->when = t + apr_time_now();
 
     /* Okay, insert sorted by when.. */
-    for (ep = APR_RING_FIRST(&timer_ring);
-         ep != APR_RING_SENTINEL(&timer_ring,
-                                 timer_event_t, link);
-         ep = APR_RING_NEXT(ep, link))
-    {
-        if (ep->when > te->when) {
-            inserted = 1;
-            APR_RING_INSERT_BEFORE(ep, te, link);
-            break;
-        }
-    }
-
-    if (!inserted) {
-        APR_RING_INSERT_TAIL(&timer_ring, te, timer_event_t, link);
-    }
+    apr_skiplist_insert(timer_skiplist, (void *)te);
 
-    apr_thread_mutex_unlock(g_timer_ring_mtx);
+    apr_thread_mutex_unlock(g_timer_skiplist_mtx);
 
     return APR_SUCCESS;
 }
 
+
 /*
  * Close socket and clean up if remote closed its end while we were in
  * lingering close.
  */
@@ -1306,7 +1301,7 @@ static void process_lingering_close(event_conn_state_t *cs, const apr_pollfd_t *
         rv = apr_socket_recv(csd, dummybuf, &nbytes);
     } while (rv == APR_SUCCESS);
 
-    if (!APR_STATUS_IS_EOF(rv)) {
+    if (APR_STATUS_IS_EAGAIN(rv)) {
         return;
     }
 
@@ -1447,9 +1442,9 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
             }
         }
 
-        apr_thread_mutex_lock(g_timer_ring_mtx);
-        if (!APR_RING_EMPTY(&timer_ring, timer_event_t, link)) {
-            te = APR_RING_FIRST(&timer_ring);
+        apr_thread_mutex_lock(g_timer_skiplist_mtx);
+        te = apr_skiplist_peek(timer_skiplist);
+        if (te) {
             if (te->when > now) {
                 timeout_interval = te->when - now;
             }
@@ -1460,7 +1455,7 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
         else {
             timeout_interval = apr_time_from_msec(100);
         }
-        apr_thread_mutex_unlock(g_timer_ring_mtx);
+        apr_thread_mutex_unlock(g_timer_skiplist_mtx);
 
         rc = apr_pollset_poll(event_pollset, timeout_interval, &num, &out_pfd);
         if (rc != APR_SUCCESS) {
@@ -1483,21 +1478,19 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
         }
 
         now = apr_time_now();
-        apr_thread_mutex_lock(g_timer_ring_mtx);
-        for (ep = APR_RING_FIRST(&timer_ring);
-             ep != APR_RING_SENTINEL(&timer_ring,
-                                     timer_event_t, link);
-             ep = APR_RING_FIRST(&timer_ring))
-        {
+        apr_thread_mutex_lock(g_timer_skiplist_mtx);
+        ep = apr_skiplist_peek(timer_skiplist);
+        while (ep) {
             if (ep->when < now + EVENT_FUDGE_FACTOR) {
-                APR_RING_REMOVE(ep, link);
+                apr_skiplist_pop(timer_skiplist, NULL);
                 push_timer2worker(ep);
             }
             else {
                 break;
             }
+            ep = apr_skiplist_peek(timer_skiplist);
         }
-        apr_thread_mutex_unlock(g_timer_ring_mtx);
+        apr_thread_mutex_unlock(g_timer_skiplist_mtx);
 
         while (num) {
             pt = (listener_poll_type *) out_pfd->client_data;
@@ -1811,9 +1804,9 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
             te->cbfunc(te->baton);
 
             {
-                apr_thread_mutex_lock(g_timer_ring_mtx);
+                apr_thread_mutex_lock(g_timer_skiplist_mtx);
                 APR_RING_INSERT_TAIL(&timer_free_ring, te, timer_event_t, link);
-                apr_thread_mutex_unlock(g_timer_ring_mtx);
+                apr_thread_mutex_unlock(g_timer_skiplist_mtx);
             }
         }
         else {
@@ -1887,6 +1880,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
     int loops;
     int prev_threads_created;
     int max_recycled_pools = -1;
+    int good_methods[] = {APR_POLLSET_KQUEUE, APR_POLLSET_PORT, APR_POLLSET_EPOLL};
 
     /* We must create the fd queues before we start up the listener
      * and worker threads. */
@@ -1925,18 +1919,34 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
     }
 
     /* Create the main pollset */
-    rv = apr_pollset_create(&event_pollset,
-                            threads_per_child, /* XXX don't we need more, to handle
+    for (i = 0; i < sizeof(good_methods) / sizeof(void*); i++) {
+        rv = apr_pollset_create_ex(&event_pollset,
+                            threads_per_child*2, /* XXX don't we need more, to handle
                                                 * connections in K-A or lingering
                                                 * close?
                                                 */
-                            pchild, APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
+                            pchild, APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY | APR_POLLSET_NODEFAULT,
+                            good_methods[i]);
+        if (rv == APR_SUCCESS) {
+            break;
+        }
+    }
+    if (rv != APR_SUCCESS) {
+        rv = apr_pollset_create(&event_pollset,
+                            threads_per_child*2, /* XXX don't we need more, to handle
+                                                * connections in K-A or lingering
+                                                * close?
+                                                */
+                            pchild, APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
+    }
     if (rv != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                      "apr_pollset_create with Thread Safety failed.");
         clean_child_exit(APEXIT_CHILDFATAL);
     }
+    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02471)
+                 "start_threads: Using %s", apr_pollset_method_name(event_pollset));
 
     worker_sockets = apr_pcalloc(pchild, threads_per_child
                                  * sizeof(apr_socket_t *));
 
@@ -2097,9 +2107,10 @@ static void child_main(int child_num_arg)
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
-    apr_thread_mutex_create(&g_timer_ring_mtx, APR_THREAD_MUTEX_DEFAULT, pchild);
+    apr_thread_mutex_create(&g_timer_skiplist_mtx, APR_THREAD_MUTEX_DEFAULT, pchild);
     APR_RING_INIT(&timer_free_ring, timer_event_t, link);
-    APR_RING_INIT(&timer_ring, timer_event_t, link);
+    apr_skiplist_init(&timer_skiplist, pchild);
+    apr_skiplist_set_compare(timer_skiplist, indexing_comp, indexing_compk);
     ap_run_child_init(pchild, ap_server_conf);
 
     /* done with init critical section */
@@ -2820,6 +2831,8 @@ static int event_open_logs(apr_pool_t * p, apr_pool_t * plog,
             return DONE;
         }
     }
+    /* for skiplist */
+    srand((unsigned int)apr_time_now());
     return OK;
 }
 
@@ -2853,6 +2866,18 @@ static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog,
     }
     ++retained->module_loads;
     if (retained->module_loads == 2) {
+        int i;
+        static apr_uint32_t foo = 0;
+
+        apr_atomic_inc32(&foo);
+        apr_atomic_dec32(&foo);
+        apr_atomic_dec32(&foo);
+        i = apr_atomic_dec32(&foo);
+        if (i >= 0) {
+            ap_log_error(APLOG_MARK, APLOG_CRIT, 0, NULL, APLOGNO(02405)
+                         "atomics not working as expected");
+            return HTTP_INTERNAL_SERVER_ERROR;
+        }
         rv = apr_pollset_create(&event_pollset, 1, plog,
                                 APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
         if (rv != APR_SUCCESS) {
-- 
2.50.1
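Note on the timer change above: the patch replaces the O(n) insertion into a sorted APR_RING with an apr_skiplist ordered on the timer's absolute expiry ("when"), protected by g_timer_skiplist_mtx; the listener then only peeks at the head for the next timeout and pops entries that have expired. The standalone sketch below is not part of the patch; it shows the same pattern against the public APR 1.5 skiplist API, and demo_timer_t, timer_comp and timer_compk are hypothetical stand-ins for the MPM's timer_event_t, indexing_comp and indexing_compk.

/* Sketch: ordered timer queue on apr_skiplist (assumes APR 1.5+).
 * Build with something like:
 *   cc skiplist_timers.c $(apr-1-config --cflags --includes --link-ld)
 */
#include <stdio.h>

#include "apr_general.h"
#include "apr_pools.h"
#include "apr_time.h"
#include "apr_skiplist.h"

typedef struct demo_timer_t {
    apr_time_t when;            /* absolute expiry, like timer_event_t.when */
    const char *name;
} demo_timer_t;

/* Element-vs-element comparator, same shape as indexing_comp() above. */
static int timer_comp(void *a, void *b)
{
    apr_time_t t1 = ((demo_timer_t *) a)->when;
    apr_time_t t2 = ((demo_timer_t *) b)->when;
    return (t1 < t2) ? -1 : ((t1 > t2) ? 1 : 0);
}

/* Key-vs-element comparator, same shape as indexing_compk() above. */
static int timer_compk(void *ac, void *b)
{
    apr_time_t *t1 = (apr_time_t *) ac;
    apr_time_t t2 = ((demo_timer_t *) b)->when;
    return (*t1 < t2) ? -1 : ((*t1 > t2) ? 1 : 0);
}

int main(void)
{
    apr_pool_t *pool;
    apr_skiplist *timers;
    demo_timer_t events[3];
    demo_timer_t *next;
    apr_time_t now;
    int i;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    /* child_main() equivalent: create the list and install both comparators. */
    apr_skiplist_init(&timers, pool);
    apr_skiplist_set_compare(timers, timer_comp, timer_compk);

    /* event_register_timed_callback() equivalent: insert in any order,
     * the skiplist keeps entries sorted by 'when'. */
    now = apr_time_now();
    events[0].when = now + apr_time_from_sec(3); events[0].name = "third";
    events[1].when = now + apr_time_from_sec(1); events[1].name = "first";
    events[2].when = now + apr_time_from_sec(2); events[2].name = "second";
    for (i = 0; i < 3; i++) {
        apr_skiplist_insert(timers, &events[i]);
    }

    /* listener_thread() equivalent: the earliest expiry is always at the
     * head, so peek yields the next timeout and pop dispatches in order. */
    while ((next = apr_skiplist_peek(timers)) != NULL) {
        printf("due in %" APR_TIME_T_FMT " usec: %s\n",
               next->when - now, next->name);
        apr_skiplist_pop(timers, NULL);  /* NULL: list does not own the storage */
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}

The mutex in the MPM is still required around insert/peek/pop because apr_skiplist does no locking of its own; the skiplist only buys O(log n) ordered insertion in place of the old linear ring walk.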
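The start_threads() hunk also stops relying on APR's default pollset implementation: it probes kqueue, event ports and epoll explicitly via apr_pollset_create_ex() with APR_POLLSET_NODEFAULT and only then falls back to plain apr_pollset_create(). A minimal sketch of that probing idiom follows; it is not part of the patch, the pollset size of 64 and the stdout reporting are illustrative choices, and it assumes APR 1.4 or later for apr_pollset_create_ex() and apr_pollset_method_name().

/* Sketch: probe specific pollset backends before falling back to the default. */
#include <stdio.h>

#include "apr_general.h"
#include "apr_pools.h"
#include "apr_errno.h"
#include "apr_poll.h"

int main(void)
{
    apr_pool_t *pool;
    apr_pollset_t *pollset = NULL;
    apr_status_t rv = APR_ENOTIMPL;
    /* Preferred kernel-backed methods, tried in order as in start_threads(). */
    apr_pollset_method_e methods[] = {
        APR_POLLSET_KQUEUE, APR_POLLSET_PORT, APR_POLLSET_EPOLL
    };
    unsigned int i;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    /* APR_POLLSET_NODEFAULT makes the call fail instead of silently falling
     * back, so each method can be probed explicitly. */
    for (i = 0; i < sizeof(methods) / sizeof(methods[0]); i++) {
        rv = apr_pollset_create_ex(&pollset, 64, pool,
                                   APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY
                                   | APR_POLLSET_NODEFAULT,
                                   methods[i]);
        if (rv == APR_SUCCESS) {
            break;
        }
    }
    if (rv != APR_SUCCESS) {
        /* Last resort: let APR pick whatever default it supports. */
        rv = apr_pollset_create(&pollset, 64, pool,
                                APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
    }
    if (rv == APR_SUCCESS) {
        printf("pollset method: %s\n", apr_pollset_method_name(pollset));
    }
    else {
        char buf[120];
        printf("no usable pollset: %s\n", apr_strerror(rv, buf, sizeof(buf)));
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}

APR_POLLSET_NODEFAULT is what turns an unavailable method into an error rather than a silent downgrade, which is why the explicit apr_pollset_create() fallback remains necessary.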