From abb1b803d66cabc01008cdb9263e9cf45fd88f5b Mon Sep 17 00:00:00 2001
From: Yann Ylavic
Date: Fri, 5 Dec 2014 13:41:38 +0000
Subject: [PATCH] mpm_event(opt): avoid casts/comparisons from unsigned to signed (atomics).

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1643279 13f79535-47bb-0310-9956-ffa450edef68
---
 server/mpm/event/event.c       | 22 +++++++++++---------
 server/mpm/event/fdqueue.c     | 37 ++++++++++++++++------------------
 server/mpm/eventopt/eventopt.c | 19 +++++++++--------
 server/mpm/eventopt/fdqueue.c  | 37 ++++++++++++++++------------------
 4 files changed, 58 insertions(+), 57 deletions(-)

diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
index 32e6e53f5b..ccd8896f5d 100644
--- a/server/mpm/event/event.c
+++ b/server/mpm/event/event.c
@@ -1692,6 +1692,7 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
     timer_event_t *te;
     const apr_pollfd_t *out_pfd;
     apr_int32_t num = 0;
+    apr_uint32_t c_count, l_count, i_count;
     apr_interval_time_t timeout_interval;
     apr_time_t now;
     int workers_were_busy = 0;
@@ -1865,11 +1866,12 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                          "All workers busy, not accepting new conns "
                          "in this process");
         }
-        else if ( (int)apr_atomic_read32(&connection_count)
-                  - (int)apr_atomic_read32(&lingering_count)
-                  > threads_per_child
-                  + ap_queue_info_get_idlers(worker_queue_info) *
-                    worker_factor / WORKER_FACTOR_SCALE)
+        else if ((c_count = apr_atomic_read32(&connection_count))
+                     > (l_count = apr_atomic_read32(&lingering_count))
+                 && (c_count - l_count
+                     > ap_queue_info_get_idlers(worker_queue_info)
+                       * worker_factor / WORKER_FACTOR_SCALE
+                       + threads_per_child))
         {
             if (!listeners_disabled)
                 disable_listensocks(process_slot);
@@ -2030,10 +2032,12 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
             ps->lingering_close = apr_atomic_read32(&lingering_count);
         }
         if (listeners_disabled && !workers_were_busy
-            && (int)apr_atomic_read32(&connection_count)
-               - (int)apr_atomic_read32(&lingering_count)
-               < ((int)ap_queue_info_get_idlers(worker_queue_info) - 1)
-                 * worker_factor / WORKER_FACTOR_SCALE + threads_per_child)
+            && ((c_count = apr_atomic_read32(&connection_count))
+                    >= (l_count = apr_atomic_read32(&lingering_count))
+                && (i_count = ap_queue_info_get_idlers(worker_queue_info)) > 0
+                && (c_count - l_count
+                    < (i_count - 1) * worker_factor / WORKER_FACTOR_SCALE
+                      + threads_per_child)))
         {
             listeners_disabled = 0;
             enable_listensocks(process_slot);
diff --git a/server/mpm/event/fdqueue.c b/server/mpm/event/fdqueue.c
index 485a98dc10..b9769f22e9 100644
--- a/server/mpm/event/fdqueue.c
+++ b/server/mpm/event/fdqueue.c
@@ -97,15 +97,11 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_t * queue_info,
                                     apr_pool_t * pool_to_recycle)
 {
     apr_status_t rv;
-    apr_int32_t prev_idlers;
 
     ap_push_pool(queue_info, pool_to_recycle);
 
-    /* Atomically increment the count of idle workers */
-    prev_idlers = apr_atomic_inc32(&(queue_info->idlers)) - zero_pt;
-
     /* If other threads are waiting on a worker, wake one up */
-    if (prev_idlers < 0) {
+    if (apr_atomic_inc32(&queue_info->idlers) < zero_pt) {
         rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
         if (rv != APR_SUCCESS) {
             AP_DEBUG_ASSERT(0);
@@ -127,9 +123,13 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_t * queue_info,
 
 apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
 {
-    apr_int32_t new_idlers;
-    new_idlers = apr_atomic_add32(&(queue_info->idlers), -1) - zero_pt;
-    if (--new_idlers <= 0) {
+    /* Don't block if there isn't any idle worker.
+     * apr_atomic_add32(x, -1) does the same as dec32(x), except
+     * that it returns the previous value (unlike dec32's bool).
+     *
+     * XXX: why don't we consume the last idler?
+     */
+    if (apr_atomic_add32(&(queue_info->idlers), -1) <= zero_pt + 1) {
         apr_atomic_inc32(&(queue_info->idlers));    /* back out dec */
         return APR_EAGAIN;
     }
@@ -140,18 +140,15 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info,
                                           int *had_to_block)
 {
     apr_status_t rv;
-    apr_int32_t prev_idlers;
 
-    /* Atomically decrement the idle worker count, saving the old value */
-    /* See TODO in ap_queue_info_set_idle() */
-    prev_idlers = apr_atomic_add32(&(queue_info->idlers), -1) - zero_pt;
-
-    /* Block if there weren't any idle workers */
-    if (prev_idlers <= 0) {
+    /* Block if there isn't any idle worker.
+     * apr_atomic_add32(x, -1) does the same as dec32(x), except
+     * that it returns the previous value (unlike dec32's bool).
+     */
+    if (apr_atomic_add32(&queue_info->idlers, -1) <= zero_pt) {
         rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
         if (rv != APR_SUCCESS) {
             AP_DEBUG_ASSERT(0);
-            /* See TODO in ap_queue_info_set_idle() */
             apr_atomic_inc32(&(queue_info->idlers));    /* back out dec */
             return rv;
         }
@@ -205,11 +202,11 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info,
 
 apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t * queue_info)
 {
-    apr_int32_t val;
-    val = (apr_int32_t)apr_atomic_read32(&queue_info->idlers) - zero_pt;
-    if (val < 0)
+    apr_uint32_t val;
+    val = apr_atomic_read32(&queue_info->idlers);
+    if (val <= zero_pt)
         return 0;
-    return val;
+    return val - zero_pt;
 }
 
 void ap_push_pool(fd_queue_info_t * queue_info,
diff --git a/server/mpm/eventopt/eventopt.c b/server/mpm/eventopt/eventopt.c
index a5d1c7881d..f30e94c7db 100644
--- a/server/mpm/eventopt/eventopt.c
+++ b/server/mpm/eventopt/eventopt.c
@@ -1543,6 +1543,7 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
     apr_signal(LISTENER_SIGNAL, dummy_signal_handler);
 
     for (;;) {
+        apr_uint32_t i_count;
         int workers_were_busy = 0;
         if (listener_may_exit) {
             close_listeners(process_slot, &closed);
@@ -1689,7 +1690,7 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
             }
             else if (pt->type == PT_ACCEPT) {
                 int skip_accept = 0;
-                int connection_count_local = connection_count;
+                apr_uint32_t connection_count_local = connection_count;
 
                 /* A Listener Socket is ready for an accept() */
                 if (workers_were_busy) {
@@ -1702,9 +1703,10 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                     listeners_disabled = 0;
                     enable_listensocks(process_slot);
                 }
-                else if (connection_count_local > threads_per_child
-                         + ap_queue_info_get_idlers(worker_queue_info) *
-                           worker_factor / WORKER_FACTOR_SCALE)
+                else if (connection_count_local >
+                         (ap_queue_info_get_idlers(worker_queue_info)
+                          * worker_factor / WORKER_FACTOR_SCALE
+                          + threads_per_child))
                 {
                     skip_accept = 1;
                     ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
@@ -1847,10 +1849,11 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
             ps->suspended = apr_atomic_read32(&suspended_count);
             ps->lingering_close = apr_atomic_read32(&lingering_count);
         }
-        if (listeners_disabled && !workers_were_busy &&
-            (int)apr_atomic_read32(&connection_count) <
-            ((int)ap_queue_info_get_idlers(worker_queue_info) - 1) *
-            worker_factor / WORKER_FACTOR_SCALE + threads_per_child)
+        if (listeners_disabled && !workers_were_busy
+            && (i_count = ap_queue_info_get_idlers(worker_queue_info)) > 0
+            && (apr_atomic_read32(&connection_count)
+                < (i_count - 1) * worker_factor / WORKER_FACTOR_SCALE
+                  + threads_per_child))
         {
             listeners_disabled = 0;
             enable_listensocks(process_slot);
diff --git a/server/mpm/eventopt/fdqueue.c b/server/mpm/eventopt/fdqueue.c
index d32a837f70..23f4523c39 100644
--- a/server/mpm/eventopt/fdqueue.c
+++ b/server/mpm/eventopt/fdqueue.c
@@ -97,15 +97,11 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_t * queue_info,
                                     apr_pool_t * pool_to_recycle)
 {
     apr_status_t rv;
-    apr_int32_t prev_idlers;
 
     ap_push_pool(queue_info, pool_to_recycle);
 
-    /* Atomically increment the count of idle workers */
-    prev_idlers = apr_atomic_inc32(&(queue_info->idlers)) - zero_pt;
-
     /* If other threads are waiting on a worker, wake one up */
-    if (prev_idlers < 0) {
+    if (apr_atomic_inc32(&queue_info->idlers) < zero_pt) {
         rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
         if (rv != APR_SUCCESS) {
             AP_DEBUG_ASSERT(0);
@@ -127,9 +123,13 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_t * queue_info,
 
 apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
 {
-    apr_int32_t new_idlers;
-    new_idlers = apr_atomic_add32(&(queue_info->idlers), -1) - zero_pt;
-    if (--new_idlers <= 0) {
+    /* Don't block if there isn't any idle worker.
+     * apr_atomic_add32(x, -1) does the same as dec32(x), except
+     * that it returns the previous value (whereas dec32 is a bool).
+     *
+     * XXX: why don't we consume the last idler?
+     */
+    if (apr_atomic_add32(&(queue_info->idlers), -1) <= zero_pt + 1) {
         apr_atomic_inc32(&(queue_info->idlers));    /* back out dec */
         return APR_EAGAIN;
     }
@@ -140,18 +140,15 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info,
                                           int *had_to_block)
 {
     apr_status_t rv;
-    apr_int32_t prev_idlers;
 
-    /* Atomically decrement the idle worker count, saving the old value */
-    /* See TODO in ap_queue_info_set_idle() */
-    prev_idlers = apr_atomic_add32(&(queue_info->idlers), -1) - zero_pt;
-
-    /* Block if there weren't any idle workers */
-    if (prev_idlers <= 0) {
+    /* Block if there isn't any idle worker.
+     * apr_atomic_add32(x, -1) does the same as dec32(x), except
+     * that it returns the previous value (whereas dec32 is a bool).
+     */
+    if (apr_atomic_add32(&queue_info->idlers, -1) <= zero_pt) {
         rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
         if (rv != APR_SUCCESS) {
             AP_DEBUG_ASSERT(0);
-            /* See TODO in ap_queue_info_set_idle() */
             apr_atomic_inc32(&(queue_info->idlers));    /* back out dec */
             return rv;
         }
@@ -205,11 +202,11 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info,
 
 apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t * queue_info)
 {
-    apr_int32_t val;
-    val = (apr_int32_t)apr_atomic_read32(&queue_info->idlers) - zero_pt;
-    if (val < 0)
+    apr_uint32_t val;
+    val = apr_atomic_read32(&queue_info->idlers);
+    if (val <= zero_pt)
         return 0;
-    return val;
+    return val - zero_pt;
 }
 
 void ap_push_pool(fd_queue_info_t * queue_info,
-- 
2.40.0
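
Note (not part of the patch): the whole change leans on one idiom, biasing the
unsigned idlers counter by a constant (zero_pt) so that both "idlers available"
and "waiters outstanding" can be tested with unsigned comparisons, without
casting the atomic value to a signed type. The following is a minimal,
standalone sketch of that idiom, not httpd code: the demo_* names and the
DEMO_ZERO_PT value are invented for illustration, and C11 <stdatomic.h> stands
in for the APR atomics (like apr_atomic_inc32()/apr_atomic_add32(),
atomic_fetch_add()/atomic_fetch_sub() return the counter's previous value).

/*
 * Standalone illustration (NOT httpd code) of the zero_pt idiom used by the
 * fdqueue idlers counter after this patch: the counter starts at a mid-range
 * bias, values above the bias mean idle workers are available, values at or
 * below it mean none are (or waiters are outstanding), all in unsigned math.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_ZERO_PT ((uint32_t)1 << 31)            /* arbitrary mid-range bias */

static _Atomic uint32_t demo_idlers = DEMO_ZERO_PT; /* bias value == zero idlers */

/* A worker goes idle.  Returns nonzero if the pre-increment value was below
 * the bias, i.e. a waiter had already decremented past "zero" and should be
 * woken, mirroring the test in the patched ap_queue_info_set_idle(). */
static int demo_set_idle(void)
{
    return atomic_fetch_add(&demo_idlers, 1) < DEMO_ZERO_PT;
}

/* Try to take an idler without blocking.  The comparison is the one the patch
 * uses in ap_queue_info_wait_for_idler() (previous value <= bias means no
 * idler), but instead of blocking we back the decrement out, the way
 * ap_queue_info_try_get_idler() does on failure. */
static int demo_take_idler(void)
{
    if (atomic_fetch_sub(&demo_idlers, 1) <= DEMO_ZERO_PT) {
        atomic_fetch_add(&demo_idlers, 1);          /* back out dec */
        return 0;
    }
    return 1;
}

/* Current number of idlers, never "negative": mirrors the patched
 * ap_queue_info_get_idlers(). */
static uint32_t demo_get_idlers(void)
{
    uint32_t val = atomic_load(&demo_idlers);
    return val <= DEMO_ZERO_PT ? 0 : val - DEMO_ZERO_PT;
}

int main(void)
{
    printf("%u idlers\n", (unsigned)demo_get_idlers());   /* 0 */
    printf("wake needed: %d\n", demo_set_idle());          /* 0 */
    demo_set_idle();
    printf("%u idlers\n", (unsigned)demo_get_idlers());   /* 2 */
    printf("got idler: %d\n", demo_take_idler());          /* 1 */
    printf("got idler: %d\n", demo_take_idler());          /* 1 */
    printf("got idler: %d\n", demo_take_idler());          /* 0 */
    return 0;
}

The bias makes the "negative" region representable without signed arithmetic:
anything at or below the bias reads as zero idlers, and the distance above the
bias is the idler count, which is exactly what the patched
ap_queue_info_get_idlers() returns.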