timer_event_t *te;
const apr_pollfd_t *out_pfd;
apr_int32_t num = 0;
+ apr_uint32_t c_count, l_count, i_count;
apr_interval_time_t timeout_interval;
apr_time_t now;
int workers_were_busy = 0;
"All workers busy, not accepting new conns "
"in this process");
}
- else if ( (int)apr_atomic_read32(&connection_count)
- - (int)apr_atomic_read32(&lingering_count)
- > threads_per_child
- + ap_queue_info_get_idlers(worker_queue_info) *
- worker_factor / WORKER_FACTOR_SCALE)
+ else if ((c_count = apr_atomic_read32(&connection_count))
+ > (l_count = apr_atomic_read32(&lingering_count))
+ && (c_count - l_count
+ > ap_queue_info_get_idlers(worker_queue_info)
+ * worker_factor / WORKER_FACTOR_SCALE
+ + threads_per_child))
{
if (!listeners_disabled)
disable_listensocks(process_slot);
ps->lingering_close = apr_atomic_read32(&lingering_count);
}
if (listeners_disabled && !workers_were_busy
- && (int)apr_atomic_read32(&connection_count)
- - (int)apr_atomic_read32(&lingering_count)
- < ((int)ap_queue_info_get_idlers(worker_queue_info) - 1)
- * worker_factor / WORKER_FACTOR_SCALE + threads_per_child)
+ && ((c_count = apr_atomic_read32(&connection_count))
+ >= (l_count = apr_atomic_read32(&lingering_count))
+ && (i_count = ap_queue_info_get_idlers(worker_queue_info)) > 0
+ && (c_count - l_count
+ < (i_count - 1) * worker_factor / WORKER_FACTOR_SCALE
+ + threads_per_child)))
{
listeners_disabled = 0;
enable_listensocks(process_slot);
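
Aside for the hunk above, not part of the patch: with the counters now unsigned, the explicit ordering checks (c_count > l_count before subtracting, i_count > 0 before i_count - 1) are what keep the differences from wrapping around. A minimal standalone sketch of the hazard, with hypothetical values:

/* Standalone sketch, not part of the patch.  c_count and l_count come
 * from two separate atomic reads, so l_count can in principle be the
 * larger one for a moment; all values here are made up. */
#include <stdio.h>

int main(void)
{
    unsigned int c_count = 3, l_count = 5;
    unsigned int threads_per_child = 25;    /* hypothetical ThreadsPerChild */

    /* Without the c_count > l_count guard the unsigned subtraction
     * wraps to UINT_MAX - 1 and the "too busy" branch fires spuriously: */
    printf("%u\n", c_count - l_count);
    printf("%d\n", (c_count - l_count) > threads_per_child);   /* prints 1 */

    /* The patch only evaluates c_count - l_count once c_count > l_count
     * has been established, so the difference is always meaningful. */
    return 0;
}
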
apr_pool_t * pool_to_recycle)
{
apr_status_t rv;
- apr_int32_t prev_idlers;
ap_push_pool(queue_info, pool_to_recycle);
- /* Atomically increment the count of idle workers */
- prev_idlers = apr_atomic_inc32(&(queue_info->idlers)) - zero_pt;
-
/* If other threads are waiting on a worker, wake one up */
- if (prev_idlers < 0) {
+ if (apr_atomic_inc32(&queue_info->idlers) < zero_pt) {
rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
if (rv != APR_SUCCESS) {
AP_DEBUG_ASSERT(0);
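
Background for the test above, not part of the patch: queue_info->idlers is kept biased by zero_pt, so values below the bias mean threads are blocked waiting for an idler rather than a negative idler count. apr_atomic_inc32() returns the value before the increment, which is what makes "< zero_pt" the wake-someone-up condition. A standalone sketch with a fake, non-atomic stand-in and an assumed zero_pt:

/* Standalone sketch, not part of the patch.  zero_pt is assumed here;
 * fake_inc32() mimics apr_atomic_inc32(), which returns the value
 * *before* the increment. */
#include <assert.h>
#include <stdint.h>

static uint32_t fake_inc32(uint32_t *p) { return (*p)++; }

int main(void)
{
    const uint32_t zero_pt = UINT32_MAX / 2;  /* assumed bias point */
    uint32_t idlers = zero_pt - 1;            /* one thread already blocked */

    /* set_idle(): the pre-increment value is below zero_pt, meaning a
     * waiter dipped the counter and must be signalled. */
    assert(fake_inc32(&idlers) < zero_pt);    /* -> lock mutex, signal cond */
    assert(idlers == zero_pt);                /* balanced again */
    return 0;
}
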
apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
{
- apr_int32_t new_idlers;
- new_idlers = apr_atomic_add32(&(queue_info->idlers), -1) - zero_pt;
- if (--new_idlers <= 0) {
+ /* Don't block if there isn't any idle worker.
+ * apr_atomic_add32(x, -1) does the same as dec32(x), except
+ * that it returns the previous value (unlike dec32's bool).
+ *
+ * XXX: why don't we consume the last idler?
+ */
+ if (apr_atomic_add32(&(queue_info->idlers), -1) <= zero_pt + 1) {
apr_atomic_inc32(&(queue_info->idlers)); /* back out dec */
return APR_EAGAIN;
}
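
The same pre-value convention drives the non-blocking path: apr_atomic_add32(x, -1) likewise returns the value before the addition. A standalone sketch (fake, non-atomic stand-in, assumed zero_pt) of why the "<= zero_pt + 1" test backs out the decrement when at most one idler was available, which is exactly what the XXX comment questions:

/* Standalone sketch, not part of the patch.  fake_add32() mimics
 * apr_atomic_add32(), which returns the value before the addition;
 * zero_pt is an assumed bias point. */
#include <assert.h>
#include <stdint.h>

static uint32_t fake_add32(uint32_t *p, int32_t delta)
{
    uint32_t old = *p;
    *p = old + (uint32_t)delta;
    return old;
}

int main(void)
{
    const uint32_t zero_pt = UINT32_MAX / 2;
    uint32_t idlers = zero_pt + 1;            /* exactly one idle worker */

    /* try_get_idler(): the previous value was <= zero_pt + 1, so the
     * decrement is undone and APR_EAGAIN returned; the last idler is
     * left unconsumed (the "XXX" in the patch). */
    if (fake_add32(&idlers, -1) <= zero_pt + 1)
        idlers++;                             /* back out dec */

    assert(idlers == zero_pt + 1);
    return 0;
}
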
int *had_to_block)
{
apr_status_t rv;
- apr_int32_t prev_idlers;
- /* Atomically decrement the idle worker count, saving the old value */
- /* See TODO in ap_queue_info_set_idle() */
- prev_idlers = apr_atomic_add32(&(queue_info->idlers), -1) - zero_pt;
-
- /* Block if there weren't any idle workers */
- if (prev_idlers <= 0) {
+ /* Block if there isn't any idle worker.
+ * apr_atomic_add32(x, -1) does the same as dec32(x), except
+ * that it returns the previous value (unlike dec32's bool).
+ */
+ if (apr_atomic_add32(&queue_info->idlers, -1) <= zero_pt) {
rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
if (rv != APR_SUCCESS) {
AP_DEBUG_ASSERT(0);
- /* See TODO in ap_queue_info_set_idle() */
apr_atomic_inc32(&(queue_info->idlers)); /* back out dec */
return rv;
}
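
By contrast, the blocking path above only waits when there was no idle worker at all, so its threshold is zero_pt rather than zero_pt + 1. A standalone comparison of the two tests on a few hypothetical previous counter values:

/* Standalone sketch, not part of the patch: the previous-value tests of
 * wait_for_idler() and try_get_idler(), side by side.  zero_pt is assumed. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint32_t zero_pt = UINT32_MAX / 2;
    const int offs[] = { -1, 0, 1, 2 };

    for (int i = 0; i < 4; i++) {
        uint32_t prev = zero_pt + (uint32_t)offs[i];  /* value before the dec */
        printf("prev = zero_pt%+d  wait_for_idler blocks: %d  "
               "try_get_idler backs out: %d\n",
               offs[i],
               prev <= zero_pt,         /* block: no idler was available */
               prev <= zero_pt + 1);    /* EAGAIN: at most one idler was available */
    }
    return 0;
}
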
apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t * queue_info)
{
- apr_int32_t val;
- val = (apr_int32_t)apr_atomic_read32(&queue_info->idlers) - zero_pt;
- if (val < 0)
+ apr_uint32_t val;
+ val = apr_atomic_read32(&queue_info->idlers);
+ if (val <= zero_pt)
return 0;
- return val;
+ return val - zero_pt;
}
void ap_push_pool(fd_queue_info_t * queue_info,
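
One more aside on the accessor, not part of the patch: with the unsigned counter the clamp moves from "val < 0" to "val <= zero_pt", so callers still see a plain idler count. A tiny standalone sketch of the mapping, with an assumed zero_pt:

/* Standalone sketch, not part of the patch: the biased counter as seen
 * through ap_queue_info_get_idlers(); anything at or below the assumed
 * zero_pt reads as "0 idlers". */
#include <assert.h>
#include <stdint.h>

static uint32_t get_idlers(uint32_t raw, uint32_t zero_pt)
{
    return raw <= zero_pt ? 0 : raw - zero_pt;
}

int main(void)
{
    const uint32_t zero_pt = UINT32_MAX / 2;
    assert(get_idlers(zero_pt - 3, zero_pt) == 0);  /* 3 waiters blocked */
    assert(get_idlers(zero_pt,     zero_pt) == 0);  /* balanced */
    assert(get_idlers(zero_pt + 7, zero_pt) == 7);  /* 7 idle workers */
    return 0;
}
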
apr_signal(LISTENER_SIGNAL, dummy_signal_handler);
for (;;) {
+ apr_uint32_t i_count;
int workers_were_busy = 0;
if (listener_may_exit) {
close_listeners(process_slot, &closed);
}
else if (pt->type == PT_ACCEPT) {
int skip_accept = 0;
- int connection_count_local = connection_count;
+ apr_uint32_t connection_count_local = connection_count;
/* A Listener Socket is ready for an accept() */
if (workers_were_busy) {
listeners_disabled = 0;
enable_listensocks(process_slot);
}
- else if (connection_count_local > threads_per_child
- + ap_queue_info_get_idlers(worker_queue_info) *
- worker_factor / WORKER_FACTOR_SCALE)
+ else if (connection_count_local >
+ (ap_queue_info_get_idlers(worker_queue_info)
+ * worker_factor / WORKER_FACTOR_SCALE
+ + threads_per_child))
{
skip_accept = 1;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
ps->suspended = apr_atomic_read32(&suspended_count);
ps->lingering_close = apr_atomic_read32(&lingering_count);
}
- if (listeners_disabled && !workers_were_busy &&
- (int)apr_atomic_read32(&connection_count) <
- ((int)ap_queue_info_get_idlers(worker_queue_info) - 1) *
- worker_factor / WORKER_FACTOR_SCALE + threads_per_child)
+ if (listeners_disabled && !workers_were_busy
+ && (i_count = ap_queue_info_get_idlers(worker_queue_info)) > 0
+ && (apr_atomic_read32(&connection_count)
+ < (i_count - 1) * worker_factor / WORKER_FACTOR_SCALE
+ + threads_per_child))
{
listeners_disabled = 0;
enable_listensocks(process_slot);
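
Same underflow concern as in the earlier listener hunk, but here the subtraction at risk is (i_count - 1): once ap_queue_info_get_idlers() returns an unsigned value, i_count == 0 would wrap the right-hand side to a huge threshold and re-enable the listeners with no idle worker available, hence the added i_count > 0 guard. A standalone sketch with hypothetical values:

/* Standalone sketch, not part of the patch: the (i_count - 1) wrap the
 * i_count > 0 guard prevents.  The worker_factor and scale values are
 * made up, not the MPM's actual settings. */
#include <stdio.h>

int main(void)
{
    unsigned int i_count = 0;                 /* no idle workers */
    unsigned int worker_factor = 32, scale = 16, threads_per_child = 25;

    /* Without the guard, (i_count - 1) is UINT_MAX and the threshold
     * becomes enormous, so "connection_count < threshold" is satisfied
     * even though no worker is idle: */
    printf("%u\n", (i_count - 1) * worker_factor / scale + threads_per_child);

    /* The patch short-circuits on i_count > 0 before this expression. */
    return 0;
}
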
apr_pool_t * pool_to_recycle)
{
apr_status_t rv;
- apr_int32_t prev_idlers;
ap_push_pool(queue_info, pool_to_recycle);
- /* Atomically increment the count of idle workers */
- prev_idlers = apr_atomic_inc32(&(queue_info->idlers)) - zero_pt;
-
/* If other threads are waiting on a worker, wake one up */
- if (prev_idlers < 0) {
+ if (apr_atomic_inc32(&queue_info->idlers) < zero_pt) {
rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
if (rv != APR_SUCCESS) {
AP_DEBUG_ASSERT(0);
apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
{
- apr_int32_t new_idlers;
- new_idlers = apr_atomic_add32(&(queue_info->idlers), -1) - zero_pt;
- if (--new_idlers <= 0) {
+ /* Don't block if there isn't any idle worker.
+ * apr_atomic_add32(x, -1) does the same as dec32(x), except
+ * that it returns the previous value (whereas dec32 is a bool).
+ *
+ * XXX: why don't we consume the last idler?
+ */
+ if (apr_atomic_add32(&(queue_info->idlers), -1) <= zero_pt + 1) {
apr_atomic_inc32(&(queue_info->idlers)); /* back out dec */
return APR_EAGAIN;
}
int *had_to_block)
{
apr_status_t rv;
- apr_int32_t prev_idlers;
- /* Atomically decrement the idle worker count, saving the old value */
- /* See TODO in ap_queue_info_set_idle() */
- prev_idlers = apr_atomic_add32(&(queue_info->idlers), -1) - zero_pt;
-
- /* Block if there weren't any idle workers */
- if (prev_idlers <= 0) {
+ /* Block if there isn't any idle worker.
+ * apr_atomic_add32(x, -1) does the same as dec32(x), except
+ * that it returns the previous value (whereas dec32 is a bool).
+ */
+ if (apr_atomic_add32(&queue_info->idlers, -1) <= zero_pt) {
rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
if (rv != APR_SUCCESS) {
AP_DEBUG_ASSERT(0);
- /* See TODO in ap_queue_info_set_idle() */
apr_atomic_inc32(&(queue_info->idlers)); /* back out dec */
return rv;
}
apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t * queue_info)
{
- apr_int32_t val;
- val = (apr_int32_t)apr_atomic_read32(&queue_info->idlers) - zero_pt;
- if (val < 0)
+ apr_uint32_t val;
+ val = apr_atomic_read32(&queue_info->idlers);
+ if (val <= zero_pt)
return 0;
- return val;
+ return val - zero_pt;
}
void ap_push_pool(fd_queue_info_t * queue_info,