}
++retained->module_loads;
if (retained->module_loads == 2) {
- int i;
- static apr_uint32_t foo = 0;
+ /* test for correct operation of fdqueue */
+ static apr_uint32_t foo1, foo2;
- apr_atomic_inc32(&foo);
- apr_atomic_dec32(&foo);
- apr_atomic_dec32(&foo);
- i = apr_atomic_dec32(&foo);
- if (i >= 0) {
+ apr_atomic_set32(&foo1, 100);
+ foo2 = apr_atomic_add32(&foo1, -10);
+ if (foo2 != 100 || foo1 != 90) {
ap_log_error(APLOG_MARK, APLOG_CRIT, 0, NULL, APLOGNO(02406)
- "atomics not working as expected");
+ "atomics not working as expected - add32 of negative number");
return HTTP_INTERNAL_SERVER_ERROR;
}
rv = apr_pollset_create(&event_pollset, 1, plog,
qi->recycled_pools = NULL;
qi->max_recycled_pools = max_recycled_pools;
qi->max_idlers = max_idlers;
+ qi->idlers = zero_pt;
apr_pool_cleanup_register(pool, qi, queue_info_cleanup,
apr_pool_cleanup_null);
ap_push_pool(queue_info, pool_to_recycle);
/* Atomically increment the count of idle workers */
- /*
- * TODO: The atomics expect unsigned whereas we're using signed.
- * Need to double check that they work as expected or else
- * rework how we determine blocked.
- * UPDATE: Correct operation is performed during open_logs()
- */
- prev_idlers = apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers));
+ prev_idlers = apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers)) - zero_pt;
/* If other threads are waiting on a worker, wake one up */
if (prev_idlers < 0) {
apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
{
int prev_idlers;
- prev_idlers = apr_atomic_dec32((apr_uint32_t *)&(queue_info->idlers));
+ prev_idlers = apr_atomic_add32((apr_uint32_t *)&(queue_info->idlers), -1) - zero_pt;
if (prev_idlers <= 0) {
apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers)); /* back out dec */
return APR_EAGAIN;
/* Atomically decrement the idle worker count, saving the old value */
-        /* See TODO in ap_queue_info_set_idle() */
+        /* The idlers count is biased by zero_pt; see ap_queue_info_set_idle() */
- prev_idlers = apr_atomic_add32((apr_uint32_t *)&(queue_info->idlers), -1);
+ prev_idlers = apr_atomic_add32((apr_uint32_t *)&(queue_info->idlers), -1) - zero_pt;
/* Block if there weren't any idle workers */
if (prev_idlers <= 0) {
* now non-negative, it's safe for this function to
* return immediately.
*
- * A negative value in queue_info->idlers tells how many
+ * A "negative value" (relative to zero_pt) in
+ * queue_info->idlers tells how many
* threads are waiting on an idle worker.
*/
- if (queue_info->idlers < 0) {
+ if (queue_info->idlers < zero_pt) {
*had_to_block = 1;
rv = apr_thread_cond_wait(queue_info->wait_for_idler,
queue_info->idlers_mutex);
apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t * queue_info)
{
apr_int32_t val;
- val = (apr_int32_t)apr_atomic_read32((apr_uint32_t *)&queue_info->idlers);
+ val = (apr_int32_t)apr_atomic_read32((apr_uint32_t *)&queue_info->idlers) - zero_pt;
if (val < 0)
return 0;
return val;