mem_cache_object_t *mobj = obj->vobj;
#ifdef USE_ATOMICS
- apr_atomic_set(&mobj->pos, pos);
+ apr_atomic_set32(&mobj->pos, pos);
#else
mobj->pos = pos;
#endif
mem_cache_object_t *mobj = obj->vobj;
#ifdef USE_ATOMICS
- return apr_atomic_read(&mobj->pos);
+ return apr_atomic_read32(&mobj->pos);
#else
return mobj->pos;
#endif
* condition. A similar pattern is used in remove_url()
*/
#ifdef USE_ATOMICS
- apr_atomic_inc(&obj->refcount);
+ apr_atomic_inc32(&obj->refcount);
#else
obj->refcount++;
#endif
obj->cleanup = 1;
#ifdef USE_ATOMICS
- if (!apr_atomic_dec(&obj->refcount)) {
+ if (!apr_atomic_dec32(&obj->refcount)) {
cleanup_cache_object(obj);
}
#else
/* Cleanup the cache object */
#ifdef USE_ATOMICS
- if (!apr_atomic_dec(&obj->refcount)) {
+ if (!apr_atomic_dec32(&obj->refcount)) {
if (obj->cleanup) {
cleanup_cache_object(obj);
}
/* Iterate over the cache and clean up each entry */
/* Free the object if the refcount == 0 */
#ifdef USE_ATOMICS
- apr_atomic_inc(&obj->refcount);
+ apr_atomic_inc32(&obj->refcount);
obj->cleanup = 1;
- if (!apr_atomic_dec(&obj->refcount)) {
+ if (!apr_atomic_dec32(&obj->refcount)) {
#else
obj->cleanup = 1;
if (!obj->refcount) {
/* Finish initing the cache object */
#ifdef USE_ATOMICS
- apr_atomic_set(&obj->refcount, 1);
+ apr_atomic_set32(&obj->refcount, 1);
#else
obj->refcount = 1;
#endif
if (obj->complete) {
request_rec *rmain=r, *rtmp;
#ifdef USE_ATOMICS
- apr_atomic_inc(&obj->refcount);
+ apr_atomic_inc32(&obj->refcount);
#else
obj->refcount++;
#endif
/* Refcount increment in this case MUST be made under
* protection of the lock
*/
- apr_atomic_inc(&obj->refcount);
+ apr_atomic_inc32(&obj->refcount);
#else
if (!obj->refcount) {
cleanup_cache_object(obj);
}
#ifdef USE_ATOMICS
if (obj) {
- if (!apr_atomic_dec(&obj->refcount)) {
+ if (!apr_atomic_dec32(&obj->refcount)) {
cleanup_cache_object(obj);
}
}
*/
typedef struct {
/* 'state' consists of several fields concatenated into a
- * single 32-bit int for use with the apr_atomic_cas() API:
+ * single 32-bit int for use with the apr_atomic_cas32() API:
* state & STACK_FIRST is the thread ID of the first thread
* in a linked list of idle threads
* state & STACK_TERMINATED indicates whether the proc is shutting down
if (state & STACK_TERMINATED) {
return APR_EINVAL;
}
- if (apr_atomic_cas(&(stack->state), STACK_LIST_END, state) !=
+ if (apr_atomic_cas32(&(stack->state), STACK_LIST_END, state) !=
state) {
continue;
}
}
}
wakeup->next = state;
- if (apr_atomic_cas(&(stack->state), worker_id, state) != state) {
+ if (apr_atomic_cas32(&(stack->state), worker_id, state) != state) {
continue;
}
else {
apr_uint32_t state = stack->state;
apr_uint32_t first = state & STACK_FIRST;
if (first == STACK_LIST_END) {
- if (apr_atomic_cas(&(stack->state), state | STACK_NO_LISTENER,
- state) != state) {
+ if (apr_atomic_cas32(&(stack->state), state | STACK_NO_LISTENER,
+ state) != state) {
continue;
}
else {
}
else {
worker_wakeup_info *wakeup = worker_wakeups[first];
- if (apr_atomic_cas(&(stack->state), (state ^ first) | wakeup->next,
- state) != state) {
+ if (apr_atomic_cas32(&(stack->state), (state ^ first) | wakeup->next,
+ state) != state) {
continue;
}
else {
while (1) {
apr_uint32_t state = stack->state;
- if (apr_atomic_cas(&(stack->state), state | STACK_TERMINATED,
- state) == state) {
+ if (apr_atomic_cas32(&(stack->state), state | STACK_TERMINATED,
+ state) == state) {
break;
}
}
context->accept_socket = INVALID_SOCKET;
context->ba = apr_bucket_alloc_create(pchild);
- apr_atomic_inc(&num_completion_contexts);
+ apr_atomic_inc32(&num_completion_contexts);
break;
}
} else {
mpm_recycle_completion_context(context);
- apr_atomic_inc(&g_blocked_threads);
+ apr_atomic_inc32(&g_blocked_threads);
while (1) {
if (workers_may_exit) {
- apr_atomic_dec(&g_blocked_threads);
+ apr_atomic_dec32(&g_blocked_threads);
return NULL;
}
rc = GetQueuedCompletionStatus(ThreadDispatchIOCP, &BytesRead, &CompKey,
context = CONTAINING_RECORD(pol, COMP_CONTEXT, Overlapped);
break;
case IOCP_SHUTDOWN:
- apr_atomic_dec(&g_blocked_threads);
+ apr_atomic_dec32(&g_blocked_threads);
return NULL;
default:
- apr_atomic_dec(&g_blocked_threads);
+ apr_atomic_dec32(&g_blocked_threads);
return NULL;
}
break;
}
- apr_atomic_dec(&g_blocked_threads);
+ apr_atomic_dec32(&g_blocked_threads);
return context;
}