long priority; /**< the priority of this entry */
long total_refs; /**< total number of references this entry has had */
-#ifdef USE_ATOMICS
apr_uint32_t pos; /**< the position of this entry in the cache */
-#else
- apr_ssize_t pos;
-#endif
} mem_cache_object_t;
cache_object_t *obj = (cache_object_t *)a;
mem_cache_object_t *mobj = obj->vobj;
-#ifdef USE_ATOMICS
apr_atomic_set32(&mobj->pos, pos);
-#else
- mobj->pos = pos;
-#endif
}
static apr_ssize_t memcache_get_pos(void *a)
{
cache_object_t *obj = (cache_object_t *)a;
mem_cache_object_t *mobj = obj->vobj;
-#ifdef USE_ATOMICS
return apr_atomic_read32(&mobj->pos);
-#else
- return mobj->pos;
-#endif
}
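
/*
 * A minimal sketch, not part of the patch, of the position accessors kept
 * above: with USE_ATOMICS made unconditional the queue position lives in an
 * apr_uint32_t so it can be stored and loaded with APR's 32-bit atomics.
 * The struct and function names below are illustrative only.
 */
#include "apr_atomic.h"

typedef struct {
    apr_uint32_t pos;                  /* slot in the priority queue */
} entry_t;

static void entry_set_pos(entry_t *e, apr_uint32_t pos)
{
    apr_atomic_set32(&e->pos, pos);    /* atomic store, no mutex required */
}

static apr_uint32_t entry_get_pos(entry_t *e)
{
    return apr_atomic_read32(&e->pos); /* atomic load of the current slot */
}
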
static apr_size_t memcache_cache_get_size(void*a)
* now. Increment the refcount before setting cleanup to avoid a race
* condition. A similar pattern is used in remove_url()
*/
-#ifdef USE_ATOMICS
apr_atomic_inc32(&obj->refcount);
-#else
- obj->refcount++;
-#endif
obj->cleanup = 1;
-#ifdef USE_ATOMICS
if (!apr_atomic_dec32(&obj->refcount)) {
cleanup_cache_object(obj);
}
-#else
- obj->refcount--;
- if (!obj->refcount) {
- cleanup_cache_object(obj);
- }
-#endif
}
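
/*
 * Hedged sketch (hypothetical names) of the ordering the hunk above relies
 * on: take a reference before publishing cleanup = 1, then drop it.  The
 * extra reference keeps a concurrent releaser from freeing the object while
 * the flag is being set, and whichever thread drops the count to zero runs
 * the cleanup exactly once.
 */
#include "apr_atomic.h"

typedef struct {
    apr_uint32_t refcount;
    int cleanup;
} object_t;

static void destroy_object(object_t *obj)
{
    (void)obj;                               /* stand-in for cleanup_cache_object() */
}

static void mark_for_cleanup(object_t *obj)
{
    apr_atomic_inc32(&obj->refcount);        /* pin the object first */
    obj->cleanup = 1;                        /* now safe to publish the flag */
    if (!apr_atomic_dec32(&obj->refcount)) { /* zero: we held the last reference */
        destroy_object(obj);
    }
}
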
/*
* functions return a 'negative' score since priority queues
}
/* Cleanup the cache object */
-#ifdef USE_ATOMICS
if (!apr_atomic_dec32(&obj->refcount)) {
if (obj->cleanup) {
cleanup_cache_object(obj);
}
}
-#else
- if (sconf->lock) {
- apr_thread_mutex_lock(sconf->lock);
- }
- obj->refcount--;
- /* If the object is marked for cleanup and the refcount
- * has dropped to zero, cleanup the object
- */
- if ((obj->cleanup) && (!obj->refcount)) {
- cleanup_cache_object(obj);
- }
- if (sconf->lock) {
- apr_thread_mutex_unlock(sconf->lock);
- }
-#endif
return APR_SUCCESS;
}
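
/*
 * Sketch of the release path above, under the same assumptions (names are
 * illustrative).  apr_atomic_dec32() returns zero only for the caller that
 * takes the count to zero, so that caller alone may run the cleanup; the
 * mutex that guarded the old "obj->refcount--" sequence is no longer needed.
 */
#include "apr_atomic.h"
#include "apr_errno.h"

typedef struct {
    apr_uint32_t refcount;
    int cleanup;
} object_t;

static void destroy_object(object_t *obj) { (void)obj; } /* stand-in */

static apr_status_t release_object(object_t *obj)
{
    if (!apr_atomic_dec32(&obj->refcount)) {
        if (obj->cleanup) {                  /* only free objects marked for it */
            destroy_object(obj);
        }
    }
    return APR_SUCCESS;
}
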
static apr_status_t cleanup_cache_mem(void *sconfv)
while (obj) {
/* Iterate over the cache and clean up each entry */
        /* Free the object if the refcount == 0 */
-#ifdef USE_ATOMICS
apr_atomic_inc32(&obj->refcount);
obj->cleanup = 1;
if (!apr_atomic_dec32(&obj->refcount)) {
-#else
- obj->cleanup = 1;
- if (!obj->refcount) {
-#endif
cleanup_cache_object(obj);
}
obj = cache_pop(co->cache_cache);
}
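
/*
 * Illustrative sketch of the shutdown sweep above: every remaining entry is
 * popped, marked for cleanup, and freed if nobody else still holds a
 * reference.  The intrusive-list pop and the object type here are stand-ins,
 * not the module's real cache_pop()/cache_object_t declarations.
 */
#include "apr_atomic.h"

typedef struct object {
    struct object *next;
    apr_uint32_t refcount;
    int cleanup;
} object_t;

static void destroy_object(object_t *obj) { (void)obj; } /* stand-in */

static object_t *cache_pop_stub(object_t **head)
{
    object_t *obj = *head;
    if (obj) {
        *head = obj->next;                   /* advance the hypothetical list */
    }
    return obj;
}

static void sweep_cache(object_t **head)
{
    object_t *obj = cache_pop_stub(head);
    while (obj) {
        apr_atomic_inc32(&obj->refcount);        /* pin while the flag is set */
        obj->cleanup = 1;
        if (!apr_atomic_dec32(&obj->refcount)) { /* no other holders left */
            destroy_object(obj);
        }
        obj = cache_pop_stub(head);
    }
}
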
/* Finish initing the cache object */
-#ifdef USE_ATOMICS
apr_atomic_set32(&obj->refcount, 1);
-#else
- obj->refcount = 1;
-#endif
mobj->total_refs = 1;
obj->complete = 0;
obj->cleanup = 0;
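
/*
 * Small sketch of the initialisation above (illustrative names, fields
 * merged into one struct): a freshly created entry starts with a refcount
 * of 1, on the assumption that the creating request holds that reference
 * until it has finished filling the entry in.
 */
#include "apr_atomic.h"

typedef struct {
    apr_uint32_t refcount;
    long total_refs;
    int complete;
    int cleanup;
} object_t;

static void init_object(object_t *obj)
{
    apr_atomic_set32(&obj->refcount, 1); /* creator's reference */
    obj->total_refs = 1;                 /* statistics counter, not atomic */
    obj->complete = 0;                   /* body not yet fully cached */
    obj->cleanup = 0;                    /* not scheduled for destruction */
}
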
if (obj) {
if (obj->complete) {
request_rec *rmain=r, *rtmp;
-#ifdef USE_ATOMICS
apr_atomic_inc32(&obj->refcount);
-#else
- obj->refcount++;
-#endif
/* cache is worried about overall counts, not 'open' ones */
cache_update(sconf->cache_cache, obj);
cache_remove(sconf->cache_cache, obj);
mobj = (mem_cache_object_t *) obj->vobj;
-#ifdef USE_ATOMICS
/* Refcount increment in this case MUST be made under
* protection of the lock
*/
apr_atomic_inc32(&obj->refcount);
-#else
- if (!obj->refcount) {
- cleanup_cache_object(obj);
- obj = NULL;
- }
-#endif
if (obj) {
obj->cleanup = 1;
}
if (sconf->lock) {
apr_thread_mutex_unlock(sconf->lock);
}
-#ifdef USE_ATOMICS
if (obj) {
if (!apr_atomic_dec32(&obj->refcount)) {
cleanup_cache_object(obj);
}
}
-#endif
return OK;
}
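
/*
 * Sketch of the removal path ending above, with hypothetical names.  While
 * the cache mutex is held the object is pinned with an extra reference and
 * marked for cleanup, as the comment in the hunk requires (removal from the
 * cache index is elided here); only after the mutex is released is that
 * reference dropped, so the cleanup never runs with the lock held.
 */
#include "apr_atomic.h"
#include "apr_thread_mutex.h"

typedef struct {
    apr_uint32_t refcount;
    int cleanup;
} object_t;

static void destroy_object(object_t *obj) { (void)obj; } /* stand-in */

static void remove_object(apr_thread_mutex_t *lock, object_t *obj)
{
    if (lock) {
        apr_thread_mutex_lock(lock);
    }
    /* the increment must happen while the lock still protects the object */
    apr_atomic_inc32(&obj->refcount);
    obj->cleanup = 1;
    if (lock) {
        apr_thread_mutex_unlock(lock);
    }
    /* drop our pin; whichever thread reaches zero performs the cleanup */
    if (!apr_atomic_dec32(&obj->refcount)) {
        destroy_object(obj);
    }
}
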