int internal_functions_count;
int counted; /* the process uses shared memory */
zend_bool enabled;
+ zend_bool locked; /* thread obtained exclusive lock */
HashTable bind_hash; /* prototype and zval lookup table */
zend_accel_directives accel_directives;
char *cwd; /* current working directory or NULL */
#define S_H(s) g_shared_alloc_handler->s
/* True globals */
-static zend_bool locked;
/* old/new mapping. We can use a true global even for ZTS because its usage
   is wrapped in an exclusive lock anyway */
static HashTable xlat_table;
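/*
 * Minimal standalone sketch (not the accelerator's code) of why xlat_table can
 * stay a true global even under ZTS: every access happens while the exclusive
 * lock is held, and the per-thread ZCG(locked)-style flag lets the allocator
 * assert that. A pthread mutex stands in for the process-wide fcntl() lock;
 * all names (xlat, my_locked, acquire, release, shared_update) are hypothetical.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t xlat_lock = PTHREAD_MUTEX_INITIALIZER; /* exclusive lock   */
static int xlat[16];                       /* "true global" old/new mapping table */
static __thread int my_locked = 0;         /* per-thread analogue of ZCG(locked)  */

static void acquire(void) { pthread_mutex_lock(&xlat_lock); my_locked = 1; }
static void release(void) { my_locked = 0; pthread_mutex_unlock(&xlat_lock); }

static void shared_update(int old_id, int new_id)
{
	assert(my_locked); /* mirrors the "Shared memory lock not obtained" check */
	xlat[old_id] = new_id;
}

int main(void)
{
	acquire();
	shared_update(3, 7); /* safe: serialized by the exclusive lock */
	release();
	printf("xlat[3] = %d\n", xlat[3]);
	return 0;
}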
shared_segments_array_size = ZSMMG(shared_segments_count)*S_H(segment_type_size)();
/* move shared_segments and shared_free to shared memory */
- locked = 1; /* no need to perform a real lock at this point */
+ ZCG(locked) = 1; /* no need to perform a real lock at this point */
p_tmp_shared_globals = (zend_smm_shared_globals *) zend_shared_alloc(sizeof(zend_smm_shared_globals));
tmp_shared_segments = zend_shared_alloc(shared_segments_array_size+ZSMMG(shared_segments_count)*sizeof(void *));
ZSMMG(shared_segments) = tmp_shared_segments;
ZSMMG(shared_memory_state).positions = (int *) zend_shared_alloc(sizeof(int)*ZSMMG(shared_segments_count));
- locked = 0;
+ ZCG(locked) = 0;
return res;
}
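/*
 * Illustrative arithmetic only (hypothetical numbers, not taken from the source):
 * the allocation above reserves room for both the handler-specific segment
 * descriptors (shared_segments_array_size = count * S_H(segment_type_size)())
 * and a table of count segment pointers, hence the extra
 * count * sizeof(void *) bytes.
 */
#include <stdio.h>

int main(void)
{
	size_t count = 4;            /* hypothetical ZSMMG(shared_segments_count) */
	size_t descriptor_size = 32; /* hypothetical S_H(segment_type_size)()     */
	size_t array_size = count * descriptor_size;
	size_t total = array_size + count * sizeof(void *);

	/* On a 64-bit build: 4 * 32 + 4 * 8 = 160 bytes in one shared allocation. */
	printf("segment table allocation: %zu bytes\n", total);
	return 0;
}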
#ifndef ZEND_WIN32
close(lock_file);
#endif
- locked = 0;
}
#define SHARED_ALLOC_FAILED() { \
{
int i;
unsigned int block_size = size+sizeof(zend_shared_memory_block_header);
+ TSRMLS_FETCH();
#if 1
- if (!locked) {
+ if (!ZCG(locked)) {
zend_accel_error(ACCEL_LOG_ERROR, "Shared memory lock not obtained");
}
#endif
void zend_shared_alloc_safe_unlock(TSRMLS_D)
{
- if (locked) {
+ if (ZCG(locked)) {
zend_shared_alloc_unlock(TSRMLS_C);
}
}
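/*
 * Minimal standalone sketch (hypothetical names, not the accelerator's API) of
 * the design behind zend_shared_alloc_safe_unlock(): cleanup and error paths
 * can call it unconditionally, because it only releases the lock when the
 * per-thread flag shows this thread actually took it. A pthread mutex again
 * stands in for the process-wide fcntl() lock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static __thread int my_locked = 0;   /* per-thread analogue of ZCG(locked) */

static void acquire(void)      { pthread_mutex_lock(&lock); my_locked = 1; }
static void release(void)      { my_locked = 0; pthread_mutex_unlock(&lock); }
static void safe_release(void) { if (my_locked) release(); }

static int do_work(int fail_early)
{
	if (fail_early) {
		safe_release(); /* harmless: this thread never took the lock */
		return -1;
	}
	acquire();
	/* ... work against shared memory would go here ... */
	safe_release();     /* releases because my_locked is set */
	return 0;
}

int main(void)
{
	printf("%d %d\n", do_work(1), do_work(0));
	return 0;
}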
zend_shared_alloc_lock_win32();
#endif
- locked=1;
+ ZCG(locked) = 1;
/* Prepare translation table
*
/* Destroy translation table */
zend_hash_destroy(&xlat_table);
- locked=0;
+ ZCG(locked) = 0;
#ifndef ZEND_WIN32
if (fcntl(lock_file, F_SETLK, &mem_write_unlock) == -1) {