Removed HashTable->arHash (reduced memory consumption). Hash slots are now accessed through the HT_HASH() macro.
Hash slots are allocated together with the Buckets (before them) and are laid out in reverse order from the HashTable->arData base address (see comments in Zend/zend_types.h).
Indexes into the hash table and collision resolution chains (Z_NEXT) may be stored as plain indices or as byte offsets, depending on the platform (byte offsets on 32-bit systems, plain indices on 64-bit).
HashTable data fields are reordered to keep the data most useful for zend_hash_find() in the same CPU cache line.
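
To make the mirrored layout and the new masking concrete, here is a minimal standalone C sketch. The ToyTable/ToyBucket names and toy_* helpers are hypothetical stand-ins, not the actual Zend types, and it models only the 64-bit branch where hash slots hold plain indices (no byte offsets):

/* One allocation: [nTableSize hash slots][nTableSize buckets].
 * arData points at the first bucket; hash slots are reached with
 * negative indices, which is why nTableMask becomes -nTableSize
 * and lookups use "h | mask" instead of the old "h & (size - 1)". */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { uint64_t h; int value; } ToyBucket;

typedef struct {
    ToyBucket *arData;   /* bucket base; hash slots precede it in memory */
    uint32_t nTableSize; /* power of two */
    uint32_t nTableMask; /* (uint32_t)-nTableSize */
} ToyTable;

#define TOY_INVALID ((uint32_t)-1)
#define TOY_HASH(t, idx) (((uint32_t*)((t)->arData))[(int32_t)(idx)])

static void toy_init(ToyTable *t, uint32_t size)
{
    char *mem = malloc(size * sizeof(uint32_t) + size * sizeof(ToyBucket));
    t->nTableSize = size;
    t->nTableMask = (uint32_t)-(int32_t)size;
    t->arData = (ToyBucket*)(mem + size * sizeof(uint32_t));
    memset(mem, 0xFF, size * sizeof(uint32_t)); /* every slot = TOY_INVALID */
}

static uint32_t toy_slot_head(const ToyTable *t, uint64_t h)
{
    /* (h | -size) is a negative index in [-size, -1]: the OR keeps the
     * low log2(size) bits of h and forces all higher bits to one */
    return TOY_HASH(t, (uint32_t)h | t->nTableMask);
}

For nTableSize = 8, nTableMask is 0xFFFFFFF8, so h | nTableMask lands in {-8, ..., -1} when read as a signed index; the same arData pointer serves both arrays and the separate arHash field disappears.
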
if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
if (packed) {
(ht)->u.flags |= HASH_FLAG_INITIALIZED | HASH_FLAG_PACKED;
- (ht)->arData = (Bucket *) pemalloc((ht)->nTableSize * sizeof(Bucket), (ht)->u.flags & HASH_FLAG_PERSISTENT);
+ HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
+ HT_HASH_RESET_PACKED(ht);
} else {
(ht)->u.flags |= HASH_FLAG_INITIALIZED;
- (ht)->nTableMask = (ht)->nTableSize - 1;
- (ht)->arData = (Bucket *) pemalloc((ht)->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)), (ht)->u.flags & HASH_FLAG_PERSISTENT);
- (ht)->arHash = (uint32_t*)((ht)->arData + (ht)->nTableSize);
- memset((ht)->arHash, INVALID_IDX, (ht)->nTableSize * sizeof(uint32_t));
+ (ht)->nTableMask = -(ht)->nTableSize;
+ HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
+ HT_HASH_RESET(ht);
}
}
}
#define CHECK_INIT(ht, packed) \
zend_hash_check_init(ht, packed)
-static const uint32_t uninitialized_bucket = {INVALID_IDX};
+static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
+ {HT_INVALID_IDX, HT_INVALID_IDX};
ZEND_API void _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent ZEND_FILE_LINE_DC)
{
GC_REFCOUNT(ht) = 1;
GC_TYPE_INFO(ht) = IS_ARRAY;
+ ht->u.flags = (persistent ? HASH_FLAG_PERSISTENT : 0) | HASH_FLAG_APPLY_PROTECTION;
ht->nTableSize = zend_hash_check_size(nSize);
- ht->nTableMask = 0;
+ ht->nTableMask = HT_MIN_MASK;
+ HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
ht->nNumUsed = 0;
ht->nNumOfElements = 0;
+ ht->nInternalPointer = HT_INVALID_IDX;
ht->nNextFreeElement = 0;
- ht->arData = NULL;
- ht->arHash = (uint32_t*)&uninitialized_bucket;
ht->pDestructor = pDestructor;
- ht->nInternalPointer = INVALID_IDX;
- ht->u.flags = (persistent ? HASH_FLAG_PERSISTENT : 0) | HASH_FLAG_APPLY_PROTECTION;
-}
-
-static zend_always_inline void zend_hash_realloc(HashTable *ht, size_t new_size)
-{
-#if 1
- if (!(ht->u.flags & HASH_FLAG_PERSISTENT) && new_size <= ZEND_MM_MAX_SMALL_SIZE) {
- Bucket *newData = emalloc(new_size);
- memcpy(newData, ht->arData, ht->nNumUsed * sizeof(Bucket));
- efree(ht->arData);
- ht->arData = newData;
- return;
- }
-#endif
- ht->arData = (Bucket *) perealloc2(ht->arData, new_size, ht->nNumUsed * sizeof(Bucket), ht->u.flags & HASH_FLAG_PERSISTENT);
}
static void zend_hash_packed_grow(HashTable *ht)
}
HANDLE_BLOCK_INTERRUPTIONS();
ht->nTableSize += ht->nTableSize;
- zend_hash_realloc(ht, ht->nTableSize * sizeof(Bucket));
+ HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
HANDLE_UNBLOCK_INTERRUPTIONS();
}
ZEND_API void zend_hash_packed_to_hash(HashTable *ht)
{
+ void *old_data = HT_GET_DATA_ADDR(ht);
+ Bucket *old_buckets = ht->arData;
+
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HANDLE_BLOCK_INTERRUPTIONS();
ht->u.flags &= ~HASH_FLAG_PACKED;
- ht->nTableMask = ht->nTableSize - 1;
- zend_hash_realloc(ht, ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)));
- ht->arHash = (uint32_t*)(ht->arData + ht->nTableSize);
+ ht->nTableMask = -ht->nTableSize;
+ HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
+ memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
+ pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
zend_hash_rehash(ht);
HANDLE_UNBLOCK_INTERRUPTIONS();
}
ZEND_API void zend_hash_to_packed(HashTable *ht)
{
+ void *old_data = HT_GET_DATA_ADDR(ht);
+ Bucket *old_buckets = ht->arData;
+
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HANDLE_BLOCK_INTERRUPTIONS();
ht->u.flags |= HASH_FLAG_PACKED;
- ht->nTableMask = 0;
- zend_hash_realloc(ht, ht->nTableSize * sizeof(Bucket));
- ht->arHash = (uint32_t*)&uninitialized_bucket;
+ ht->nTableMask = HT_MIN_MASK;
+ HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
+ HT_HASH_RESET_PACKED(ht);
+ memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
+ pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
HANDLE_UNBLOCK_INTERRUPTIONS();
}
if (nSize > ht->nTableSize) {
HANDLE_BLOCK_INTERRUPTIONS();
ht->nTableSize = zend_hash_check_size(nSize);
- zend_hash_realloc(ht, ht->nTableSize * sizeof(Bucket));
+ HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
HANDLE_UNBLOCK_INTERRUPTIONS();
}
} else {
ZEND_ASSERT(!(ht->u.flags & HASH_FLAG_PACKED));
if (nSize > ht->nTableSize) {
+ void *old_data = HT_GET_DATA_ADDR(ht);
+ Bucket *old_buckets = ht->arData;
+
HANDLE_BLOCK_INTERRUPTIONS();
ht->nTableSize = zend_hash_check_size(nSize);
- zend_hash_realloc(ht, ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)));
- ht->arHash = (uint32_t*)(ht->arData + ht->nTableSize);
- ht->nTableMask = ht->nTableSize - 1;
+ ht->nTableMask = -ht->nTableSize;
+ HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
+ memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
+ pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
zend_hash_rehash(ht);
HANDLE_UNBLOCK_INTERRUPTIONS();
}
HashTableIterator *iter = EG(ht_iterators) + idx;
ZEND_ASSERT(idx != (uint32_t)-1);
- if (iter->pos == INVALID_IDX) {
- return INVALID_IDX;
+ if (iter->pos == HT_INVALID_IDX) {
+ return HT_INVALID_IDX;
} else if (UNEXPECTED(iter->ht != ht)) {
if (EXPECTED(iter->ht) && EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
iter->ht->u.v.nIteratorsCount--;
{
HashTableIterator *iter = EG(ht_iterators);
HashTableIterator *end = iter + EG(ht_iterators_used);
- HashPosition res = INVALID_IDX;
+ HashPosition res = HT_INVALID_IDX;
while (iter != end) {
if (iter->ht == ht) {
zend_ulong h;
uint32_t nIndex;
uint32_t idx;
- Bucket *p;
+ Bucket *p, *arData;
h = zend_string_hash_val(key);
- nIndex = h & ht->nTableMask;
- idx = ht->arHash[nIndex];
- while (idx != INVALID_IDX) {
- p = ht->arData + idx;
- if ((p->key == key) || /* check for the the same interned string */
- (p->h == h &&
- p->key &&
- p->key->len == key->len &&
- memcmp(p->key->val, key->val, key->len) == 0)) {
+ arData = ht->arData;
+ nIndex = h | ht->nTableMask;
+ idx = HT_HASH_EX(arData, nIndex);
+ while (idx != HT_INVALID_IDX) {
+ p = HT_HASH_TO_BUCKET_EX(arData, idx);
+ if (p->key == key || /* check for the same interned string */
+ (p->h == h &&
+ p->key &&
+ p->key->len == key->len &&
+ memcmp(p->key->val, key->val, key->len) == 0)) {
return p;
}
idx = Z_NEXT(p->val);
{
uint32_t nIndex;
uint32_t idx;
- Bucket *p;
-
- nIndex = h & ht->nTableMask;
- idx = ht->arHash[nIndex];
- while (idx != INVALID_IDX) {
- ZEND_ASSERT(idx < ht->nTableSize);
- p = ht->arData + idx;
+ Bucket *p, *arData;
+
+ arData = ht->arData;
+ nIndex = h | ht->nTableMask;
+ idx = HT_HASH_EX(arData, nIndex);
+ while (idx != HT_INVALID_IDX) {
+ ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
+ p = HT_HASH_TO_BUCKET_EX(arData, idx);
if ((p->h == h)
&& p->key
&& (p->key->len == len)
{
uint32_t nIndex;
uint32_t idx;
- Bucket *p;
-
- nIndex = h & ht->nTableMask;
- idx = ht->arHash[nIndex];
- while (idx != INVALID_IDX) {
- ZEND_ASSERT(idx < ht->nTableSize);
- p = ht->arData + idx;
+ Bucket *p, *arData;
+
+ arData = ht->arData;
+ nIndex = h | ht->nTableMask;
+ idx = HT_HASH_EX(arData, nIndex);
+ while (idx != HT_INVALID_IDX) {
+ ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
+ p = HT_HASH_TO_BUCKET_EX(arData, idx);
if (p->h == h && !p->key) {
return p;
}
HANDLE_BLOCK_INTERRUPTIONS();
idx = ht->nNumUsed++;
ht->nNumOfElements++;
- if (ht->nInternalPointer == INVALID_IDX) {
+ if (ht->nInternalPointer == HT_INVALID_IDX) {
ht->nInternalPointer = idx;
}
- zend_hash_iterators_update(ht, INVALID_IDX, idx);
+ zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
p = ht->arData + idx;
p->h = h = zend_string_hash_val(key);
p->key = key;
zend_string_addref(key);
ZVAL_COPY_VALUE(&p->val, pData);
- nIndex = h & ht->nTableMask;
- Z_NEXT(p->val) = ht->arHash[nIndex];
- ht->arHash[nIndex] = idx;
+ nIndex = h | ht->nTableMask;
+ Z_NEXT(p->val) = HT_HASH(ht, nIndex);
+ HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
HANDLE_UNBLOCK_INTERRUPTIONS();
return &p->val;
ht->nNumUsed = h + 1;
}
ht->nNumOfElements++;
- if (ht->nInternalPointer == INVALID_IDX) {
+ if (ht->nInternalPointer == HT_INVALID_IDX) {
ht->nInternalPointer = h;
}
- zend_hash_iterators_update(ht, INVALID_IDX, h);
+ zend_hash_iterators_update(ht, HT_INVALID_IDX, h);
if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
}
HANDLE_BLOCK_INTERRUPTIONS();
idx = ht->nNumUsed++;
ht->nNumOfElements++;
- if (ht->nInternalPointer == INVALID_IDX) {
+ if (ht->nInternalPointer == HT_INVALID_IDX) {
ht->nInternalPointer = idx;
}
- zend_hash_iterators_update(ht, INVALID_IDX, idx);
+ zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
}
p = ht->arData + idx;
p->h = h;
p->key = NULL;
- nIndex = h & ht->nTableMask;
+ nIndex = h | ht->nTableMask;
ZVAL_COPY_VALUE(&p->val, pData);
- Z_NEXT(p->val) = ht->arHash[nIndex];
- ht->arHash[nIndex] = idx;
+ Z_NEXT(p->val) = HT_HASH(ht, nIndex);
+ HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
HANDLE_UNBLOCK_INTERRUPTIONS();
return &p->val;
zend_hash_rehash(ht);
HANDLE_UNBLOCK_INTERRUPTIONS();
} else if (ht->nTableSize < HT_MAX_SIZE) { /* Let's double the table size */
+ void *old_data = HT_GET_DATA_ADDR(ht);
+ Bucket *old_buckets = ht->arData;
+
HANDLE_BLOCK_INTERRUPTIONS();
ht->nTableSize += ht->nTableSize;
- zend_hash_realloc(ht, ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)));
- ht->arHash = (uint32_t*)(ht->arData + ht->nTableSize);
- ht->nTableMask = ht->nTableSize - 1;
+ ht->nTableMask = -ht->nTableSize;
+ HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
+ memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
+ pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
zend_hash_rehash(ht);
HANDLE_UNBLOCK_INTERRUPTIONS();
} else {
if (UNEXPECTED(ht->nNumOfElements == 0)) {
if (ht->u.flags & HASH_FLAG_INITIALIZED) {
- memset(ht->arHash, INVALID_IDX, ht->nTableSize * sizeof(uint32_t));
+ HT_HASH_RESET(ht);
}
return SUCCESS;
}
- memset(ht->arHash, INVALID_IDX, ht->nTableSize * sizeof(uint32_t));
+ HT_HASH_RESET(ht);
if (EXPECTED(ht->u.v.nIteratorsCount == 0)) {
for (i = 0, j = 0; i < ht->nNumUsed; i++) {
p = ht->arData + i;
ht->nInternalPointer = j;
}
}
- nIndex = ht->arData[j].h & ht->nTableMask;
- Z_NEXT(ht->arData[j].val) = ht->arHash[nIndex];
- ht->arHash[nIndex] = j;
+ nIndex = ht->arData[j].h | ht->nTableMask;
+ Z_NEXT(ht->arData[j].val) = HT_HASH(ht, nIndex);
+ HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
j++;
}
} else {
iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
}
}
- nIndex = ht->arData[j].h & ht->nTableMask;
- Z_NEXT(ht->arData[j].val) = ht->arHash[nIndex];
- ht->arHash[nIndex] = j;
+ nIndex = ht->arData[j].h | ht->nTableMask;
+ Z_NEXT(ht->arData[j].val) = HT_HASH(ht, nIndex);
+ HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
j++;
}
}
if (prev) {
Z_NEXT(prev->val) = Z_NEXT(p->val);
} else {
- ht->arHash[p->h & ht->nTableMask] = Z_NEXT(p->val);
+ HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
}
}
if (ht->nNumUsed - 1 == idx) {
while (1) {
new_idx++;
if (new_idx >= ht->nNumUsed) {
- new_idx = INVALID_IDX;
+ new_idx = HT_INVALID_IDX;
break;
} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
break;
Bucket *prev = NULL;
if (!(ht->u.flags & HASH_FLAG_PACKED)) {
- uint32_t nIndex = p->h & ht->nTableMask;
- uint32_t i = ht->arHash[nIndex];
+ uint32_t nIndex = p->h | ht->nTableMask;
+ uint32_t i = HT_HASH(ht, nIndex);
if (i != idx) {
- prev = ht->arData + i;
+ prev = HT_HASH_TO_BUCKET(ht, i);
while (Z_NEXT(prev->val) != idx) {
i = Z_NEXT(prev->val);
- prev = ht->arData + i;
+ prev = HT_HASH_TO_BUCKET(ht, i);
}
}
}
HT_ASSERT(GC_REFCOUNT(ht) == 1);
h = zend_string_hash_val(key);
- nIndex = h & ht->nTableMask;
+ nIndex = h | ht->nTableMask;
- idx = ht->arHash[nIndex];
- while (idx != INVALID_IDX) {
- p = ht->arData + idx;
+ idx = HT_HASH(ht, nIndex);
+ while (idx != HT_INVALID_IDX) {
+ p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->key == key) ||
(p->h == h &&
p->key &&
HT_ASSERT(GC_REFCOUNT(ht) == 1);
h = zend_string_hash_val(key);
- nIndex = h & ht->nTableMask;
+ nIndex = h | ht->nTableMask;
- idx = ht->arHash[nIndex];
- while (idx != INVALID_IDX) {
- p = ht->arData + idx;
+ idx = HT_HASH(ht, nIndex);
+ while (idx != HT_INVALID_IDX) {
+ p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->key == key) ||
(p->h == h &&
p->key &&
HT_ASSERT(GC_REFCOUNT(ht) == 1);
h = zend_inline_hash_func(str, len);
- nIndex = h & ht->nTableMask;
+ nIndex = h | ht->nTableMask;
- idx = ht->arHash[nIndex];
- while (idx != INVALID_IDX) {
- p = ht->arData + idx;
+ idx = HT_HASH(ht, nIndex);
+ while (idx != HT_INVALID_IDX) {
+ p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->h == h)
&& p->key
&& (p->key->len == len)
HT_ASSERT(GC_REFCOUNT(ht) == 1);
h = zend_inline_hash_func(str, len);
- nIndex = h & ht->nTableMask;
+ nIndex = h | ht->nTableMask;
- idx = ht->arHash[nIndex];
- while (idx != INVALID_IDX) {
- p = ht->arData + idx;
+ idx = HT_HASH(ht, nIndex);
+ while (idx != HT_INVALID_IDX) {
+ p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->h == h)
&& p->key
&& (p->key->len == len)
}
return FAILURE;
}
- nIndex = h & ht->nTableMask;
+ nIndex = h | ht->nTableMask;
- idx = ht->arHash[nIndex];
- while (idx != INVALID_IDX) {
- p = ht->arData + idx;
+ idx = HT_HASH(ht, nIndex);
+ while (idx != HT_INVALID_IDX) {
+ p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->h == h) && (p->key == NULL)) {
_zend_hash_del_el_ex(ht, idx, p, prev);
return SUCCESS;
} else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
return;
}
- pefree(ht->arData, ht->u.flags & HASH_FLAG_PERSISTENT);
+ pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
}
ZEND_API void zend_array_destroy(HashTable *ht)
} else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
goto free_ht;
}
- efree(ht->arData);
+ efree(HT_GET_DATA_ADDR(ht));
free_ht:
FREE_HASHTABLE(ht);
}
}
}
if (!(ht->u.flags & HASH_FLAG_PACKED)) {
- memset(ht->arHash, INVALID_IDX, ht->nTableSize * sizeof(uint32_t));
+ HT_HASH_RESET(ht);
}
}
ht->nNumUsed = 0;
ht->nNumOfElements = 0;
ht->nNextFreeElement = 0;
- ht->nInternalPointer = INVALID_IDX;
+ ht->nInternalPointer = HT_INVALID_IDX;
}
ZEND_API void zend_symtable_clean(HashTable *ht)
}
} while (++p != end);
if (!(ht->u.flags & HASH_FLAG_PACKED)) {
- memset(ht->arHash, INVALID_IDX, ht->nTableSize * sizeof(uint32_t));
+ HT_HASH_RESET(ht);
}
}
ht->nNumUsed = 0;
ht->nNumOfElements = 0;
ht->nNextFreeElement = 0;
- ht->nInternalPointer = INVALID_IDX;
+ ht->nInternalPointer = HT_INVALID_IDX;
}
ZEND_API void zend_hash_graceful_destroy(HashTable *ht)
for (idx = 0; idx < ht->nNumUsed; idx++) {
p = ht->arData + idx;
if (Z_TYPE(p->val) == IS_UNDEF) continue;
- _zend_hash_del_el(ht, idx, p);
+ _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (ht->u.flags & HASH_FLAG_INITIALIZED) {
- pefree(ht->arData, ht->u.flags & HASH_FLAG_PERSISTENT);
+ pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
}
SET_INCONSISTENT(HT_DESTROYED);
idx--;
p = ht->arData + idx;
if (Z_TYPE(p->val) == IS_UNDEF) continue;
- _zend_hash_del_el(ht, idx, p);
+ _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (ht->u.flags & HASH_FLAG_INITIALIZED) {
- pefree(ht->arData, ht->u.flags & HASH_FLAG_PERSISTENT);
+ pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
}
SET_INCONSISTENT(HT_DESTROYED);
result = apply_func(&p->val);
if (result & ZEND_HASH_APPLY_REMOVE) {
- _zend_hash_del_el(ht, idx, p);
+ _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
break;
result = apply_func(&p->val, argument);
if (result & ZEND_HASH_APPLY_REMOVE) {
- _zend_hash_del_el(ht, idx, p);
+ _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
break;
result = apply_func(&p->val, num_args, args, &hash_key);
if (result & ZEND_HASH_APPLY_REMOVE) {
- _zend_hash_del_el(ht, idx, p);
+ _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
va_end(args);
result = apply_func(&p->val);
if (result & ZEND_HASH_APPLY_REMOVE) {
- _zend_hash_del_el(ht, idx, p);
+ _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
break;
IS_CONSISTENT(target);
HT_ASSERT(GC_REFCOUNT(target) == 1);
- setTargetPointer = (target->nInternalPointer == INVALID_IDX);
+ setTargetPointer = (target->nInternalPointer == HT_INVALID_IDX);
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (Z_TYPE(p->val) == IS_UNDEF) continue;
if (setTargetPointer && source->nInternalPointer == idx) {
- target->nInternalPointer = INVALID_IDX;
+ target->nInternalPointer = HT_INVALID_IDX;
}
/* INDIRECT element may point to UNDEF-ined slots */
data = &p->val;
pCopyConstructor(new_entry);
}
}
- if (target->nInternalPointer == INVALID_IDX && target->nNumOfElements > 0) {
+ if (target->nInternalPointer == HT_INVALID_IDX && target->nNumOfElements > 0) {
idx = 0;
while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
idx++;
target->nTableMask = source->nTableMask;
target->nTableSize = source->nTableSize;
target->pDestructor = source->pDestructor;
- target->nInternalPointer = INVALID_IDX;
+ target->nInternalPointer = HT_INVALID_IDX;
target->u.flags = (source->u.flags & ~HASH_FLAG_PERSISTENT) | HASH_FLAG_APPLY_PROTECTION;
target_idx = 0;
target->nNumUsed = source->nNumUsed;
target->nNumOfElements = source->nNumOfElements;
target->nNextFreeElement = source->nNextFreeElement;
- target->arData = (Bucket *) pemalloc(target->nTableSize * sizeof(Bucket), 0);
- target->arHash = (uint32_t*)&uninitialized_bucket;
+ HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
target->nInternalPointer = source->nInternalPointer;
+ HT_HASH_RESET_PACKED(target);
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
}
}
if (target->nNumOfElements > 0 &&
- target->nInternalPointer == INVALID_IDX) {
+ target->nInternalPointer == HT_INVALID_IDX) {
idx = 0;
while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
idx++;
}
} else {
target->nNextFreeElement = source->nNextFreeElement;
- target->arData = (Bucket *) pemalloc(target->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)), 0);
- target->arHash = (uint32_t*)(target->arData + target->nTableSize);
- memset(target->arHash, INVALID_IDX, target->nTableSize * sizeof(uint32_t));
+ HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
+ HT_HASH_RESET(target);
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (q->key) {
zend_string_addref(q->key);
}
- nIndex = q->h & target->nTableMask;
- Z_NEXT(q->val) = target->arHash[nIndex];
- target->arHash[nIndex] = target_idx;
+ nIndex = q->h | target->nTableMask;
+ Z_NEXT(q->val) = HT_HASH(target, nIndex);
+ HT_HASH(target, nIndex) = HT_IDX_TO_HASH(target_idx);
if (Z_OPT_REFCOUNTED_P(data)) {
if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1) {
ZVAL_COPY(&q->val, Z_REFVAL_P(data));
target->nNumUsed = target_idx;
target->nNumOfElements = target_idx;
if (target->nNumOfElements > 0 &&
- target->nInternalPointer == INVALID_IDX) {
+ target->nInternalPointer == HT_INVALID_IDX) {
target->nInternalPointer = 0;
}
}
target->nNumUsed = 0;
target->nNumOfElements = 0;
target->nNextFreeElement = 0;
- target->arData = NULL;
- target->arHash = (uint32_t*)&uninitialized_bucket;
+ HT_SET_DATA_ADDR(target, &uninitialized_bucket);
}
return target;
}
return;
}
}
- *pos = INVALID_IDX;
+ *pos = HT_INVALID_IDX;
}
return;
}
}
- *pos = INVALID_IDX;
+ *pos = HT_INVALID_IDX;
}
IS_CONSISTENT(ht);
HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
- if (idx != INVALID_IDX) {
+ if (idx != HT_INVALID_IDX) {
while (1) {
idx++;
if (idx >= ht->nNumUsed) {
- *pos = INVALID_IDX;
+ *pos = HT_INVALID_IDX;
return SUCCESS;
}
if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
IS_CONSISTENT(ht);
HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
- if (idx != INVALID_IDX) {
+ if (idx != HT_INVALID_IDX) {
while (idx > 0) {
idx--;
if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
return SUCCESS;
}
}
- *pos = INVALID_IDX;
+ *pos = HT_INVALID_IDX;
return SUCCESS;
} else {
return FAILURE;
Bucket *p;
IS_CONSISTENT(ht);
- if (idx != INVALID_IDX) {
+ if (idx != HT_INVALID_IDX) {
p = ht->arData + idx;
if (p->key) {
*str_index = p->key;
Bucket *p;
IS_CONSISTENT(ht);
- if (idx == INVALID_IDX) {
+ if (idx == HT_INVALID_IDX) {
ZVAL_NULL(key);
} else {
p = ht->arData + idx;
Bucket *p;
IS_CONSISTENT(ht);
- if (idx != INVALID_IDX) {
+ if (idx != HT_INVALID_IDX) {
p = ht->arData + idx;
if (p->key) {
return HASH_KEY_IS_STRING;
Bucket *p;
IS_CONSISTENT(ht);
- if (idx != INVALID_IDX) {
+ if (idx != HT_INVALID_IDX) {
p = ht->arData + idx;
return &p->val;
} else {
}
} else {
if (renumber) {
+ void *old_data = HT_GET_DATA_ADDR(ht);
+ Bucket *old_buckets = ht->arData;
+
ht->u.flags |= HASH_FLAG_PACKED;
- ht->nTableMask = 0;
- zend_hash_realloc(ht, ht->nTableSize * sizeof(Bucket));
- ht->arHash = (uint32_t*)&uninitialized_bucket;
+ ht->nTableMask = HT_MIN_MASK;
+ HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
+ memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
+ pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
+ HT_HASH_RESET_PACKED(ht);
} else {
zend_hash_rehash(ht);
}
#define HASH_ADD_NEW (1<<3)
#define HASH_ADD_NEXT (1<<4)
-#define INVALID_IDX ((uint32_t) -1)
-
#define HASH_FLAG_PERSISTENT (1<<0)
#define HASH_FLAG_APPLY_PROTECTION (1<<1)
#define HASH_FLAG_PACKED (1<<2)
ZVAL_COPY_VALUE(&p->val, zv);
p->key = zend_string_copy(key);
p->h = zend_string_hash_val(key);
- nIndex = p->h & ht->nTableMask;
- Z_NEXT(p->val) = ht->arHash[nIndex];
- ht->arHash[nIndex] = idx;
+ nIndex = p->h | ht->nTableMask;
+ Z_NEXT(p->val) = HT_HASH(ht, nIndex);
+ HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
ht->nNumUsed = idx + 1;
ht->nNumOfElements++;
}
ZVAL_PTR(&p->val, ptr);
p->key = zend_string_copy(key);
p->h = zend_string_hash_val(key);
- nIndex = p->h & ht->nTableMask;
- Z_NEXT(p->val) = ht->arHash[nIndex];
- ht->arHash[nIndex] = idx;
+ nIndex = p->h | ht->nTableMask;
+ Z_NEXT(p->val) = HT_HASH(ht, nIndex);
+ HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
ht->nNumUsed = idx + 1;
ht->nNumOfElements++;
}
ZVAL_INDIRECT(&p->val, ptr);
p->key = zend_string_copy(key);
p->h = zend_string_hash_val(key);
- nIndex = p->h & ht->nTableMask;
- Z_NEXT(p->val) = ht->arHash[nIndex];
- ht->arHash[nIndex] = idx;
+ nIndex = p->h | ht->nTableMask;
+ Z_NEXT(p->val) = HT_HASH(ht, nIndex);
+ HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
ht->nNumUsed = idx + 1;
ht->nNumOfElements++;
}
zend_hash_init(&CG(interned_strings), 1024, NULL, _str_dtor, 1);
- CG(interned_strings).nTableMask = CG(interned_strings).nTableSize - 1;
- CG(interned_strings).arData = (Bucket*) pecalloc(CG(interned_strings).nTableSize, sizeof(Bucket), 1);
- CG(interned_strings).arHash = (uint32_t*) pecalloc(CG(interned_strings).nTableSize, sizeof(uint32_t), 1);
- memset(CG(interned_strings).arHash, INVALID_IDX, CG(interned_strings).nTableSize * sizeof(uint32_t));
+ CG(interned_strings).nTableMask = -CG(interned_strings).nTableSize;
+ HT_SET_DATA_ADDR(&CG(interned_strings), pemalloc(HT_SIZE(&CG(interned_strings)), 1));
+ HT_HASH_RESET(&CG(interned_strings));
/* interned empty string */
str = zend_string_alloc(sizeof("")-1, 1);
}
h = zend_string_hash_val(str);
- nIndex = h & CG(interned_strings).nTableMask;
- idx = CG(interned_strings).arHash[nIndex];
- while (idx != INVALID_IDX) {
- p = CG(interned_strings).arData + idx;
+ nIndex = h | CG(interned_strings).nTableMask;
+ idx = HT_HASH(&CG(interned_strings), nIndex);
+ while (idx != HT_INVALID_IDX) {
+ p = HT_HASH_TO_BUCKET(&CG(interned_strings), idx);
if ((p->h == h) && (p->key->len == str->len)) {
if (!memcmp(p->key->val, str->val, str->len)) {
zend_string_release(str);
if (CG(interned_strings).nNumUsed >= CG(interned_strings).nTableSize) {
if (CG(interned_strings).nTableSize < HT_MAX_SIZE) { /* Let's double the table size */
- Bucket *d = (Bucket *) perealloc_recoverable(CG(interned_strings).arData, (CG(interned_strings).nTableSize << 1) * sizeof(Bucket), 1);
- uint32_t *h = (uint32_t *) perealloc_recoverable(CG(interned_strings).arHash, (CG(interned_strings).nTableSize << 1) * sizeof(uint32_t), 1);
-
- if (d && h) {
- HANDLE_BLOCK_INTERRUPTIONS();
- CG(interned_strings).arData = d;
- CG(interned_strings).arHash = h;
- CG(interned_strings).nTableSize = (CG(interned_strings).nTableSize << 1);
- CG(interned_strings).nTableMask = CG(interned_strings).nTableSize - 1;
+ void *new_data;
+ void *old_data = HT_GET_DATA_ADDR(&CG(interned_strings));
+ Bucket *old_buckets = CG(interned_strings).arData;
+
+ HANDLE_BLOCK_INTERRUPTIONS();
+ CG(interned_strings).nTableSize += CG(interned_strings).nTableSize;
+ CG(interned_strings).nTableMask = -CG(interned_strings).nTableSize;
+ new_data = malloc(HT_SIZE(&CG(interned_strings)));
+
+ if (new_data) {
+ HT_SET_DATA_ADDR(&CG(interned_strings), new_data);
+ memcpy(CG(interned_strings).arData, old_buckets, sizeof(Bucket) * CG(interned_strings).nNumUsed);
+ free(old_data);
zend_hash_rehash(&CG(interned_strings));
- HANDLE_UNBLOCK_INTERRUPTIONS();
+ } else {
+ CG(interned_strings).nTableSize = CG(interned_strings).nTableSize >> 1;
+ CG(interned_strings).nTableMask = -CG(interned_strings).nTableSize;
}
+ HANDLE_UNBLOCK_INTERRUPTIONS();
}
}
p->key = str;
Z_STR(p->val) = str;
Z_TYPE_INFO(p->val) = IS_INTERNED_STRING_EX;
- nIndex = h & CG(interned_strings).nTableMask;
- Z_NEXT(p->val) = CG(interned_strings).arHash[nIndex];
- CG(interned_strings).arHash[nIndex] = idx;
+ nIndex = h | CG(interned_strings).nTableMask;
+ Z_NEXT(p->val) = HT_HASH(&CG(interned_strings), nIndex);
+ HT_HASH(&CG(interned_strings), nIndex) = HT_IDX_TO_HASH(idx);
HANDLE_UNBLOCK_INTERRUPTIONS();
GC_REFCOUNT(p->key) = 1;
zend_string_free(p->key);
- nIndex = p->h & CG(interned_strings).nTableMask;
- if (CG(interned_strings).arHash[nIndex] == idx) {
- CG(interned_strings).arHash[nIndex] = Z_NEXT(p->val);
+ nIndex = p->h | CG(interned_strings).nTableMask;
+ if (HT_HASH(&CG(interned_strings), nIndex) == HT_IDX_TO_HASH(idx)) {
+ HT_HASH(&CG(interned_strings), nIndex) = Z_NEXT(p->val);
} else {
- uint prev = CG(interned_strings).arHash[nIndex];
- while (Z_NEXT(CG(interned_strings).arData[prev].val) != idx) {
- prev = Z_NEXT(CG(interned_strings).arData[prev].val);
+ uint32_t prev = HT_HASH(&CG(interned_strings), nIndex);
+ while (Z_NEXT(HT_HASH_TO_BUCKET(&CG(interned_strings), prev)->val) != idx) {
+ prev = Z_NEXT(HT_HASH_TO_BUCKET(&CG(interned_strings), prev)->val);
}
- Z_NEXT(CG(interned_strings).arData[prev].val) = Z_NEXT(p->val);
+ Z_NEXT(HT_HASH_TO_BUCKET(&CG(interned_strings), prev)->val) = Z_NEXT(p->val);
}
}
#endif
} v;
uint32_t flags;
} u;
- uint32_t nTableSize;
uint32_t nTableMask;
+ Bucket *arData;
uint32_t nNumUsed;
uint32_t nNumOfElements;
+ uint32_t nTableSize;
uint32_t nInternalPointer;
zend_long nNextFreeElement;
- Bucket *arData;
- uint32_t *arHash;
dtor_func_t pDestructor;
};
+/*
+ * HashTable Data Layout
+ * =====================
+ *
+ * +=============================+
+ * | HT_HASH(ht, ht->nTableMask) |
+ * | ... |
+ * | HT_HASH(ht, -1) |
+ * +-----------------------------+
+ * ht->arData ---> | Bucket[0] |
+ * | ... |
+ * | Bucket[ht->nTableSize-1] |
+ * +=============================+
+ */
+
+#define HT_INVALID_IDX ((uint32_t) -1)
+
+#define HT_MIN_MASK ((uint32_t) -2)
#define HT_MIN_SIZE 8
#if SIZEOF_SIZE_T == 4
# define HT_MAX_SIZE 0x04000000 /* small enough to avoid overflow checks */
+# define HT_HASH_TO_BUCKET_EX(data, idx) \
+ ((Bucket*)((char*)(data) + (idx)))
+# define HT_IDX_TO_HASH(idx) \
+ ((idx) * sizeof(Bucket))
#elif SIZEOF_SIZE_T == 8
# define HT_MAX_SIZE 0x80000000
+# define HT_HASH_TO_BUCKET_EX(data, idx) \
+ ((data) + (idx))
+# define HT_IDX_TO_HASH(idx) \
+ (idx)
#else
# error "Unknown SIZEOF_SIZE_T"
#endif
+#define HT_HASH_EX(data, idx) \
+ ((uint32_t*)(data))[(int)(idx)]
+#define HT_HASH(ht, idx) \
+ HT_HASH_EX((ht)->arData, idx)
+
+#define HT_HASH_SIZE(ht) \
+ ((-(int)(ht)->nTableMask) * sizeof(uint32_t))
+#define HT_DATA_SIZE(ht) \
+ ((ht)->nTableSize * sizeof(Bucket))
+#define HT_SIZE(ht) \
+ (HT_HASH_SIZE(ht) + HT_DATA_SIZE(ht))
+#define HT_USED_SIZE(ht) \
+ (HT_HASH_SIZE(ht) + ((ht)->nNumUsed * sizeof(Bucket)))
+#define HT_HASH_RESET(ht) \
+ memset(&HT_HASH(ht, (ht)->nTableMask), HT_INVALID_IDX, HT_HASH_SIZE(ht))
+#define HT_HASH_RESET_PACKED(ht) do { \
+ HT_HASH(ht, -2) = HT_INVALID_IDX; \
+ HT_HASH(ht, -1) = HT_INVALID_IDX; \
+ } while (0)
+#define HT_HASH_TO_BUCKET(ht, idx) \
+ HT_HASH_TO_BUCKET_EX((ht)->arData, idx)
+
+#define HT_SET_DATA_ADDR(ht, ptr) do { \
+ (ht)->arData = (Bucket*)(((char*)(ptr)) + HT_HASH_SIZE(ht)); \
+ } while (0)
+#define HT_GET_DATA_ADDR(ht) \
+ ((char*)((ht)->arData) - HT_HASH_SIZE(ht))
+
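
As a quick sanity check of the size macros above, this hypothetical standalone snippet (BUCKET_SIZE = 32 stands in for sizeof(Bucket) on a typical 64-bit build; the real value depends on the zval layout) computes what HT_SIZE resolves to for a regular and a packed table:

#include <stdio.h>
#include <stdint.h>

#define BUCKET_SIZE 32u                 /* stand-in for sizeof(Bucket) on 64-bit */
#define MIN_MASK    ((uint32_t)-2)      /* mirrors HT_MIN_MASK */

static uint32_t hash_size(uint32_t mask)
{
    /* mirrors HT_HASH_SIZE: -(int)mask slots of sizeof(uint32_t) each */
    return (uint32_t)(-(int32_t)mask) * (uint32_t)sizeof(uint32_t);
}

int main(void)
{
    uint32_t size = 8;
    uint32_t mask = (uint32_t)-(int32_t)size;

    /* regular table: 8 slots (32 bytes) + 8 buckets (256 bytes) = 288 bytes */
    printf("full:   hash=%u data=%u total=%u\n",
           hash_size(mask), size * BUCKET_SIZE, hash_size(mask) + size * BUCKET_SIZE);
    /* packed array: always just two slots (8 bytes) + the buckets */
    printf("packed: hash=%u data=%u total=%u\n",
           hash_size(MIN_MASK), size * BUCKET_SIZE, hash_size(MIN_MASK) + size * BUCKET_SIZE);
    return 0;
}

A packed array thus pays a flat 8 bytes for its (never consulted) hash part, the two HT_INVALID_IDX slots written by HT_HASH_RESET_PACKED, instead of nTableSize * sizeof(uint32_t).
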
typedef uint32_t HashPosition;
typedef struct _HashTableIterator {
while (1) {
pos++;
if (pos >= fe_ht->nNumUsed) {
- pos = INVALID_IDX;
+ pos = HT_INVALID_IDX;
break;
}
p = fe_ht->arData + pos;
while (1) {
pos++;
if (pos >= fe_ht->nNumUsed) {
- pos = INVALID_IDX;
+ pos = HT_INVALID_IDX;
break;
}
p = fe_ht->arData + pos;
while (1) {
pos++;
if (pos >= fe_ht->nNumUsed) {
- pos = INVALID_IDX;
+ pos = HT_INVALID_IDX;
break;
}
p = fe_ht->arData + pos;
while (1) {
pos++;
if (pos >= fe_ht->nNumUsed) {
- pos = INVALID_IDX;
+ pos = HT_INVALID_IDX;
break;
}
p = fe_ht->arData + pos;
while (1) {
pos++;
if (pos >= fe_ht->nNumUsed) {
- pos = INVALID_IDX;
+ pos = HT_INVALID_IDX;
break;
}
p = fe_ht->arData + pos;
while (1) {
pos++;
if (pos >= fe_ht->nNumUsed) {
- pos = INVALID_IDX;
+ pos = HT_INVALID_IDX;
break;
}
p = fe_ht->arData + pos;
ZCSG(interned_strings).nNumUsed--;
ZCSG(interned_strings).nNumOfElements--;
- nIndex = p->h & ZCSG(interned_strings).nTableMask;
- if (ZCSG(interned_strings).arHash[nIndex] == idx) {
- ZCSG(interned_strings).arHash[nIndex] = Z_NEXT(p->val);
+ nIndex = p->h | ZCSG(interned_strings).nTableMask;
+ if (HT_HASH(&ZCSG(interned_strings), nIndex) == HT_IDX_TO_HASH(idx)) {
+ HT_HASH(&ZCSG(interned_strings), nIndex) = Z_NEXT(p->val);
} else {
- uint prev = ZCSG(interned_strings).arHash[nIndex];
- while (Z_NEXT(ZCSG(interned_strings).arData[prev].val) != idx) {
- prev = Z_NEXT(ZCSG(interned_strings).arData[prev].val);
+ uint32_t prev = HT_HASH(&ZCSG(interned_strings), nIndex);
+ while (Z_NEXT(HT_HASH_TO_BUCKET(&ZCSG(interned_strings), prev)->val) != idx) {
+ prev = Z_NEXT(HT_HASH_TO_BUCKET(&ZCSG(interned_strings), prev)->val);
}
- Z_NEXT(ZCSG(interned_strings).arData[prev].val) = Z_NEXT(p->val);
+ Z_NEXT(HT_HASH_TO_BUCKET(&ZCSG(interned_strings), prev)->val) = Z_NEXT(p->val);
}
}
}
zend_ulong h;
uint nIndex;
uint idx;
- Bucket *p;
+ Bucket *arData, *p;
if (IS_ACCEL_INTERNED(str)) {
/* this is already an interned string */
}
h = zend_string_hash_val(str);
- nIndex = h & ZCSG(interned_strings).nTableMask;
+ nIndex = h | ZCSG(interned_strings).nTableMask;
/* check for existing interned string */
- idx = ZCSG(interned_strings).arHash[nIndex];
- while (idx != INVALID_IDX) {
- p = ZCSG(interned_strings).arData + idx;
+ idx = HT_HASH(&ZCSG(interned_strings), nIndex);
+ arData = ZCSG(interned_strings).arData;
+ while (idx != HT_INVALID_IDX) {
+ p = HT_HASH_TO_BUCKET_EX(arData, idx);
if ((p->h == h) && (p->key->len == str->len)) {
if (!memcmp(p->key->val, str->val, str->len)) {
return p->key;
}
h = zend_string_hash_val(str);
- nIndex = h & ZCSG(interned_strings).nTableMask;
+ nIndex = h | ZCSG(interned_strings).nTableMask;
/* check for existing interned string */
- idx = ZCSG(interned_strings).arHash[nIndex];
- while (idx != INVALID_IDX) {
- p = ZCSG(interned_strings).arData + idx;
+ idx = HT_HASH(&ZCSG(interned_strings), nIndex);
+ while (idx != HT_INVALID_IDX) {
+ p = HT_HASH_TO_BUCKET(&ZCSG(interned_strings), idx);
if ((p->h == h) && (p->key->len == str->len)) {
if (!memcmp(p->key->val, str->val, str->len)) {
zend_string_release(str);
p->key->len = str->len;
memcpy(p->key->val, str->val, str->len);
ZVAL_INTERNED_STR(&p->val, p->key);
- Z_NEXT(p->val) = ZCSG(interned_strings).arHash[nIndex];
- ZCSG(interned_strings).arHash[nIndex] = idx;
+ Z_NEXT(p->val) = HT_HASH(&ZCSG(interned_strings), nIndex);
+ HT_HASH(&ZCSG(interned_strings), nIndex) = HT_IDX_TO_HASH(idx);
zend_string_release(str);
return p->key;
#else
static inline void zend_accel_fast_del_bucket(HashTable *ht, uint32_t idx, Bucket *p)
{
- uint32_t nIndex = p->h & ht->nTableMask;
- uint32_t i = ht->arHash[nIndex];
+ uint32_t nIndex = p->h | ht->nTableMask;
+ uint32_t i = HT_HASH(ht, nIndex);
ht->nNumUsed--;
ht->nNumOfElements--;
if (idx != i) {
- Bucket *prev = ht->arData + i;
+ Bucket *prev = HT_HASH_TO_BUCKET(ht, i);
while (Z_NEXT(prev->val) != idx) {
i = Z_NEXT(prev->val);
- prev = ht->arData + i;
+ prev = HT_HASH_TO_BUCKET(ht, i);
}
Z_NEXT(prev->val) = Z_NEXT(p->val);
} else {
- ht->arHash[p->h & ht->nTableMask] = Z_NEXT(p->val);
+ HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
}
}
}
}
}
- zend_accel_fast_del_bucket(EG(function_table), _idx-1, _p);
+ zend_accel_fast_del_bucket(EG(function_table), HT_IDX_TO_HASH(_idx-1), _p);
}
} ZEND_HASH_FOREACH_END();
}
ce->static_members_table = NULL;
}
- zend_accel_fast_del_bucket(EG(class_table), _idx-1, _p);
+ zend_accel_fast_del_bucket(EG(class_table), HT_IDX_TO_HASH(_idx-1), _p);
}
} ZEND_HASH_FOREACH_END();
if (c->flags & CONST_PERSISTENT) {
break;
} else {
- zend_accel_fast_del_bucket(EG(zend_constants), _idx-1, _p);
+ zend_accel_fast_del_bucket(EG(zend_constants), HT_IDX_TO_HASH(_idx-1), _p);
}
} ZEND_HASH_FOREACH_END();
}
# ifndef ZTS
zend_hash_init(&ZCSG(interned_strings), (ZCG(accel_directives).interned_strings_buffer * 1024 * 1024) / (sizeof(Bucket) + sizeof(Bucket*) + 8 /* average string length */), NULL, NULL, 1);
if (ZCG(accel_directives).interned_strings_buffer) {
- ZCSG(interned_strings).nTableMask = ZCSG(interned_strings).nTableSize - 1;
- ZCSG(interned_strings).arData = zend_shared_alloc(ZCSG(interned_strings).nTableSize * sizeof(Bucket));
- ZCSG(interned_strings).arHash = (uint32_t*)zend_shared_alloc(ZCSG(interned_strings).nTableSize * sizeof(uint32_t));
+ void *data;
+
+ ZCSG(interned_strings).nTableMask = -ZCSG(interned_strings).nTableSize;
+ data = zend_shared_alloc(HT_SIZE(&ZCSG(interned_strings)));
ZCSG(interned_strings_start) = zend_shared_alloc((ZCG(accel_directives).interned_strings_buffer * 1024 * 1024));
- if (!ZCSG(interned_strings).arData || !ZCSG(interned_strings_start)) {
+ if (!data || !ZCSG(interned_strings_start)) {
zend_accel_error(ACCEL_LOG_FATAL, ACCELERATOR_PRODUCT_NAME " cannot allocate buffer for interned strings");
return FAILURE;
}
- memset(ZCSG(interned_strings).arHash, INVALID_IDX, ZCSG(interned_strings).nTableSize * sizeof(uint32_t));
+ HT_SET_DATA_ADDR(&ZCSG(interned_strings), data);
+ HT_HASH_RESET(&ZCSG(interned_strings));
ZCSG(interned_strings_end) = ZCSG(interned_strings_start) + (ZCG(accel_directives).interned_strings_buffer * 1024 * 1024);
ZCSG(interned_strings_top) = ZCSG(interned_strings_start);
typedef int (*id_function_t)(void *, void *);
typedef void (*unique_copy_ctor_func_t)(void *pElement);
-static const uint32_t uninitialized_bucket = {INVALID_IDX};
-
static void zend_hash_clone_zval(HashTable *ht, HashTable *source, int bind);
static zend_ast *zend_ast_clone(zend_ast *ast);
ht->nNextFreeElement = source->nNextFreeElement;
ht->pDestructor = ZVAL_PTR_DTOR;
ht->u.flags = (source->u.flags & HASH_FLAG_INITIALIZED) | HASH_FLAG_APPLY_PROTECTION;
- ht->arData = NULL;
- ht->arHash = NULL;
- ht->nInternalPointer = source->nNumOfElements ? 0 : INVALID_IDX;
+ ht->nInternalPointer = source->nNumOfElements ? 0 : HT_INVALID_IDX;
if (!(ht->u.flags & HASH_FLAG_INITIALIZED)) {
- ht->arHash = (uint32_t*)&uninitialized_bucket;
+ ht->arData = source->arData;
return;
}
if (source->u.flags & HASH_FLAG_PACKED) {
ht->u.flags |= HASH_FLAG_PACKED;
- ht->arData = (Bucket *) emalloc(ht->nTableSize * sizeof(Bucket));
- ht->arHash = (uint32_t*)&uninitialized_bucket;
+ HT_SET_DATA_ADDR(ht, (Bucket *) emalloc(HT_SIZE(ht)));
+ HT_HASH_RESET_PACKED(ht);
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (Z_TYPE(p->val) == IS_UNDEF) continue;
- nIndex = p->h & ht->nTableMask;
+ nIndex = p->h | ht->nTableMask;
r = ht->arData + ht->nNumUsed;
q = ht->arData + p->h;
zend_clone_zval(&q->val, bind);
}
} else {
- ht->arData = (Bucket *) emalloc(ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)));
- ht->arHash = (uint32_t*)(ht->arData + ht->nTableSize);
- memset(ht->arHash, INVALID_IDX, sizeof(uint32_t) * ht->nTableSize);
+ HT_SET_DATA_ADDR(ht, emalloc(HT_SIZE(ht)));
+ HT_HASH_RESET(ht);
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (Z_TYPE(p->val) == IS_UNDEF) continue;
- nIndex = p->h & ht->nTableMask;
+ nIndex = p->h | ht->nTableMask;
/* Insert into hash collision list */
q = ht->arData + ht->nNumUsed;
- Z_NEXT(q->val) = ht->arHash[nIndex];
- ht->arHash[nIndex] = ht->nNumUsed++;
+ Z_NEXT(q->val) = HT_HASH(ht, nIndex);
+ HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(ht->nNumUsed++);
/* Initialize key */
q->h = p->h;
ht->nNextFreeElement = source->nNextFreeElement;
ht->pDestructor = ZEND_FUNCTION_DTOR;
ht->u.flags = (source->u.flags & HASH_FLAG_INITIALIZED);
- ht->nInternalPointer = source->nNumOfElements ? 0 : INVALID_IDX;
+ ht->nInternalPointer = source->nNumOfElements ? 0 : HT_INVALID_IDX;
if (!(ht->u.flags & HASH_FLAG_INITIALIZED)) {
- ht->arHash = (uint32_t*)&uninitialized_bucket;
+ ht->arData = source->arData;
return;
}
ZEND_ASSERT(!(source->u.flags & HASH_FLAG_PACKED));
- ht->arData = (Bucket *) emalloc(ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)));
- ht->arHash = (uint32_t *)(ht->arData + ht->nTableSize);
- memset(ht->arHash, INVALID_IDX, sizeof(uint32_t) * ht->nTableSize);
+ HT_SET_DATA_ADDR(ht, emalloc(HT_SIZE(ht)));
+ HT_HASH_RESET(ht);
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (Z_TYPE(p->val) == IS_UNDEF) continue;
- nIndex = p->h & ht->nTableMask;
+ nIndex = p->h | ht->nTableMask;
/* Insert into hash collision list */
q = ht->arData + ht->nNumUsed;
- Z_NEXT(q->val) = ht->arHash[nIndex];
- ht->arHash[nIndex] = ht->nNumUsed++;
+ Z_NEXT(q->val) = HT_HASH(ht, nIndex);
+ HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(ht->nNumUsed++);
/* Initialize key */
q->h = p->h;
ht->nNextFreeElement = source->nNextFreeElement;
ht->pDestructor = NULL;
ht->u.flags = (source->u.flags & HASH_FLAG_INITIALIZED);
- ht->nInternalPointer = source->nNumOfElements ? 0 : INVALID_IDX;
+ ht->nInternalPointer = source->nNumOfElements ? 0 : HT_INVALID_IDX;
if (!(ht->u.flags & HASH_FLAG_INITIALIZED)) {
- ht->arHash = (uint32_t*)&uninitialized_bucket;
+ ht->arData = source->arData;
return;
}
ZEND_ASSERT(!(source->u.flags & HASH_FLAG_PACKED));
- ht->arData = (Bucket *) emalloc(ht->nTableSize * (sizeof(Bucket) + sizeof(uint32_t)));
- ht->arHash = (uint32_t*)(ht->arData + ht->nTableSize);
- memset(ht->arHash, INVALID_IDX, sizeof(uint32_t) * ht->nTableSize);
+ HT_SET_DATA_ADDR(ht, emalloc(HT_SIZE(ht)));
+ HT_HASH_RESET(ht);
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (Z_TYPE(p->val) == IS_UNDEF) continue;
- nIndex = p->h & ht->nTableMask;
+ nIndex = p->h | ht->nTableMask;
/* Insert into hash collision list */
q = ht->arData + ht->nNumUsed;
- Z_NEXT(q->val) = ht->arHash[nIndex];
- ht->arHash[nIndex] = ht->nNumUsed++;
+ Z_NEXT(q->val) = HT_HASH(ht, nIndex);
+ HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(ht->nNumUsed++);
/* Initialize key */
q->h = p->h;
}
}
}
- target->nInternalPointer = target->nNumOfElements ? 0 : INVALID_IDX;
+ target->nInternalPointer = target->nNumOfElements ? 0 : HT_INVALID_IDX;
return;
failure:
}
Z_PTR_P(t) = ARENA_REALLOC(Z_PTR(p->val));
}
- target->nInternalPointer = target->nNumOfElements ? 0 : INVALID_IDX;
+ target->nInternalPointer = target->nNumOfElements ? 0 : HT_INVALID_IDX;
return;
failure:
pCopyConstructor(&Z_PTR_P(t));
}
}
- target->nInternalPointer = target->nNumOfElements ? 0 : INVALID_IDX;
+ target->nInternalPointer = target->nNumOfElements ? 0 : HT_INVALID_IDX;
return;
failure:
static void zend_persist_zval(zval *z);
static void zend_persist_zval_const(zval *z);
-static const uint32_t uninitialized_bucket = {INVALID_IDX};
+static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
+ {HT_INVALID_IDX, HT_INVALID_IDX};
static void zend_hash_persist(HashTable *ht, zend_persist_func_t pPersistElement)
{
Bucket *p;
if (!(ht->u.flags & HASH_FLAG_INITIALIZED)) {
- ht->arHash = (uint32_t*)&uninitialized_bucket;
+ HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
return;
}
if (ht->u.flags & HASH_FLAG_PACKED) {
- zend_accel_store(ht->arData, sizeof(Bucket) * ht->nNumUsed);
- ht->arHash = (uint32_t*)&uninitialized_bucket;
+ void *data = HT_GET_DATA_ADDR(ht);
+ zend_accel_store(data, HT_USED_SIZE(ht));
+ HT_SET_DATA_ADDR(ht, data);
} else {
- Bucket *d = (Bucket*)ZCG(mem);
- uint32_t *h = (uint32_t*)(d + ht->nNumUsed);
+ void *data = ZCG(mem);
+ void *old_data = HT_GET_DATA_ADDR(ht);
- ZCG(mem) = (void*)(h + ht->nTableSize);
- memcpy(d, ht->arData, sizeof(Bucket) * ht->nNumUsed);
- memcpy(h, ht->arHash, sizeof(uint32_t) * ht->nTableSize);
- efree(ht->arData);
- ht->arData = d;
- ht->arHash = h;
+ ZCG(mem) = (void*)((char*)data + HT_USED_SIZE(ht));
+ memcpy(data, old_data, HT_USED_SIZE(ht));
+ efree(old_data);
+ HT_SET_DATA_ADDR(ht, data);
}
+
for (idx = 0; idx < ht->nNumUsed; idx++) {
p = ht->arData + idx;
if (Z_TYPE(p->val) == IS_UNDEF) continue;
Bucket *p;
if (!(ht->u.flags & HASH_FLAG_INITIALIZED)) {
- ht->arHash = (uint32_t*)&uninitialized_bucket;
+ HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
return;
}
if (ht->u.flags & HASH_FLAG_PACKED) {
- ht->arData = zend_accel_memdup(ht->arData, sizeof(Bucket) * ht->nNumUsed);
- ht->arHash = (uint32_t*)&uninitialized_bucket;
+ HT_SET_DATA_ADDR(ht, zend_accel_memdup(HT_GET_DATA_ADDR(ht), HT_USED_SIZE(ht)));
} else {
- Bucket *d = (Bucket*)ZCG(mem);
- uint32_t *h = (uint32_t*)(d + ht->nNumUsed);
-
- ZCG(mem) = (void*)(h + ht->nTableSize);
- memcpy(d, ht->arData, sizeof(Bucket) * ht->nNumUsed);
- memcpy(h, ht->arHash, sizeof(uint32_t) * ht->nTableSize);
- ht->arData = d;
- ht->arHash = h;
+ void *data = ZCG(mem);
+
+ ZCG(mem) = (void*)((char*)data + HT_USED_SIZE(ht));
+ memcpy(data, HT_GET_DATA_ADDR(ht), HT_USED_SIZE(ht));
+ HT_SET_DATA_ADDR(ht, data);
}
for (idx = 0; idx < ht->nNumUsed; idx++) {
p = ht->arData + idx;
if (!(ht->u.flags & HASH_FLAG_INITIALIZED)) {
return;
}
- if (ht->u.flags & HASH_FLAG_PACKED) {
- ADD_SIZE(sizeof(Bucket) * ht->nNumUsed);
- } else {
- ADD_SIZE(sizeof(Bucket) * ht->nNumUsed + sizeof(uint32_t) * ht->nTableSize);
- }
+
+ ADD_SIZE(HT_USED_SIZE(ht));
for (idx = 0; idx < ht->nNumUsed; idx++) {
p = ht->arData + idx;
static void spl_array_update_pos(HashTable *ht, spl_array_object* intern) /* {{{ */
{
uint pos = intern->pos;
- if (pos != INVALID_IDX) {
+ if (pos != HT_INVALID_IDX) {
intern->pos_h = ht->arData[pos].h;
}
} /* }}} */
return SUCCESS;
}
} else {
- idx = ht->arHash[intern->pos_h & ht->nTableMask];
- while (idx != INVALID_IDX) {
- if (idx == intern->pos) {
+ uint32_t pos = HT_IDX_TO_HASH(intern->pos);
+
+ idx = HT_HASH(ht, intern->pos_h | ht->nTableMask);
+ while (idx != HT_INVALID_IDX) {
+ if (idx == pos) {
return SUCCESS;
}
- idx = Z_NEXT(ht->arData[idx].val);
+ idx = Z_NEXT(HT_HASH_TO_BUCKET(ht, idx)->val);
}
}
/* HASH_UNPROTECT_RECURSION(ht); */
return FAILURE;
}
- if (object->pos != INVALID_IDX && (object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, ht) == FAILURE) {
+ if (object->pos != HT_INVALID_IDX && (object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, ht) == FAILURE) {
php_error_docref(NULL, E_NOTICE, "%sArray was modified outside object and internal position is no longer valid", msg_prefix);
return FAILURE;
}
}
spl_array_write_dimension(object, NULL, append_value);
- if (intern->pos == INVALID_IDX) {
+ if (intern->pos == HT_INVALID_IDX) {
if (aht->nNumUsed && !Z_ISUNDEF(aht->arData[aht->nNumUsed-1].val)) {
spl_array_set_pos(intern, aht, aht->nNumUsed - 1);
}
pos = intern->pos;
*count = 0;
spl_array_rewind(intern);
- while(intern->pos != INVALID_IDX && spl_array_next(intern) == SUCCESS) {
+ while(intern->pos != HT_INVALID_IDX && spl_array_next(intern) == SUCCESS) {
(*count)++;
}
spl_array_set_pos(intern, aht, pos);
intern = emalloc(sizeof(spl_SplObjectStorage) + zend_object_properties_size(parent));
memset(intern, 0, sizeof(spl_SplObjectStorage) - sizeof(zval));
- intern->pos = INVALID_IDX;
+ intern->pos = HT_INVALID_IDX;
zend_object_std_init(&intern->std, class_type);
object_properties_init(&intern->std, class_type);
in_hash->nNumOfElements = out_hash.nNumOfElements;
in_hash->nNextFreeElement = out_hash.nNextFreeElement;
in_hash->arData = out_hash.arData;
- in_hash->arHash = out_hash.arHash;
in_hash->pDestructor = out_hash.pDestructor;
zend_hash_internal_pointer_reset(in_hash);
Z_ARRVAL_P(stack)->nNumOfElements = new_hash.nNumOfElements;
Z_ARRVAL_P(stack)->nNextFreeElement = new_hash.nNextFreeElement;
Z_ARRVAL_P(stack)->arData = new_hash.arData;
- Z_ARRVAL_P(stack)->arHash = new_hash.arHash;
Z_ARRVAL_P(stack)->pDestructor = new_hash.pDestructor;
zend_hash_internal_pointer_reset(Z_ARRVAL_P(stack));