{
GC_REFCOUNT(ht) = 1;
GC_TYPE_INFO(ht) = IS_ARRAY;
- ht->u.flags = (persistent ? HASH_FLAG_PERSISTENT : 0) | HASH_FLAG_APPLY_PROTECTION;
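+ /* a freshly initialized table has no string keys yet, so it trivially has only "static" (interned or integer) keys */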
+ ht->u.flags = (persistent ? HASH_FLAG_PERSISTENT : 0) | HASH_FLAG_APPLY_PROTECTION | HASH_FLAG_STATIC_KEYS;
ht->nTableSize = zend_hash_check_size(nSize);
ht->nTableMask = HT_MIN_MASK;
HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HANDLE_BLOCK_INTERRUPTIONS();
- ht->u.flags |= HASH_FLAG_PACKED;
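+ /* packed tables use integer keys only, so the static-keys flag is asserted together with the packed flag */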
+ ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
ht->nTableMask = HT_MIN_MASK;
HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
HT_HASH_RESET_PACKED(ht);
p = ht->arData + idx;
p->h = h = zend_string_hash_val(key);
p->key = key;
- zend_string_addref(key);
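+ /* interned keys are not refcounted; any other key takes a reference and clears the static-keys flag */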
+ if (!IS_INTERNED(key)) {
+ zend_string_addref(key);
+ ht->u.flags &= ~HASH_FLAG_STATIC_KEYS;
+ }
ZVAL_COPY_VALUE(&p->val, pData);
nIndex = h | ht->nTableMask;
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
if (ht->pDestructor) {
SET_INCONSISTENT(HT_IS_DESTROYING);
- if (ht->u.flags & HASH_FLAG_PACKED) {
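+ /* packed or static keys: only the values need the destructor, there are no key strings to release */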
+ if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
ht->pDestructor(&p->val);
SET_INCONSISTENT(HT_DESTROYED);
} else {
- if (!(ht->u.flags & HASH_FLAG_PACKED)) {
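+ /* without a destructor, only string keys may still need releasing; packed/static-keys tables skip the loop entirely */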
+ if (!(ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS))) {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
if (EXPECTED(p->key)) {
end = p + ht->nNumUsed;
SET_INCONSISTENT(HT_IS_DESTROYING);
- if (ht->u.flags & HASH_FLAG_PACKED) {
+ if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
do {
i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
} while (++p != end);
p = ht->arData;
end = p + ht->nNumUsed;
if (ht->pDestructor) {
- if (ht->u.flags & HASH_FLAG_PACKED) {
+ if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
ht->pDestructor(&p->val);
} while (++p != end);
}
} else {
- if (!(ht->u.flags & HASH_FLAG_PACKED)) {
+ if (!(ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS))) {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
if (EXPECTED(p->key)) {
if (ht->nNumUsed) {
p = ht->arData;
end = p + ht->nNumUsed;
- do {
- if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
- i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
- if (EXPECTED(p->key)) {
- zend_string_release(p->key);
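+ /* split the loop: with static keys only the values are destroyed, otherwise each string key is released as well */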
+ if (ht->u.flags & HASH_FLAG_STATIC_KEYS) {
+ do {
+ if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
+ i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
}
- }
- } while (++p != end);
- if (!(ht->u.flags & HASH_FLAG_PACKED)) {
- HT_HASH_RESET(ht);
+ } while (++p != end);
+ } else {
+ do {
+ if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
+ i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
+ if (EXPECTED(p->key)) {
+ zend_string_release(p->key);
+ }
+ }
+ } while (++p != end);
}
+ HT_HASH_RESET(ht);
}
ht->nNumUsed = 0;
ht->nNumOfElements = 0;
target_idx = 0;
if (target->u.flags & HASH_FLAG_INITIALIZED) {
- if (target->u.flags & HASH_FLAG_PACKED) {
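+ /* an immutable source array can be duplicated with a flat memcpy of its bucket data */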
+ if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
+ target->nNumUsed = source->nNumUsed;
+ target->nNumOfElements = source->nNumOfElements;
+ target->nNextFreeElement = source->nNextFreeElement;
+ HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
+ target->nInternalPointer = source->nInternalPointer;
+ memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
+ if (target->nNumOfElements > 0 &&
+ target->nInternalPointer == HT_INVALID_IDX) {
+ idx = 0;
+ while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
+ idx++;
+ }
+ target->nInternalPointer = idx;
+ }
+ } else if (target->u.flags & HASH_FLAG_PACKED) {
target->nNumUsed = source->nNumUsed;
target->nNumOfElements = source->nNumOfElements;
target->nNextFreeElement = source->nNextFreeElement;
}
target->nInternalPointer = idx;
}
+ } else if (target->u.flags & HASH_FLAG_STATIC_KEYS) {
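+ /* static keys are interned, so buckets can reuse them without zend_string_addref() */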
+ target->nNextFreeElement = source->nNextFreeElement;
+ HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
+ HT_HASH_RESET(target);
+
+ for (idx = 0; idx < source->nNumUsed; idx++) {
+ p = source->arData + idx;
+ if (Z_TYPE(p->val) == IS_UNDEF) continue;
+ /* INDIRECT element may point to UNDEF-ined slots */
+ data = &p->val;
+ if (Z_TYPE_P(data) == IS_INDIRECT) {
+ data = Z_INDIRECT_P(data);
+ if (Z_TYPE_P(data) == IS_UNDEF) {
+ continue;
+ }
+ }
+
+ if (source->nInternalPointer == idx) {
+ target->nInternalPointer = target_idx;
+ }
+
+ q = target->arData + target_idx;
+ q->h = p->h;
+ q->key = p->key;
+ nIndex = q->h | target->nTableMask;
+ Z_NEXT(q->val) = HT_HASH(target, nIndex);
+ HT_HASH(target, nIndex) = HT_IDX_TO_HASH(target_idx);
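+ /* dereference references held only by this slot; otherwise copy the zval and bump its refcount */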
+ if (Z_OPT_REFCOUNTED_P(data)) {
+ if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1) {
+ ZVAL_COPY(&q->val, Z_REFVAL_P(data));
+ } else {
+ ZVAL_COPY(&q->val, data);
+ }
+ } else {
+ ZVAL_COPY_VALUE(&q->val, data);
+ }
+ target_idx++;
+ }
+ target->nNumUsed = target_idx;
+ target->nNumOfElements = target_idx;
+ if (target->nNumOfElements > 0 &&
+ target->nInternalPointer == HT_INVALID_IDX) {
+ target->nInternalPointer = 0;
+ }
} else {
target->nNextFreeElement = source->nNextFreeElement;
HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
void *old_data = HT_GET_DATA_ADDR(ht);
Bucket *old_buckets = ht->arData;
- ht->u.flags |= HASH_FLAG_PACKED;
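+ /* a table converted to packed form has integer keys only, so the static-keys flag holds again */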
+ ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
ht->nTableMask = HT_MIN_MASK;
HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
#define HASH_FLAG_APPLY_PROTECTION (1<<1)
#define HASH_FLAG_PACKED (1<<2)
#define HASH_FLAG_INITIALIZED (1<<3)
+#define HASH_FLAG_STATIC_KEYS (1<<4) /* long (numeric) and interned string keys only */
#define HASH_MASK_CONSISTENCY 0x60
Bucket *p = ht->arData + idx;
ZVAL_COPY_VALUE(&p->val, zv);
- p->key = zend_string_copy(key);
- p->h = zend_string_hash_val(key);
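+ /* interned keys already carry a computed hash and need no refcount; anything else clears the static-keys flag */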
+ if (!IS_INTERNED(key)) {
+ ht->u.flags &= ~HASH_FLAG_STATIC_KEYS;
+ zend_string_addref(key);
+ zend_string_hash_val(key);
+ }
+ p->key = key;
+ p->h = key->h;
nIndex = (uint32_t)p->h | ht->nTableMask;
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
Bucket *p = ht->arData + idx;
ZVAL_PTR(&p->val, ptr);
- p->key = zend_string_copy(key);
- p->h = zend_string_hash_val(key);
+ if (!IS_INTERNED(key)) {
+ ht->u.flags &= ~HASH_FLAG_STATIC_KEYS;
+ zend_string_addref(key);
+ zend_string_hash_val(key);
+ }
+ p->key = key;
+ p->h = key->h;
nIndex = (uint32_t)p->h | ht->nTableMask;
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
Bucket *p = ht->arData + idx;
ZVAL_INDIRECT(&p->val, ptr);
- p->key = zend_string_copy(key);
- p->h = zend_string_hash_val(key);
+ if (!IS_INTERNED(key)) {
+ ht->u.flags &= ~HASH_FLAG_STATIC_KEYS;
+ zend_string_addref(key);
+ zend_string_hash_val(key);
+ }
+ p->key = key;
+ p->h = key->h;
nIndex = (uint32_t)p->h | ht->nTableMask;
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);