#include "zend_globals.h"
#include "zend_variables.h"
-#define HT_DEBUG 0
-#if HT_DEBUG
-# define HT_ASSERT(c) ZEND_ASSERT(c)
+#if ZEND_DEBUG
+# define HT_ASSERT(ht, expr) \
+ ZEND_ASSERT((expr) || ((ht)->u.flags & HASH_FLAG_ALLOW_COW_VIOLATION))
#else
-# define HT_ASSERT(c)
+# define HT_ASSERT(ht, expr)
#endif
+#define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1)
+
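/* The intended contract, roughly: in ZEND_DEBUG builds every mutating
 * HashTable operation now asserts that the table is unshared, unless the
 * table has been flagged with HASH_FLAG_ALLOW_COW_VIOLATION (presumably via
 * the HT_ALLOW_COW_VIOLATION macro used in var_unserializer below).
 * A minimal sketch:
 *
 *     HashTable *ht = ...;         // suppose GC_REFCOUNT(ht) > 1
 *     zend_hash_index_del(ht, 0);  // HT_ASSERT_RC1 fires: COW violation
 *     HT_ALLOW_COW_VIOLATION(ht);  // opt this table out of the check
 *     zend_hash_index_del(ht, 0);  // passes
 */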
#define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
#if ZEND_DEBUG
static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, int packed)
{
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
ZEND_ASSERT(!((ht)->u.flags & HASH_FLAG_INITIALIZED));
if (packed) {
HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
static zend_always_inline void zend_hash_check_init(HashTable *ht, int packed)
{
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
zend_hash_real_init_ex(ht, packed);
}
static void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
{
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
if (ht->nTableSize >= HT_MAX_SIZE) {
zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
}
{
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
zend_hash_real_init_ex(ht, packed);
}
void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
Bucket *old_buckets = ht->arData;
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
ht->u.flags &= ~HASH_FLAG_PACKED;
new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, -ht->nTableSize), (ht)->u.flags & HASH_FLAG_PERSISTENT);
ht->nTableMask = -ht->nTableSize;
void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
Bucket *old_buckets = ht->arData;
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht)->u.flags & HASH_FLAG_PERSISTENT);
ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
ht->nTableMask = HT_MIN_MASK;
ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend_bool packed)
{
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
if (nSize == 0) return;
if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
if (nSize > ht->nTableSize) {
Bucket *p;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
if (UNEXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
CHECK_INIT(ht, 0);
Bucket *p;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
if (UNEXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
CHECK_INIT(ht, h < ht->nTableSize);
{
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
zend_hash_rehash(ht);
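/* E.g. with nNumOfElements == 3200, the rehash branch is taken once more
 * than 100 (3200 >> 5) slots are tombstones; the table is then compacted
 * in place rather than grown. */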
ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
{
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
}
Bucket *prev = NULL;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
h = zend_string_hash_val(key);
nIndex = h | ht->nTableMask;
Bucket *prev = NULL;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
h = zend_string_hash_val(key);
nIndex = h | ht->nTableMask;
Bucket *prev = NULL;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
h = zend_inline_hash_func(str, len);
nIndex = h | ht->nTableMask;
Bucket *prev = NULL;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
h = zend_inline_hash_func(str, len);
nIndex = h | ht->nTableMask;
Bucket *prev = NULL;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
if (ht->u.flags & HASH_FLAG_PACKED) {
if (h < ht->nNumUsed) {
Bucket *p, *end;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) <= 1);
+ HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);
if (ht->nNumUsed) {
p = ht->arData;
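/* The destruction paths assert <= 1 rather than == 1: a destructor may run
 * either on a table whose refcount has already been dropped to 0, or
 * directly on an unshared (rc == 1) table. */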
Bucket *p, *end;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) <= 1);
+ HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);
/* break possible cycles */
GC_REMOVE_FROM_BUFFER(ht);
Bucket *p, *end;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
if (ht->nNumUsed) {
p = ht->arData;
Bucket *p, *end;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
if (ht->nNumUsed) {
p = ht->arData;
Bucket *p;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
p = ht->arData;
for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
Bucket *p;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
idx = ht->nNumUsed;
p = ht->arData + ht->nNumUsed;
result = apply_func(&p->val);
if (result & ZEND_HASH_APPLY_REMOVE) {
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
result = apply_func(&p->val, argument);
if (result & ZEND_HASH_APPLY_REMOVE) {
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
result = apply_func(&p->val, num_args, args, &hash_key);
if (result & ZEND_HASH_APPLY_REMOVE) {
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
result = apply_func(&p->val);
if (result & ZEND_HASH_APPLY_REMOVE) {
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
IS_CONSISTENT(source);
IS_CONSISTENT(target);
- HT_ASSERT(GC_REFCOUNT(target) == 1);
+ HT_ASSERT_RC1(target);
setTargetPointer = (target->nInternalPointer == HT_INVALID_IDX);
for (idx = 0; idx < source->nNumUsed; idx++) {
IS_CONSISTENT(source);
IS_CONSISTENT(target);
- HT_ASSERT(GC_REFCOUNT(target) == 1);
+ HT_ASSERT_RC1(target);
if (overwrite) {
for (idx = 0; idx < source->nNumUsed; idx++) {
IS_CONSISTENT(source);
IS_CONSISTENT(target);
- HT_ASSERT(GC_REFCOUNT(target) == 1);
+ HT_ASSERT_RC1(target);
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
uint32_t idx;
IS_CONSISTENT(ht);
- HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
+ HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
for (idx = 0; idx < ht->nNumUsed; idx++) {
if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
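/* Here pos may point into a caller-owned HashPosition, which is harmless on
 * a shared table; rc1 (or the COW-violation flag) is only required when pos
 * aliases the table's own nInternalPointer, i.e. when the table itself is
 * modified. */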
uint32_t idx;
IS_CONSISTENT(ht);
- HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
+ HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
idx = ht->nNumUsed;
while (idx > 0) {
uint32_t idx = *pos;
IS_CONSISTENT(ht);
- HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
+ HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
if (idx != HT_INVALID_IDX) {
while (1) {
uint32_t idx = *pos;
IS_CONSISTENT(ht);
- HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
+ HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
if (idx != HT_INVALID_IDX) {
while (idx > 0) {
uint32_t i, j;
IS_CONSISTENT(ht);
- HT_ASSERT(GC_REFCOUNT(ht) == 1);
+ HT_ASSERT_RC1(ht);
if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) { /* Doesn't require sorting */
return SUCCESS;
yy2:
++YYCURSOR;
yy3:
-#line 999 "ext/standard/var_unserializer.re"
+#line 1005 "ext/standard/var_unserializer.re"
{ return 0; }
#line 682 "ext/standard/var_unserializer.c"
yy4:
goto yy3;
yy15:
++YYCURSOR;
-#line 993 "ext/standard/var_unserializer.re"
+#line 999 "ext/standard/var_unserializer.re"
{
/* this is the case where we have less data than planned */
php_error_docref(NULL, E_NOTICE, "Unexpected end of serialized data");
goto yy18;
yy82:
++YYCURSOR;
-#line 841 "ext/standard/var_unserializer.re"
+#line 847 "ext/standard/var_unserializer.re"
{
size_t len, len2, len3, maxlen;
zend_long elements;
zend_hash_real_init(Z_ARRVAL_P(rval), 0);
}
+ /* The array may contain references to itself, in which case we'll be modifying an
+ * rc>1 array. This is okay, since the array is, ostensibly, only visible to
+ * unserialize (in practice unserialization handlers also see it). Ideally we should
+ * prohibit "r:" references to non-objects, as we only generate them for objects. */
+ HT_ALLOW_COW_VIOLATION(Z_ARRVAL_P(rval));
+
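/* Illustrative payload: unserialize("a:1:{i:0;r:1;}") makes element 0 an
 * "r:" reference back to variable 1, the array itself, so the array is
 * already rc>1 while process_nested_data() is still populating it. */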
if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_ARRVAL_P(rval), elements, 0)) {
return 0;
}
return finish_nested_data(UNSERIALIZE_PASSTHRU);
}
-#line 1378 "ext/standard/var_unserializer.c"
+#line 1384 "ext/standard/var_unserializer.c"
yy88:
yych = *++YYCURSOR;
if (yych <= ',') {
goto yy18;
yy92:
++YYCURSOR;
-#line 830 "ext/standard/var_unserializer.re"
+#line 836 "ext/standard/var_unserializer.re"
{
zend_long elements;
if (!var_hash) return 0;
}
return object_common2(UNSERIALIZE_PASSTHRU, elements);
}
-#line 1414 "ext/standard/var_unserializer.c"
+#line 1420 "ext/standard/var_unserializer.c"
yy94:
++YYCURSOR;
#line 740 "ext/standard/var_unserializer.re"
ZVAL_STRINGL(rval, str, len);
return 1;
}
-#line 1449 "ext/standard/var_unserializer.c"
+#line 1455 "ext/standard/var_unserializer.c"
yy96:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
return 1;
}
-#line 1473 "ext/standard/var_unserializer.c"
+#line 1479 "ext/standard/var_unserializer.c"
}
-#line 1001 "ext/standard/var_unserializer.re"
+#line 1007 "ext/standard/var_unserializer.re"
return 0;