Diffstat (limited to 'Zend/zend_hash.c')
| -rw-r--r-- | Zend/zend_hash.c | 30 |
1 file changed, 0 insertions, 30 deletions
diff --git a/Zend/zend_hash.c b/Zend/zend_hash.c
index 95652d598d..47215d3511 100644
--- a/Zend/zend_hash.c
+++ b/Zend/zend_hash.c
@@ -191,10 +191,8 @@ static void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
 	if (ht->nTableSize >= HT_MAX_SIZE) {
 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
 	}
-	HANDLE_BLOCK_INTERRUPTIONS();
 	ht->nTableSize += ht->nTableSize;
 	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
-	HANDLE_UNBLOCK_INTERRUPTIONS();
 }
 
 ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, zend_bool packed)
@@ -211,7 +209,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
 	Bucket *old_buckets = ht->arData;
 
 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
-	HANDLE_BLOCK_INTERRUPTIONS();
 	ht->u.flags &= ~HASH_FLAG_PACKED;
 	new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, -ht->nTableSize), (ht)->u.flags & HASH_FLAG_PERSISTENT);
 	ht->nTableMask = -ht->nTableSize;
@@ -219,7 +216,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
 	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
 	pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
 	zend_hash_rehash(ht);
-	HANDLE_UNBLOCK_INTERRUPTIONS();
 }
 
 ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
@@ -228,7 +224,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
 	Bucket *old_buckets = ht->arData;
 
 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
-	HANDLE_BLOCK_INTERRUPTIONS();
 	new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht)->u.flags & HASH_FLAG_PERSISTENT);
 	ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
 	ht->nTableMask = HT_MIN_MASK;
@@ -236,7 +231,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
 	HT_HASH_RESET_PACKED(ht);
 	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
 	pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
-	HANDLE_UNBLOCK_INTERRUPTIONS();
 }
 
 ZEND_API void ZEND_FASTCALL _zend_hash_init_ex(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent, zend_bool bApplyProtection ZEND_FILE_LINE_DC)
@@ -260,10 +254,8 @@ ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend
 	if (packed) {
 		ZEND_ASSERT(ht->u.flags & HASH_FLAG_PACKED);
 		if (nSize > ht->nTableSize) {
-			HANDLE_BLOCK_INTERRUPTIONS();
 			ht->nTableSize = zend_hash_check_size(nSize);
 			HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
-			HANDLE_UNBLOCK_INTERRUPTIONS();
 		}
 	} else {
 		ZEND_ASSERT(!(ht->u.flags & HASH_FLAG_PACKED));
@@ -271,7 +263,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend
 			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
 			Bucket *old_buckets = ht->arData;
 			nSize = zend_hash_check_size(nSize);
-			HANDLE_BLOCK_INTERRUPTIONS();
 			new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
 			ht->nTableSize = nSize;
 			ht->nTableMask = -ht->nTableSize;
@@ -279,7 +270,6 @@ ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend
 			memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
 			pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
 			zend_hash_rehash(ht);
-			HANDLE_UNBLOCK_INTERRUPTIONS();
 		}
 	}
 }
@@ -591,12 +581,10 @@ static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_s
 					data = Z_INDIRECT_P(data);
 				}
 			}
-			HANDLE_BLOCK_INTERRUPTIONS();
 			if (ht->pDestructor) {
 				ht->pDestructor(data);
 			}
 			ZVAL_COPY_VALUE(data, pData);
-			HANDLE_UNBLOCK_INTERRUPTIONS();
 			return data;
 		}
 	}
@@ -604,7 +592,6 @@ static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_s
 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
 
 add_to_hash:
-	HANDLE_BLOCK_INTERRUPTIONS();
 	idx = ht->nNumUsed++;
 	ht->nNumOfElements++;
 	if (ht->nInternalPointer == HT_INVALID_IDX) {
@@ -623,7 +610,6 @@ add_to_hash:
 	nIndex = h | ht->nTableMask;
 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
-	HANDLE_UNBLOCK_INTERRUPTIONS();
 
 	return &p->val;
 }
@@ -762,7 +748,6 @@ static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht,
 	}
 
 add_to_packed:
-	HANDLE_BLOCK_INTERRUPTIONS();
 	/* incremental initialization of empty Buckets */
 	if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) == (HASH_ADD_NEW|HASH_ADD_NEXT)) {
 		ht->nNumUsed = h + 1;
@@ -788,8 +773,6 @@ add_to_packed:
 	p->h = h;
 	p->key = NULL;
 	ZVAL_COPY_VALUE(&p->val, pData);
-	HANDLE_UNBLOCK_INTERRUPTIONS();
-
 	return &p->val;
 
 convert_to_hash:
@@ -801,12 +784,10 @@ convert_to_hash:
 				return NULL;
 			}
 			ZEND_ASSERT(&p->val != pData);
-			HANDLE_BLOCK_INTERRUPTIONS();
 			if (ht->pDestructor) {
 				ht->pDestructor(&p->val);
 			}
 			ZVAL_COPY_VALUE(&p->val, pData);
-			HANDLE_UNBLOCK_INTERRUPTIONS();
 			if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
 				ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
 			}
@@ -817,7 +798,6 @@ convert_to_hash:
 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
 
 add_to_hash:
-	HANDLE_BLOCK_INTERRUPTIONS();
 	idx = ht->nNumUsed++;
 	ht->nNumOfElements++;
 	if (ht->nInternalPointer == HT_INVALID_IDX) {
@@ -834,7 +814,6 @@ add_to_hash:
 	ZVAL_COPY_VALUE(&p->val, pData);
 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
-	HANDLE_UNBLOCK_INTERRUPTIONS();
 
 	return &p->val;
 }
@@ -876,15 +855,12 @@ static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
 
 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
 	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
-		HANDLE_BLOCK_INTERRUPTIONS();
 		zend_hash_rehash(ht);
-		HANDLE_UNBLOCK_INTERRUPTIONS();
 	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
 		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
 		uint32_t nSize = ht->nTableSize + ht->nTableSize;
 		Bucket *old_buckets = ht->arData;
 
-		HANDLE_BLOCK_INTERRUPTIONS();
 		new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
 		ht->nTableSize = nSize;
 		ht->nTableMask = -ht->nTableSize;
@@ -892,7 +868,6 @@ static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
 		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
 		pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
 		zend_hash_rehash(ht);
-		HANDLE_UNBLOCK_INTERRUPTIONS();
 	} else {
 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
 	}
@@ -984,7 +959,6 @@ ZEND_API int ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
 
 static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
 {
-	HANDLE_BLOCK_INTERRUPTIONS();
 	if (!(ht->u.flags & HASH_FLAG_PACKED)) {
 		if (prev) {
 			Z_NEXT(prev->val) = Z_NEXT(p->val);
@@ -1027,7 +1001,6 @@ static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx,
 	} else {
 		ZVAL_UNDEF(&p->val);
 	}
-	HANDLE_UNBLOCK_INTERRUPTIONS();
 }
 
 static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
@@ -2296,7 +2269,6 @@ ZEND_API int ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, co
 			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
 				((ht->u.flags & HASH_FLAG_PACKED) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));
 
-	HANDLE_BLOCK_INTERRUPTIONS();
 	ht->nNumUsed = i;
 	ht->nInternalPointer = 0;
 
@@ -2333,8 +2305,6 @@ ZEND_API int ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, co
 		}
 	}
 
-	HANDLE_UNBLOCK_INTERRUPTIONS();
-
 	return SUCCESS;
 }
