author     Ryan Dahl <ry@tinyclouds.org>  2011-10-10 17:58:30 -0700
committer  Ryan Dahl <ry@tinyclouds.org>  2011-10-10 17:58:30 -0700
commit     3b1d656da56bdd403d1625a0c6a44d75cde36cc1 (patch)
tree       80de8a68eacd596f0d120efc65dbbae3f324aea0 /deps/v8/src/arm/stub-cache-arm.cc
parent     9bbca99107652906a060679ee95bf1ad7381cbb5 (diff)
download   node-3b1d656da56bdd403d1625a0c6a44d75cde36cc1.tar.gz
Revert "Upgrade V8 to 3.6.6"
Not stable enough.
- Windows snapshot linking broken
- Linux crash on ./node_g test/simple/test-stream-pipe-multi.js
This reverts commit 56e6952e639ba1557a5b22333788583e9e39fa29.
Diffstat (limited to 'deps/v8/src/arm/stub-cache-arm.cc')
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc  324
1 file changed, 151 insertions, 173 deletions
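Most of the churn below is the write barrier: the reverted 3.6.6 code calls `RecordWriteField(obj, offset, value, scratch, kLRHasNotBeenSaved, kDontSaveFPRegs)` (the newer barrier that threads link-register and FP-register state through for V8's incremental-marking GC), while the restored code uses the older `RecordWrite(obj, Operand(offset), scratch0, scratch1)` form; likewise, `FrameScope`-managed frames revert to explicit `EnterInternalFrame()`/`LeaveInternalFrame()` pairs. For readers outside V8, a write barrier is the hook a generational GC needs whenever a pointer to a new-space object is stored into an old-space object. A minimal, self-contained sketch of the remembered-set idea (illustration only, not V8's implementation; every name in it is invented):

```cpp
// Toy remembered-set write barrier -- NOT V8 code. V8's RecordWrite /
// RecordWriteField emit the equivalent check as inline ARM instructions.
#include <cstdint>
#include <unordered_set>

struct ToyHeap {
  uintptr_t new_space_start;             // addresses >= this are "new space"
  std::unordered_set<void**> remembered; // old->new slots the GC must rescan

  bool InNewSpace(const void* p) const {
    return reinterpret_cast<uintptr_t>(p) >= new_space_start;
  }

  // Call after storing `value` into `*slot`, where `slot` lives inside an
  // old-space object: record old->new pointers for the next minor GC.
  void RecordWrite(void** slot, void* value) {
    if (!InNewSpace(slot) && InNewSpace(value)) {
      remembered.insert(slot);
    }
  }
};
```

The hunks that follow show the same check at the assembler level; the 3.6.6 variant simply carries extra state (whether lr was saved, whether FP registers must be preserved) that the 3.5-era helper did not need.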
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 09ecc798c..f8565924b 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -431,13 +431,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
     // Update the write barrier for the array address.
     // Pass the now unused name_reg as a scratch register.
-    __ mov(name_reg, r0);
-    __ RecordWriteField(receiver_reg,
-                        offset,
-                        name_reg,
-                        scratch,
-                        kLRHasNotBeenSaved,
-                        kDontSaveFPRegs);
+    __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -450,13 +444,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
     // Update the write barrier for the array address.
     // Ok to clobber receiver_reg and name_reg, since we return.
-    __ mov(name_reg, r0);
-    __ RecordWriteField(scratch,
-                        offset,
-                        name_reg,
-                        receiver_reg,
-                        kLRHasNotBeenSaved,
-                        kDontSaveFPRegs);
+    __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
   }
 
   // Return the value (register r0).
@@ -565,10 +553,9 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
 }
 
-static MaybeObject* GenerateFastApiDirectCall(
-    MacroAssembler* masm,
-    const CallOptimization& optimization,
-    int argc) {
+static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+                                      const CallOptimization& optimization,
+                                      int argc) {
   // ----------- S t a t e -------------
   //  -- sp[0]              : holder (set by CheckPrototypes)
   //  -- sp[4]              : callee js function
@@ -604,8 +591,6 @@ static MaybeObject* GenerateFastApiDirectCall(
   ApiFunction fun(api_function_address);
 
   const int kApiStackSpace = 4;
-
-  FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
   // r0 = v8::Arguments&
@@ -631,11 +616,9 @@ static MaybeObject* GenerateFastApiDirectCall(
   ExternalReference ref = ExternalReference(&fun,
                                             ExternalReference::DIRECT_API_CALL,
                                             masm->isolate());
-  AllowExternalCallThatCantCauseGC scope(masm);
   return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
-
 class CallInterceptorCompiler BASE_EMBEDDED {
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -811,7 +794,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
                     miss_label);
 
     // Call a runtime function to load the interceptor property.
-    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ EnterInternalFrame();
     // Save the name_ register across the call.
     __ push(name_);
@@ -828,8 +811,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
 
     // Restore the name_ register.
     __ pop(name_);
-
-    // Leave the internal frame.
+    __ LeaveInternalFrame();
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
@@ -838,19 +820,18 @@ class CallInterceptorCompiler BASE_EMBEDDED {
                            JSObject* holder_obj,
                            Register scratch,
                            Label* interceptor_succeeded) {
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(holder, name_);
-
-      CompileCallLoadPropertyWithInterceptor(masm,
-                                             receiver,
-                                             holder,
-                                             name_,
-                                             holder_obj);
-
-      __ pop(name_);  // Restore the name.
-      __ pop(receiver);  // Restore the holder.
-    }
+    __ EnterInternalFrame();
+    __ Push(holder, name_);
+
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
+
+    __ pop(name_);  // Restore the name.
+    __ pop(receiver);  // Restore the holder.
+    __ LeaveInternalFrame();
 
     // If interceptor returns no-result sentinel, call the constant function.
     __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
@@ -1247,10 +1228,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
   ApiFunction fun(getter_address);
 
   const int kApiStackSpace = 1;
-
-  FrameScope frame_scope(masm(), StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
-
   // Create AccessorInfo instance on the stack above the exit frame with
   // scratch2 (internal::Object **args_) as the data.
   __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
@@ -1310,44 +1288,42 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    {
-      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+    __ EnterInternalFrame();
 
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        // CALLBACKS case needs a receiver to be passed into C++ callback.
-        __ Push(receiver, holder_reg, name_reg);
-      } else {
-        __ Push(holder_reg, name_reg);
-      }
-
-      // Invoke an interceptor. Note: map checks from receiver to
-      // interceptor's holder has been compiled before (see a caller
-      // of this method.)
-      CompileCallLoadPropertyWithInterceptor(masm(),
-                                             receiver,
-                                             holder_reg,
-                                             name_reg,
-                                             interceptor_holder);
-
-      // Check if interceptor provided a value for property. If it's
-      // the case, return immediately.
-      Label interceptor_failed;
-      __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
-      __ cmp(r0, scratch1);
-      __ b(eq, &interceptor_failed);
-      frame_scope.GenerateLeaveFrame();
-      __ Ret();
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      // CALLBACKS case needs a receiver to be passed into C++ callback.
+      __ Push(receiver, holder_reg, name_reg);
+    } else {
+      __ Push(holder_reg, name_reg);
+    }
 
-      __ bind(&interceptor_failed);
-      __ pop(name_reg);
-      __ pop(holder_reg);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        __ pop(receiver);
-      }
+    // Invoke an interceptor. Note: map checks from receiver to
+    // interceptor's holder has been compiled before (see a caller
+    // of this method.)
+    CompileCallLoadPropertyWithInterceptor(masm(),
+                                           receiver,
+                                           holder_reg,
+                                           name_reg,
+                                           interceptor_holder);
+
+    // Check if interceptor provided a value for property. If it's
+    // the case, return immediately.
+    Label interceptor_failed;
+    __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ cmp(r0, scratch1);
+    __ b(eq, &interceptor_failed);
+    __ LeaveInternalFrame();
+    __ Ret();
 
-      // Leave the internal frame.
+    __ bind(&interceptor_failed);
+    __ pop(name_reg);
+    __ pop(holder_reg);
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      __ pop(receiver);
     }
+    __ LeaveInternalFrame();
+
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed. And load lookup's holder into |holder| register.
     if (interceptor_holder != lookup->holder()) {
@@ -1580,7 +1556,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
                   DONT_DO_SMI_CHECK);
 
     if (argc == 1) {  // Otherwise fall through to call the builtin.
-      Label attempt_to_grow_elements;
+      Label exit, with_write_barrier, attempt_to_grow_elements;
 
       // Get the array's length into r0 and calculate new length.
       __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1595,15 +1571,11 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
       __ cmp(r0, r4);
       __ b(gt, &attempt_to_grow_elements);
 
-      // Check if value is a smi.
-      Label with_write_barrier;
-      __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
-      __ JumpIfNotSmi(r4, &with_write_barrier);
-
       // Save new length.
       __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
 
       // Push the element.
+      __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
       // We may need a register containing the address end_elements below,
       // so write back the value in end_elements.
       __ add(end_elements, elements,
             Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
       __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
 
       // Check for a smi.
+      __ JumpIfNotSmi(r4, &with_write_barrier);
+      __ bind(&exit);
       __ Drop(argc + 1);
       __ Ret();
 
       __ bind(&with_write_barrier);
-
-      __ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset));
-      __ CheckFastSmiOnlyElements(r6, r6, &call_builtin);
-
-      // Save new length.
-      __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
-      // Push the element.
-      // We may need a register containing the address end_elements below,
-      // so write back the value in end_elements.
-      __ add(end_elements, elements,
-             Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-      __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
-      __ RecordWrite(elements,
-                     end_elements,
-                     r4,
-                     kLRHasNotBeenSaved,
-                     kDontSaveFPRegs,
-                     EMIT_REMEMBERED_SET,
-                     OMIT_SMI_CHECK);
+      __ InNewSpace(elements, r4, eq, &exit);
+      __ RecordWriteHelper(elements, end_elements, r4);
       __ Drop(argc + 1);
       __ Ret();
@@ -1649,15 +1604,6 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
         __ b(&call_builtin);
       }
 
-      __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
-      // Growing elements that are SMI-only requires special handling in case
-      // the new element is non-Smi. For now, delegate to the builtin.
-      Label no_fast_elements_check;
-      __ JumpIfSmi(r2, &no_fast_elements_check);
-      __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
-      __ CheckFastObjectElements(r7, r7, &call_builtin);
-      __ bind(&no_fast_elements_check);
-
       Isolate* isolate = masm()->isolate();
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(isolate);
@@ -1684,7 +1630,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
       // Update new_space_allocation_top.
       __ str(r6, MemOperand(r7));
       // Push the argument.
-      __ str(r2, MemOperand(end_elements));
+      __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize));
+      __ str(r6, MemOperand(end_elements));
       // Fill the rest with holes.
       __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
       for (int i = 1; i < kAllocationDelta; i++) {
@@ -2766,15 +2713,6 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
   // Store the value in the cell.
   __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
-  __ mov(r1, r0);
-  __ RecordWriteField(r4,
-                      JSGlobalPropertyCell::kValueOffset,
-                      r1,
-                      r2,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET);
-
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
   __ Ret();
@@ -3516,7 +3454,6 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
    case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3603,7 +3540,6 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
       }
       break;
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3944,7 +3880,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
       }
       break;
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4008,7 +3943,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4148,7 +4082,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4301,10 +4234,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
 }
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
-    MacroAssembler* masm,
-    bool is_js_array,
-    ElementsKind elements_kind) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+                                                      bool is_js_array) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
@@ -4346,33 +4277,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   __ cmp(key_reg, scratch);
   __ b(hs, &miss_force_generic);
 
-  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
-    __ JumpIfNotSmi(value_reg, &miss_force_generic);
-    __ add(scratch,
-           elements_reg,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-    __ add(scratch,
-           scratch,
-           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
-    __ str(value_reg, MemOperand(scratch));
-  } else {
-    ASSERT(elements_kind == FAST_ELEMENTS);
-    __ add(scratch,
-           elements_reg,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-    __ add(scratch,
-           scratch,
-           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
-    __ str(value_reg, MemOperand(scratch));
-    __ mov(receiver_reg, value_reg);
-    __ RecordWrite(elements_reg,  // Object.
-                   scratch,       // Address.
-                   receiver_reg,  // Value.
-                   kLRHasNotBeenSaved,
-                   kDontSaveFPRegs);
-  }
+  __ add(scratch,
+         elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ str(value_reg,
+         MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ RecordWrite(scratch,
+                 Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
+                 receiver_reg , elements_reg);
+  // value_reg (r0) is preserved.
 
   // Done.
   __ Ret();
@@ -4396,15 +4309,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   //  -- r4    : scratch
   //  -- r5    : scratch
   // -----------------------------------
-  Label miss_force_generic;
+  Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
 
   Register value_reg = r0;
   Register key_reg = r1;
   Register receiver_reg = r2;
-  Register elements_reg = r3;
-  Register scratch1 = r4;
-  Register scratch2 = r5;
-  Register scratch3 = r6;
+  Register scratch = r3;
+  Register elements_reg = r4;
+  Register mantissa_reg = r5;
+  Register exponent_reg = r6;
   Register scratch4 = r7;
 
   // This stub is meant to be tail-jumped to, the receiver must already
@@ -4416,25 +4329,90 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   // Check that the key is within bounds.
   if (is_js_array) {
-    __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
   } else {
-    __ ldr(scratch1,
+    __ ldr(scratch,
           FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
   }
   // Compare smis, unsigned compare catches both negative and out-of-bound
   // indexes.
-  __ cmp(key_reg, scratch1);
+  __ cmp(key_reg, scratch);
   __ b(hs, &miss_force_generic);
 
-  __ StoreNumberToDoubleElements(value_reg,
-                                 key_reg,
-                                 receiver_reg,
-                                 elements_reg,
-                                 scratch1,
-                                 scratch2,
-                                 scratch3,
-                                 scratch4,
-                                 &miss_force_generic);
+  // Handle smi values specially.
+  __ JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number
+  __ CheckMap(value_reg,
+              scratch,
+              masm->isolate()->factory()->heap_number_map(),
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
+  // in the exponent.
+  __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+  __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+  __ cmp(exponent_reg, scratch);
+  __ b(ge, &maybe_nan);
+
+  __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+  __ bind(&have_double_value);
+  __ add(scratch, elements_reg,
+         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ str(exponent_reg, FieldMemOperand(scratch, offset));
+  __ Ret();
+
+  __ bind(&maybe_nan);
+  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  __ b(gt, &is_nan);
+  __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+  __ cmp(mantissa_reg, Operand(0));
+  __ b(eq, &have_double_value);
+  __ bind(&is_nan);
+  // Load canonical NaN for storing into the double array.
+  uint64_t nan_int64 = BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+  __ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+  __ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+  __ jmp(&have_double_value);
+
+  __ bind(&smi_value);
+  __ add(scratch, elements_reg,
+         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  __ add(scratch, scratch,
+         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  // scratch is now effective address of the double element
+
+  FloatingPointHelper::Destination destination;
+  if (CpuFeatures::IsSupported(VFP3)) {
+    destination = FloatingPointHelper::kVFPRegisters;
+  } else {
+    destination = FloatingPointHelper::kCoreRegisters;
+  }
+
+  Register untagged_value = receiver_reg;
+  __ SmiUntag(untagged_value, value_reg);
+  FloatingPointHelper::ConvertIntToDouble(
+      masm,
+      untagged_value,
+      destination,
+      d0,
+      mantissa_reg,
+      exponent_reg,
+      scratch4,
+      s2);
+  if (destination == FloatingPointHelper::kVFPRegisters) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vstr(d0, scratch, 0);
+  } else {
+    __ str(mantissa_reg, MemOperand(scratch, 0));
+    __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
+  }
   __ Ret();
 
   // Handle store cache miss, replacing the ic with the generic stub.