path: root/deps/v8/src/arm
author     Ryan Dahl <ry@tinyclouds.org>  2011-03-02 21:04:37 -0800
committer  Ryan Dahl <ry@tinyclouds.org>  2011-03-02 21:04:37 -0800
commit     14475c77a61795ab706ddca41be955ea8b8c9eeb (patch)
tree       2d4a178aac7d8f4495bbafbe21e28531e4a71cc5 /deps/v8/src/arm
parent     cf7b680db9f61a9f757581b1e3f4335e4402b722 (diff)
download   node-new-14475c77a61795ab706ddca41be955ea8b8c9eeb.tar.gz
Upgrade V8 to 3.1.8
Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/assembler-arm.h           1
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc       706
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc           47
-rw-r--r--  deps/v8/src/arm/constants-arm.h           8
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc     582
-rw-r--r--  deps/v8/src/arm/ic-arm.cc                27
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc            3
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc   85
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h     5
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc  129
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h    49
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc         30
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc        12
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.cc     12
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.h       2
15 files changed, 1285 insertions, 413 deletions
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 954b9cff33..f5eb5075f6 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -284,6 +284,7 @@ const SwVfpRegister s29 = { 29 };
const SwVfpRegister s30 = { 30 };
const SwVfpRegister s31 = { 31 };
+const DwVfpRegister no_dreg = { -1 };
const DwVfpRegister d0 = { 0 };
const DwVfpRegister d1 = { 1 };
const DwVfpRegister d2 = { 2 };
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 7d374eecd7..62eb3e6b8e 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -398,8 +398,11 @@ class FloatingPointHelper : public AllStatic {
Label* not_number);
// Loads the number from object into dst as a 32-bit integer if possible. If
- // the object is not a 32-bit integer control continues at the label
- // not_int32. If VFP is supported double_scratch is used but not scratch2.
+  // the object cannot be converted to a 32-bit integer, control continues
+  // at the label not_int32. If VFP is supported, double_scratch is used
+  // but not scratch2.
+  // Floating point values in the 32-bit integer range will be rounded
+  // to integers.
static void LoadNumberAsInteger(MacroAssembler* masm,
Register object,
Register dst,
@@ -409,6 +412,76 @@ class FloatingPointHelper : public AllStatic {
DwVfpRegister double_scratch,
Label* not_int32);
+ // Load the number from object into double_dst in the double format.
+ // Control will jump to not_int32 if the value cannot be exactly represented
+ // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be loaded.
+ static void LoadNumberAsInt32Double(MacroAssembler* masm,
+ Register object,
+ Destination destination,
+ DwVfpRegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ SwVfpRegister single_scratch,
+ Label* not_int32);
+
+ // Loads the number from object into dst as a 32-bit integer.
+ // Control will jump to not_int32 if the object cannot be exactly represented
+ // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be converted.
+ // scratch3 is not used when VFP3 is supported.
+ static void LoadNumberAsInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ DwVfpRegister double_scratch,
+ Label* not_int32);
+
+  // Generates non-VFP3 code to check if a double can be exactly represented
+  // by a 32-bit integer. This does not check for 0 or -0, which need
+  // to be checked for separately.
+  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
+  // through otherwise.
+  // src1 and src2 will be clobbered.
+ //
+ // Expected input:
+ // - src1: higher (exponent) part of the double value.
+ // - src2: lower (mantissa) part of the double value.
+ // Output status:
+ // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
+ // - src2: contains 1.
+ // - other registers are clobbered.
+ static void DoubleIs32BitInteger(MacroAssembler* masm,
+ Register src1,
+ Register src2,
+ Register dst,
+ Register scratch,
+ Label* not_int32);
+
+ // Generates code to call a C function to do a double operation using core
+ // registers. (Used when VFP3 is not supported.)
+ // This code never falls through, but returns with a heap number containing
+ // the result in r0.
+ // Register heapnumber_result must be a heap number in which the
+ // result of the operation will be stored.
+ // Requires the following layout on entry:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+ static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch);
+
private:
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
@@ -560,6 +633,319 @@ void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
+ Register object,
+ Destination destination,
+ DwVfpRegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ SwVfpRegister single_scratch,
+ Label* not_int32) {
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!heap_number_map.is(object) &&
+ !heap_number_map.is(scratch1) &&
+ !heap_number_map.is(scratch2));
+
+ Label done, obj_is_not_smi;
+
+ __ JumpIfNotSmi(object, &obj_is_not_smi);
+ __ SmiUntag(scratch1, object);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(single_scratch, scratch1);
+ __ vcvt_f64_s32(double_dst, single_scratch);
+ if (destination == kCoreRegisters) {
+ __ vmov(dst1, dst2, double_dst);
+ }
+ } else {
+ Label fewer_than_20_useful_bits;
+ // Expected output:
+ // | dst1 | dst2 |
+ // | s | exp | mantissa |
+
+ // Check for zero.
+ __ cmp(scratch1, Operand(0));
+ __ mov(dst1, scratch1);
+ __ mov(dst2, scratch1);
+ __ b(eq, &done);
+
+ // Preload the sign of the value.
+ __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC);
+ // Get the absolute value of the object (as an unsigned integer).
+ __ rsb(scratch1, scratch1, Operand(0), SetCC, mi);
+
+  // Get mantissa[51:20].
+
+ // Get the position of the first set bit.
+ __ CountLeadingZeros(dst2, scratch1, scratch2);
+ __ rsb(dst2, dst2, Operand(31));
+
+ // Set the exponent.
+ __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias));
+ __ Bfi(dst1, scratch2, scratch2,
+ HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+
+  // Clear the first non-null bit.
+ __ mov(scratch2, Operand(1));
+ __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2));
+
+ __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
+ // Get the number of bits to set in the lower part of the mantissa.
+ __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+ __ b(mi, &fewer_than_20_useful_bits);
+ // Set the higher 20 bits of the mantissa.
+ __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2));
+ __ rsb(scratch2, scratch2, Operand(32));
+ __ mov(dst2, Operand(scratch1, LSL, scratch2));
+ __ b(&done);
+
+ __ bind(&fewer_than_20_useful_bits);
+ __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ mov(scratch2, Operand(scratch1, LSL, scratch2));
+ __ orr(dst1, dst1, scratch2);
+ // Set dst2 to 0.
+ __ mov(dst2, Operand(0));
+ }
+
+ __ b(&done);
+
+ __ bind(&obj_is_not_smi);
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Load the number.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double value.
+ __ sub(scratch1, object, Operand(kHeapObjectTag));
+ __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
+
+ __ EmitVFPTruncate(kRoundToZero,
+ single_scratch,
+ double_dst,
+ scratch1,
+ scratch2,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ b(ne, not_int32);
+
+ if (destination == kCoreRegisters) {
+ __ vmov(dst1, dst2, double_dst);
+ }
+
+ } else {
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+  // Load the double value into the destination registers.
+ __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ // Check for 0 and -0.
+ __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
+ __ orr(scratch1, scratch1, Operand(dst2));
+ __ cmp(scratch1, Operand(0));
+ __ b(eq, &done);
+
+ // Check that the value can be exactly represented by a 32-bit integer.
+ // Jump to not_int32 if that's not the case.
+ DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+
+ // dst1 and dst2 were trashed. Reload the double value.
+ __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+ }
+
+ __ bind(&done);
+}
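The non-VFP branch above builds the IEEE-754 bit pattern of the untagged smi
by hand: sign first, then an exponent taken from the position of the leading
set bit, then the shifted mantissa. A minimal standalone C++ sketch of the
same construction (not part of the patch; __builtin_clz assumes a GCC/Clang
toolchain):

    #include <cstdint>

    // Build the raw IEEE-754 double bits for a 32-bit integer, mirroring
    // the non-VFP smi path above (bias 1023, 52 mantissa bits).
    static uint64_t Int32ToDoubleBits(int32_t value) {
      if (value == 0) return 0;            // Zero is the all-zero pattern.
      uint64_t sign = 0;
      uint32_t magnitude = static_cast<uint32_t>(value);
      if (value < 0) {
        sign = 1ull << 63;
        magnitude = 0u - magnitude;        // Absolute value (wraps safely).
      }
      // The position of the leading set bit is the unbiased exponent.
      int exponent = 31 - __builtin_clz(magnitude);
      // Clear the implicit leading 1 and align the rest as the mantissa.
      uint64_t mantissa =
          (static_cast<uint64_t>(magnitude) & ~(1ull << exponent))
          << (52 - exponent);
      return sign | (static_cast<uint64_t>(exponent + 1023) << 52) | mantissa;
    }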
+
+
+void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ DwVfpRegister double_scratch,
+ Label* not_int32) {
+ ASSERT(!dst.is(object));
+ ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
+ ASSERT(!scratch1.is(scratch2) &&
+ !scratch1.is(scratch3) &&
+ !scratch2.is(scratch3));
+
+ Label done;
+
+ // Untag the object into the destination register.
+ __ SmiUntag(dst, object);
+ // Just return if the object is a smi.
+ __ JumpIfSmi(object, &done);
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Object is a heap number.
+ // Convert the floating point value to a 32-bit integer.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ SwVfpRegister single_scratch = double_scratch.low();
+ // Load the double value.
+ __ sub(scratch1, object, Operand(kHeapObjectTag));
+ __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);
+
+ __ EmitVFPTruncate(kRoundToZero,
+ single_scratch,
+ double_scratch,
+ scratch1,
+ scratch2,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ b(ne, not_int32);
+ // Get the result in the destination register.
+ __ vmov(dst, single_scratch);
+
+ } else {
+    // Load the double value into the scratch registers.
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+ // Check for 0 and -0.
+ __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
+ __ orr(dst, scratch2, Operand(dst));
+ __ cmp(dst, Operand(0));
+ __ b(eq, &done);
+
+ DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
+
+    // Register state after DoubleIs32BitInteger:
+ // dst: mantissa[51:20].
+ // scratch2: 1
+
+ // Shift back the higher bits of the mantissa.
+ __ mov(dst, Operand(dst, LSR, scratch3));
+ // Set the implicit first bit.
+ __ rsb(scratch3, scratch3, Operand(32));
+ __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
+ // Set the sign.
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ __ rsb(dst, dst, Operand(0), LeaveCC, mi);
+ }
+
+ __ bind(&done);
+}
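Going the other way, the non-VFP branch above rebuilds the integer from the
32 high mantissa bits that DoubleIs32BitInteger leaves in dst: shift them
down by (32 - exponent), re-insert the implicit leading 1, and apply the
sign. A hedged C++ sketch of that reconstruction (illustrative only; the
-2^31 corner case is glossed over):

    #include <cstdint>

    // Recover a 32-bit integer from the top 32 mantissa bits of a double
    // already known to be an exact int32 (per DoubleIs32BitInteger).
    static int32_t ReconstructInt32(uint32_t mantissa_high, int exponent,
                                    bool negative) {
      // Shifting by 32 is undefined in C++ (the ARM register shift used
      // above yields 0), so treat exponent == 0, i.e. +/-1.0, explicitly.
      uint32_t magnitude = (exponent == 0)
          ? 1u
          : (mantissa_high >> (32 - exponent)) | (1u << exponent);
      return negative ? -static_cast<int32_t>(magnitude)
                      : static_cast<int32_t>(magnitude);
    }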
+
+
+void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
+ Register src1,
+ Register src2,
+ Register dst,
+ Register scratch,
+ Label* not_int32) {
+ // Get exponent alone in scratch.
+ __ Ubfx(scratch,
+ src1,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+  // Subtract the bias from the exponent.
+ __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
+
+ // src1: higher (exponent) part of the double value.
+ // src2: lower (mantissa) part of the double value.
+ // scratch: unbiased exponent.
+
+  // Fast cases. Check for values that are obviously not 32-bit integers.
+ // Negative exponent cannot yield 32-bit integers.
+ __ b(mi, not_int32);
+ // Exponent greater than 31 cannot yield 32-bit integers.
+ // Also, a positive value with an exponent equal to 31 is outside of the
+ // signed 32-bit integer range.
+ __ tst(src1, Operand(HeapNumber::kSignMask));
+  __ cmp(scratch, Operand(30), eq);  // Executed for positive values only. If
+                                     // the exponent is 30, the gt condition
+                                     // will be false and the next instruction
+                                     // will be skipped.
+  __ cmp(scratch, Operand(31), ne);  // Executed for negative values, and for
+                                     // positive values whose exponent is
+                                     // not 30.
+ __ b(gt, not_int32);
+  // Jump to not_int32 if bits [21:0] in the mantissa are not null.
+ __ tst(src2, Operand(0x3fffff));
+ __ b(ne, not_int32);
+
+  // Otherwise the exponent needs to be big enough to shift all the
+  // non-zero bits left. So we need the (30 - exponent) last bits of the
+  // 31 higher bits of the mantissa to be null.
+  // Because bits [21:0] are null, we can check instead that the
+  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
+
+ // Get the 32 higher bits of the mantissa in dst.
+ __ Ubfx(dst,
+ src2,
+ HeapNumber::kMantissaBitsInTopWord,
+ 32 - HeapNumber::kMantissaBitsInTopWord);
+ __ orr(dst,
+ dst,
+ Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+
+ // Create the mask and test the lower bits (of the higher bits).
+ __ rsb(scratch, scratch, Operand(32));
+ __ mov(src2, Operand(1));
+ __ mov(src1, Operand(src2, LSL, scratch));
+ __ sub(src1, src1, Operand(1));
+ __ tst(dst, src1);
+ __ b(ne, not_int32);
+}
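For reference, the intended predicate can be written as plain C++ over the
two 32-bit halves of the double. This sketch assumes the usual IEEE-754
layout (sign in bit 31, 11 exponent bits, bias 1023) and is not part of the
patch; as in the assembly, 0 and -0 must be handled separately by the caller:

    #include <cstdint>

    // True iff the double given by its high and low words is an exact,
    // in-range 32-bit integer. Follows the intent of the code above.
    static bool DoubleIsInt32(uint32_t hi, uint32_t lo) {
      int exponent = static_cast<int>((hi >> 20) & 0x7ff) - 1023;
      bool negative = (hi & 0x80000000u) != 0;
      if (exponent < 0) return false;                     // |value| < 1.
      if (exponent > (negative ? 31 : 30)) return false;  // Out of range.
      if (lo & 0x3fffff) return false;         // Bits [21:0] must be null.
      // The 32 higher bits of the mantissa (bits [51:20] of the double);
      // the left shift drops sign and exponent off the top, like LSL.
      uint32_t mantissa = (lo >> 20) | (hi << 12);
      // The (32 - exponent) last bits of those 32 must be null as well.
      uint32_t mask = (exponent == 0) ? 0xffffffffu
                                      : (1u << (32 - exponent)) - 1;
      return (mantissa & mask) == 0;
    }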
+
+
+void FloatingPointHelper::CallCCodeForDoubleOperation(
+ MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch) {
+ // Using core registers:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+
+ // Assert that heap_number_result is callee-saved.
+ // We currently always use r5 to pass it.
+ ASSERT(heap_number_result.is(r5));
+
+ // Push the current return address before the C call. Return will be
+ // through pop(pc) below.
+ __ push(lr);
+ __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
+ // Call C routine that may not cause GC or other trouble.
+ __ CallCFunction(ExternalReference::double_fp_operation(op), 4);
+ // Store answer in the overwritable heap number.
+#if !defined(USE_ARM_EABI)
+  // Double returned in fp coprocessor registers 0 and 1, encoded as
+  // register cr8. Offsets must be divisible by 4 for the coprocessor, so we
+  // need to subtract the tag from heap_number_result.
+ __ sub(scratch, heap_number_result, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset));
+#else
+ // Double returned in registers 0 and 1.
+ __ Strd(r0, r1, FieldMemOperand(heap_number_result,
+ HeapNumber::kValueOffset));
+#endif
+ // Place heap_number_result in r0 and return to the pushed return address.
+ __ mov(r0, Operand(heap_number_result));
+ __ pop(pc);
+}
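When VFP3 is unavailable, doubles travel through core registers in
soft-float style: each double is split into a low mantissa word and a high
sign/exponent word, with (r0, r1) carrying the left operand and (r2, r3) the
right. A small host-side sketch of that split (illustrative, assuming
little-endian ARM EABI word order):

    #include <cstdint>
    #include <cstring>

    // Split a double into the two 32-bit words that the soft-float calling
    // convention above passes in core registers.
    static void SplitDouble(double value, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // Bit-pun without aliasing UB.
      *lo = static_cast<uint32_t>(bits);         // r0/r2: low mantissa word.
      *hi = static_cast<uint32_t>(bits >> 32);   // r1/r3: sign, exponent,
                                                 //        top of mantissa.
    }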
+
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
@@ -2707,33 +3093,11 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
__ add(r0, r0, Operand(kHeapObjectTag));
__ Ret();
} else {
- // Using core registers:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
-
- // Push the current return address before the C call. Return will be
- // through pop(pc) below.
- __ push(lr);
- __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
- // Call C routine that may not cause GC or other trouble. r5 is callee
- // save.
- __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
- // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as
- // register cr8. Offsets must be divisible by 4 for coprocessor so we
- // need to substract the tag from r5.
- __ sub(scratch1, result, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
-#else
- // Double returned in registers 0 and 1.
- __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
-#endif
- // Plase result in r0 and return to the pushed return address.
- __ mov(r0, Operand(result));
- __ pop(pc);
+ // Call the C function to handle the double operation.
+ FloatingPointHelper::CallCCodeForDoubleOperation(masm,
+ op_,
+ result,
+ scratch1);
}
break;
}
@@ -2779,7 +3143,6 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
break;
case Token::SAR:
// Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
__ GetLeastBitsFromInt32(r2, r2, 5);
__ mov(r2, Operand(r3, ASR, r2));
break;
@@ -2924,7 +3287,288 @@ void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
ASSERT(operands_type_ == TRBinaryOpIC::INT32);
- GenerateTypeTransition(masm);
+ Register left = r1;
+ Register right = r0;
+ Register scratch1 = r7;
+ Register scratch2 = r9;
+ DwVfpRegister double_scratch = d0;
+ SwVfpRegister single_scratch = s3;
+
+ Register heap_number_result = no_reg;
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ Label call_runtime;
+  // Labels for type transition, used for wrong input or output types.
+  // Both labels are currently bound to the same position. We use two
+  // different labels to differentiate the causes leading to a type transition.
+ Label transition;
+
+ // Smi-smi fast case.
+ Label skip;
+ __ orr(scratch1, left, right);
+ __ JumpIfNotSmi(scratch1, &skip);
+ GenerateSmiSmiOperation(masm);
+ // Fall through if the result is not a smi.
+ __ bind(&skip);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+      // Load both operands and check that they are 32-bit integers.
+ // Jump to type transition if they are not. The registers r0 and r1 (right
+ // and left) are preserved for the runtime call.
+ FloatingPointHelper::Destination destination =
+ CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
+ FloatingPointHelper::kVFPRegisters :
+ FloatingPointHelper::kCoreRegisters;
+
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ right,
+ destination,
+ d7,
+ r2,
+ r3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ s0,
+ &transition);
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ left,
+ destination,
+ d6,
+ r4,
+ r5,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ s0,
+ &transition);
+
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ CpuFeatures::Scope scope(VFP3);
+ Label return_heap_number;
+ switch (op_) {
+ case Token::ADD:
+ __ vadd(d5, d6, d7);
+ break;
+ case Token::SUB:
+ __ vsub(d5, d6, d7);
+ break;
+ case Token::MUL:
+ __ vmul(d5, d6, d7);
+ break;
+ case Token::DIV:
+ __ vdiv(d5, d6, d7);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (op_ != Token::DIV) {
+ // These operations produce an integer result.
+ // Try to return a smi if we can.
+ // Otherwise return a heap number if allowed, or jump to type
+ // transition.
+
+ __ EmitVFPTruncate(kRoundToZero,
+ single_scratch,
+ d5,
+ scratch1,
+ scratch2);
+
+ if (result_type_ <= TRBinaryOpIC::INT32) {
+          // If the ne condition is set, the result does
+          // not fit in a 32-bit integer.
+ __ b(ne, &transition);
+ }
+
+ // Check if the result fits in a smi.
+ __ vmov(scratch1, single_scratch);
+ __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
+        // If not, try to return a heap number.
+ __ b(mi, &return_heap_number);
+ // Tag the result and return.
+ __ SmiTag(r0, scratch1);
+ __ Ret();
+ }
+
+      if (result_type_ >= ((op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER
+                                                : TRBinaryOpIC::INT32)) {
+ __ bind(&return_heap_number);
+        // We are using VFP registers, so r5 is available.
+ heap_number_result = r5;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
+ __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ mov(r0, heap_number_result);
+ __ Ret();
+ }
+
+ // A DIV operation expecting an integer result falls through
+ // to type transition.
+
+ } else {
+ // We preserved r0 and r1 to be able to call runtime.
+ // Save the left value on the stack.
+ __ Push(r5, r4);
+
+ // Allocate a heap number to store the result.
+ heap_number_result = r5;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
+
+ // Load the left value from the value saved on the stack.
+ __ Pop(r1, r0);
+
+ // Call the C function to handle the double operation.
+ FloatingPointHelper::CallCCodeForDoubleOperation(
+ masm, op_, heap_number_result, scratch1);
+ }
+
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ Label return_heap_number;
+ Register scratch3 = r5;
+ // Convert operands to 32-bit integers. Right in r2 and left in r3. The
+ // registers r0 and r1 (right and left) are preserved for the runtime
+ // call.
+ FloatingPointHelper::LoadNumberAsInt32(masm,
+ left,
+ r3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ d0,
+ &transition);
+ FloatingPointHelper::LoadNumberAsInt32(masm,
+ right,
+ r2,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ d0,
+ &transition);
+
+ // The ECMA-262 standard specifies that, for shift operations, only the
+ // 5 least significant bits of the shift value should be used.
+ switch (op_) {
+ case Token::BIT_OR:
+ __ orr(r2, r3, Operand(r2));
+ break;
+ case Token::BIT_XOR:
+ __ eor(r2, r3, Operand(r2));
+ break;
+ case Token::BIT_AND:
+ __ and_(r2, r3, Operand(r2));
+ break;
+ case Token::SAR:
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, ASR, r2));
+ break;
+ case Token::SHR:
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, LSR, r2), SetCC);
+          // SHR is special because it is required to produce a positive answer.
+          // We only get a negative result if the shift value (r2) is 0.
+          // This result cannot be represented as a signed 32-bit integer, so
+          // try to return a heap number if we can.
+          // The non-VFP3 code does not support this special case, so jump to
+          // the runtime if we don't support it.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ __ b(mi,
+ (result_type_ <= TRBinaryOpIC::INT32) ? &transition
+ : &return_heap_number);
+ } else {
+ __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &transition
+ : &call_runtime);
+ }
+ break;
+ case Token::SHL:
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, LSL, r2));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Check if the result fits in a smi.
+ __ add(scratch1, r2, Operand(0x40000000), SetCC);
+      // If not, try to return a heap number. (We know the result is an int32.)
+ __ b(mi, &return_heap_number);
+ // Tag the result and return.
+ __ SmiTag(r0, r2);
+ __ Ret();
+
+ __ bind(&return_heap_number);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ heap_number_result = r5;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
+
+ if (op_ != Token::SHR) {
+ // Convert the result to a floating point value.
+ __ vmov(double_scratch.low(), r2);
+ __ vcvt_f64_s32(double_scratch, double_scratch.low());
+ } else {
+ // The result must be interpreted as an unsigned 32-bit integer.
+ __ vmov(double_scratch.low(), r2);
+ __ vcvt_f64_u32(double_scratch, double_scratch.low());
+ }
+
+ // Store the result.
+ __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
+ __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
+ __ mov(r0, heap_number_result);
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in r2 to the heap number in r0, using
+ // r3 as scratch. r0 is preserved and returned.
+ WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+ __ TailCallStub(&stub);
+ }
+
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+
+ if (transition.is_linked()) {
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
+ }
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
}
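The stub above tests "fits in a smi" twice with the same idiom: add
0x40000000 to the candidate and branch on the sign (mi). A smi holds a
signed 31-bit payload, so the sum's sign bit is set exactly when the value
lies outside [-2^30, 2^30). A C++ sketch of the check (not part of the
patch):

    #include <cstdint>

    // True iff value fits in a V8 smi (signed 31-bit payload plus tag bit).
    // Matches the "add 0x40000000; branch on mi" idiom used above.
    static bool FitsInSmi(int32_t value) {
      // The sum has its sign bit clear exactly when -2^30 <= value < 2^30.
      return (static_cast<uint32_t>(value) + 0x40000000u) < 0x80000000u;
    }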
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 3e125a33fc..8bb576ded7 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -1938,8 +1938,9 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
frame_->EmitPush(cp);
frame_->EmitPush(Operand(pairs));
frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+ frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
- frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+ frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
// The result is discarded.
}
@@ -3287,7 +3288,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
// context slot followed by initialization.
frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
} else {
- frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+ frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+ frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
}
// Storing a variable must keep the (new) value on the expression
// stack. This is necessary for compiling assignment expressions.
@@ -3637,7 +3639,8 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Load(key);
Load(value);
if (property->emit_store()) {
- frame_->CallRuntime(Runtime::kSetProperty, 3);
+ frame_->EmitPush(Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ frame_->CallRuntime(Runtime::kSetProperty, 4);
} else {
frame_->Drop(3);
}
@@ -5170,11 +5173,11 @@ class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
// Set the bit in the map to indicate that it has been checked safe for
// default valueOf and set true result.
- __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+ __ ldrb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
__ orr(scratch1_,
scratch1_,
Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ str(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+ __ strb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
__ mov(map_result_, Operand(1));
__ jmp(exit_label());
__ bind(&false_result);
@@ -6674,8 +6677,12 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
public:
DeferredReferenceSetKeyedValue(Register value,
Register key,
- Register receiver)
- : value_(value), key_(key), receiver_(receiver) {
+ Register receiver,
+ StrictModeFlag strict_mode)
+ : value_(value),
+ key_(key),
+ receiver_(receiver),
+ strict_mode_(strict_mode) {
set_comment("[ DeferredReferenceSetKeyedValue");
}
@@ -6685,6 +6692,7 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
Register value_;
Register key_;
Register receiver_;
+ StrictModeFlag strict_mode_;
};
@@ -6706,7 +6714,9 @@ void DeferredReferenceSetKeyedValue::Generate() {
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Call keyed store IC. It has the arguments value, key and receiver in r0,
// r1 and r2.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop instruction to indicate that the
// keyed store has been inlined.
@@ -6724,8 +6734,12 @@ class DeferredReferenceSetNamedValue: public DeferredCode {
public:
DeferredReferenceSetNamedValue(Register value,
Register receiver,
- Handle<String> name)
- : value_(value), receiver_(receiver), name_(name) {
+ Handle<String> name,
+ StrictModeFlag strict_mode)
+ : value_(value),
+ receiver_(receiver),
+ name_(name),
+ strict_mode_(strict_mode) {
set_comment("[ DeferredReferenceSetNamedValue");
}
@@ -6735,6 +6749,7 @@ class DeferredReferenceSetNamedValue: public DeferredCode {
Register value_;
Register receiver_;
Handle<String> name_;
+ StrictModeFlag strict_mode_;
};
@@ -6754,7 +6769,9 @@ void DeferredReferenceSetNamedValue::Generate() {
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
    // Call the named store IC. It has the arguments value, receiver and name
    // in r0, r1 and r2.
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ (strict_mode_ == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop instruction to indicate that the
// named store has been inlined.
@@ -6943,7 +6960,8 @@ void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
Register receiver = r1;
DeferredReferenceSetNamedValue* deferred =
- new DeferredReferenceSetNamedValue(value, receiver, name);
+ new DeferredReferenceSetNamedValue(
+ value, receiver, name, strict_mode_flag());
// Check that the receiver is a heap object.
__ tst(receiver, Operand(kSmiTagMask));
@@ -7129,7 +7147,8 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type,
// The deferred code expects value, key and receiver in registers.
DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue(value, key, receiver);
+ new DeferredReferenceSetKeyedValue(
+ value, key, receiver, strict_mode_flag());
// Check that the value is a smi. As this inlined code does not set the
// write barrier it is only possible to store smi values.
@@ -7214,7 +7233,7 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type,
deferred->BindExit();
} else {
- frame()->CallKeyedStoreIC();
+ frame()->CallKeyedStoreIC(strict_mode_flag());
}
}
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 7ac38ed3ea..e6033a8977 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -385,7 +385,10 @@ enum VFPConversionMode {
kDefaultRoundToZero = 1
};
+// This mask does not include the "inexact" or "input denormal" cumulative
+// exception flags, because we usually don't want to check for them.
static const uint32_t kVFPExceptionMask = 0xf;
+static const uint32_t kVFPInexactExceptionBit = 1 << 4;
static const uint32_t kVFPFlushToZeroMask = 1 << 24;
static const uint32_t kVFPInvalidExceptionBit = 1;
@@ -411,6 +414,11 @@ enum VFPRoundingMode {
static const uint32_t kVFPRoundingModeMask = 3 << 22;
+enum CheckForInexactConversion {
+ kCheckForInexactConversion,
+ kDontCheckForInexactConversion
+};
+
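kCheckForInexactConversion asks the truncation helper to fail (leave ne set)
when rounding toward zero loses information, which the hardware reports
through the cumulative inexact bit in the FPSCR. A portable C++ sketch of
the same test, comparing against std::trunc instead of reading exception
flags (illustrative only):

    #include <cmath>
    #include <cstdint>

    // Truncate a double toward zero, reporting whether the conversion was
    // exact and in int32 range -- the condition the VFP code checks via
    // the FPSCR flags when kCheckForInexactConversion is passed.
    static bool TruncateExactly(double value, int32_t* out) {
      double truncated = std::trunc(value);
      if (truncated != value) return false;  // Fractional part (or NaN).
      if (truncated < -2147483648.0 || truncated > 2147483647.0) {
        return false;                        // Out of int32 range.
      }
      *out = static_cast<int32_t>(truncated);
      return true;
    }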
// -----------------------------------------------------------------------------
// Hints.
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 9f521fb31e..7a47644781 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -339,23 +339,6 @@ void FullCodeGenerator::EmitReturnSequence() {
}
-FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
- Token::Value op, Expression* left, Expression* right) {
- ASSERT(ShouldInlineSmiCase(op));
- if (op == Token::DIV || op == Token::MOD || op == Token::MUL) {
- // We never generate inlined constant smi operations for these.
- return kNoConstants;
- } else if (right->IsSmiLiteral()) {
- return kRightConstant;
- } else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
- // Don't inline shifts with constant left hand side.
- return kLeftConstant;
- } else {
- return kNoConstants;
- }
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
}
@@ -793,7 +776,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
prop->key()->AsLiteral()->handle()->IsSmi());
__ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(is_strict()
+ ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Value in r0 is ignored (declarations are statements).
}
@@ -809,10 +794,11 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
// The context is the first argument.
- __ mov(r1, Operand(pairs));
- __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
- __ Push(cp, r1, r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ mov(r2, Operand(pairs));
+ __ mov(r1, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+ __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ Push(cp, r2, r1, r0);
+ __ CallRuntime(Runtime::kDeclareGlobals, 4);
// Return value is ignored.
}
@@ -1456,7 +1442,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- __ CallRuntime(Runtime::kSetProperty, 3);
+ __ mov(r0, Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ __ push(r0);
+ __ CallRuntime(Runtime::kSetProperty, 4);
} else {
__ Drop(3);
}
@@ -1634,14 +1622,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- ConstantOperand constant = ShouldInlineSmiCase(op)
- ? GetConstantOperand(op, expr->target(), expr->value())
- : kNoConstants;
- ASSERT(constant == kRightConstant || constant == kNoConstants);
- if (constant == kNoConstants) {
- __ push(r0); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
- }
+ __ push(r0); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
? OVERWRITE_RIGHT
@@ -1653,8 +1635,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
op,
mode,
expr->target(),
- expr->value(),
- constant);
+ expr->value());
} else {
EmitBinaryOp(op, mode);
}
@@ -1704,217 +1685,11 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
-void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
- OverwriteMode mode,
- bool left_is_constant_smi,
- Smi* value) {
- Label call_stub, done;
- // Optimistically add smi value with unknown object. If result overflows or is
- // not a smi then we had either a smi overflow or added a smi with a tagged
- // pointer.
- __ mov(r1, Operand(value));
- __ add(r2, r0, r1, SetCC);
- __ b(vs, &call_stub);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfNotSmi(r2, &call_stub);
- __ mov(r0, r2);
- __ b(&done);
-
- // Call the shared stub.
- __ bind(&call_stub);
- if (!left_is_constant_smi) {
- __ Swap(r0, r1, r2);
- }
- TypeRecordingBinaryOpStub stub(Token::ADD, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
- OverwriteMode mode,
- bool left_is_constant_smi,
- Smi* value) {
- Label call_stub, done;
- // Optimistically subtract smi value and unknown object. If result overflows
- // or is not a smi then we had either a smi overflow or subtraction between a
- // smi and a tagged pointer.
- __ mov(r1, Operand(value));
- if (left_is_constant_smi) {
- __ sub(r2, r1, r0, SetCC);
- } else {
- __ sub(r2, r0, r1, SetCC);
- }
- __ b(vs, &call_stub);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfNotSmi(r2, &call_stub);
- __ mov(r0, r2);
- __ b(&done);
-
- // Call the shared stub.
- __ bind(&call_stub);
- if (!left_is_constant_smi) {
- __ Swap(r0, r1, r2);
- }
- TypeRecordingBinaryOpStub stub(Token::SUB, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
- Token::Value op,
- OverwriteMode mode,
- Smi* value) {
- Label call_stub, smi_case, done;
- int shift_value = value->value() & 0x1f;
-
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(r0, &smi_case);
-
- // Call stub.
- __ bind(&call_stub);
- __ mov(r1, r0);
- __ mov(r0, Operand(value));
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
- __ b(&done);
-
- // Smi case.
- __ bind(&smi_case);
- switch (op) {
- case Token::SHL:
- if (shift_value != 0) {
- __ mov(r1, r0);
- if (shift_value > 1) {
- __ mov(r1, Operand(r1, LSL, shift_value - 1));
- }
- // Convert int result to smi, checking that it is in int range.
- __ SmiTag(r1, SetCC);
- __ b(vs, &call_stub);
- __ mov(r0, r1); // Put result back into r0.
- }
- break;
- case Token::SAR:
- if (shift_value != 0) {
- __ mov(r0, Operand(r0, ASR, shift_value));
- __ bic(r0, r0, Operand(kSmiTagMask));
- }
- break;
- case Token::SHR:
- // SHR must return a positive value. When shifting by 0 or 1 we need to
- // check that smi tagging the result will not create a negative value.
- if (shift_value < 2) {
- __ mov(r2, Operand(shift_value));
- __ SmiUntag(r1, r0);
- if (shift_value != 0) {
- __ mov(r1, Operand(r1, LSR, shift_value));
- }
- __ tst(r1, Operand(0xc0000000));
- __ b(ne, &call_stub);
- __ SmiTag(r0, r1); // result in r0.
- } else {
- __ SmiUntag(r0);
- __ mov(r0, Operand(r0, LSR, shift_value));
- __ SmiTag(r0);
- }
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
- Token::Value op,
- OverwriteMode mode,
- Smi* value) {
- Label smi_case, done;
-
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(r0, &smi_case);
-
- // The order of the arguments does not matter for bit-ops with a
- // constant operand.
- __ mov(r1, Operand(value));
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
- __ jmp(&done);
-
- // Smi case.
- __ bind(&smi_case);
- __ mov(r1, Operand(value));
- switch (op) {
- case Token::BIT_OR:
- __ orr(r0, r0, Operand(r1));
- break;
- case Token::BIT_XOR:
- __ eor(r0, r0, Operand(r1));
- break;
- case Token::BIT_AND:
- __ and_(r0, r0, Operand(r1));
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
- Token::Value op,
- OverwriteMode mode,
- bool left_is_constant_smi,
- Smi* value) {
- switch (op) {
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- EmitConstantSmiBitOp(expr, op, mode, value);
- break;
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- ASSERT(!left_is_constant_smi);
- EmitConstantSmiShiftOp(expr, op, mode, value);
- break;
- case Token::ADD:
- EmitConstantSmiAdd(expr, mode, left_is_constant_smi, value);
- break;
- case Token::SUB:
- EmitConstantSmiSub(expr, mode, left_is_constant_smi, value);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Expression* left_expr,
- Expression* right_expr,
- ConstantOperand constant) {
- if (constant == kRightConstant) {
- Smi* value = Smi::cast(*right_expr->AsLiteral()->handle());
- EmitConstantSmiBinaryOp(expr, op, mode, false, value);
- return;
- } else if (constant == kLeftConstant) {
- Smi* value = Smi::cast(*left_expr->AsLiteral()->handle());
- EmitConstantSmiBinaryOp(expr, op, mode, true, value);
- return;
- }
-
+ Expression* right_expr) {
Label done, smi_case, stub_call;
Register scratch1 = r2;
@@ -2050,7 +1825,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ mov(r1, r0);
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
@@ -2071,7 +1848,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ pop(r2);
}
__ pop(r0); // Restore value.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
@@ -2095,9 +1874,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// r2, and the global object in r1.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(is_strict()
- ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
@@ -2166,9 +1945,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
case Slot::LOOKUP:
// Call the runtime for the assignment.
__ push(r0); // Value.
- __ mov(r0, Operand(slot->var()->name()));
- __ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ __ mov(r1, Operand(slot->var()->name()));
+ __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ Push(cp, r1, r0); // Context, name, strict mode.
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
break;
}
}
@@ -2203,7 +1983,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ pop(r1);
}
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
@@ -2247,7 +2029,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(r2);
}
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
@@ -2362,6 +2146,29 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
}
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+ int arg_count) {
+ // Push copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+ } else {
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ }
+ __ push(r1);
+
+ // Push the receiver of the enclosing function and do runtime call.
+ __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+ __ push(r1);
+ // Push the strict mode flag.
+ __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+ __ push(r1);
+
+ __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+ ? Runtime::kResolvePossiblyDirectEvalNoLookup
+ : Runtime::kResolvePossiblyDirectEval, 4);
+}
+
+
void FullCodeGenerator::VisitCall(Call* expr) {
#ifdef DEBUG
// We want to verify that RecordJSReturnSite gets called on all paths
@@ -2391,26 +2198,31 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForStackValue(args->at(i));
}
- // Push copy of the function - found below the arguments.
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(r1);
-
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- __ push(r1);
- } else {
- __ push(r2);
+ // If we know that eval can only be shadowed by eval-introduced
+ // variables we attempt to load the global eval function directly
+ // in generated code. If we succeed, there is no need to perform a
+ // context lookup in the runtime system.
+ Label done;
+ if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ Label slow;
+ EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
+ NOT_INSIDE_TYPEOF,
+ &slow);
+ // Push the function and resolve eval.
+ __ push(r0);
+ EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+ __ jmp(&done);
+ __ bind(&slow);
}
- // Push the receiver of the enclosing function and do runtime call.
- __ ldr(r1,
- MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
- __ push(r1);
- // Push the strict mode flag.
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+ // Push copy of the function (found below the arguments) and
+ // resolve eval.
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ push(r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
+ EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+ if (done.is_linked()) {
+ __ bind(&done);
+ }
// The runtime call returns a pair of values in r0 (function) and
// r1 (receiver). Touch up the stack with the right values.
@@ -3430,9 +3242,235 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop,
+ empty_separator_loop, one_char_separator_loop,
+ one_char_separator_loop_entry, long_separator_loop;
+
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(0));
+
+ // All aliases of the same register have disjoint lifetimes.
+ Register array = r0;
+ Register elements = no_reg; // Will be r0.
+ Register result = no_reg; // Will be r0.
+ Register separator = r1;
+ Register array_length = r2;
+  Register result_pos = no_reg;  // Will be r2.
+ Register string_length = r3;
+ Register string = r4;
+ Register element = r5;
+ Register elements_end = r6;
+ Register scratch1 = r7;
+ Register scratch2 = r9;
+
+ // Separator operand is on the stack.
+ __ pop(separator);
+
+ // Check that the array is a JSArray.
+ __ JumpIfSmi(array, &bailout);
+ __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
+ __ b(ne, &bailout);
+
+ // Check that the array has fast elements.
+ __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
+ __ tst(scratch2, Operand(1 << Map::kHasFastElements));
+ __ b(eq, &bailout);
+
+ // If the array has length zero, return the empty string.
+ __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
+ __ SmiUntag(array_length, SetCC);
+ __ b(ne, &non_trivial_array);
+ __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
+ __ b(&done);
+
+ __ bind(&non_trivial_array);
+
+ // Get the FixedArray containing array's elements.
+ elements = array;
+ __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+ array = no_reg; // End of array's live range.
+
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ mov(string_length, Operand(0));
+ __ add(element,
+ elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ // Loop condition: while (element < elements_end).
+ // Live values in registers:
+ // elements: Fixed array of strings.
+ // array_length: Length of the fixed array of strings (not smi)
+ // separator: Separator string
+ // string_length: Accumulated sum of string lengths (smi).
+ // element: Current array element.
+ // elements_end: Array end.
+ if (FLAG_debug_code) {
+ __ cmp(array_length, Operand(0));
+ __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
+ }
+ __ bind(&loop);
+ __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ JumpIfSmi(string, &bailout);
+ __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+ __ add(string_length, string_length, Operand(scratch1));
+ __ b(vs, &bailout);
+ __ cmp(element, elements_end);
+ __ b(lt, &loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ cmp(array_length, Operand(1));
+ __ b(ne, &not_size_one_array);
+ __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ b(&done);
+
+ __ bind(&not_size_one_array);
+
+ // Live values in registers:
+ // separator: Separator string
+ // array_length: Length of the array.
+ // string_length: Sum of string lengths (smi).
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ JumpIfSmi(separator, &bailout);
+ __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+  // Add (separator length times array_length) - separator length to the
+  // string_length to get the length of the result string. array_length is not
+  // a smi but the other values are, so the result is a smi.
+ __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ sub(string_length, string_length, Operand(scratch1));
+ __ smull(scratch2, ip, array_length, scratch1);
+ // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
+ // zero.
+ __ cmp(ip, Operand(0));
+ __ b(ne, &bailout);
+ __ tst(scratch2, Operand(0x80000000));
+ __ b(ne, &bailout);
+ __ add(string_length, string_length, Operand(scratch2));
+ __ b(vs, &bailout);
+ __ SmiUntag(string_length);
+
+ // Get first element in the array to free up the elements register to be used
+ // for the result.
+ __ add(element,
+ elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ result = elements; // End of live range for elements.
+ elements = no_reg;
+ // Live values in registers:
+ // element: First array element
+ // separator: Separator string
+ // string_length: Length of result string (not smi)
+ // array_length: Length of the array.
+ __ AllocateAsciiString(result,
+ string_length,
+ scratch1,
+ scratch2,
+ elements_end,
+ &bailout);
+  // Prepare for looping. Set up elements_end to point to the end of the
+  // array. Set result_pos to the position in the result where the first
+  // character will be written.
+ __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ result_pos = array_length; // End of live range for array_length.
+ array_length = no_reg;
+ __ add(result_pos,
+ result,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+
+ // Check the length of the separator.
+ __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ cmp(scratch1, Operand(Smi::FromInt(1)));
+ __ b(eq, &one_char_separator);
+ __ b(gt, &long_separator);
+
+ // Empty separator case
+ __ bind(&empty_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+
+ // Copy next array element to the result.
+ __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ cmp(element, elements_end);
+ __ b(lt, &empty_separator_loop); // End while (element < elements_end).
+ ASSERT(result.is(r0));
+ __ b(&done);
+
+ // One-character separator case
+ __ bind(&one_char_separator);
+ // Replace separator with its ascii character value.
+ __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+  // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator.
+ __ jmp(&one_char_separator_loop_entry);
+
+ __ bind(&one_char_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Single separator ascii char (in lower byte).
+
+ // Copy the separator character to the result.
+ __ strb(separator, MemOperand(result_pos, 1, PostIndex));
+
+ // Copy next array element to the result.
+ __ bind(&one_char_separator_loop_entry);
+ __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ cmp(element, elements_end);
+ __ b(lt, &one_char_separator_loop); // End while (element < elements_end).
+ ASSERT(result.is(r0));
+ __ b(&done);
+
+ // Long separator case (separator is more than one character). Entry is at the
+ // label long_separator below.
+ __ bind(&long_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Separator string.
+
+ // Copy the separator to the result.
+ __ ldr(string_length, FieldMemOperand(separator, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ add(string,
+ separator,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+
+ __ bind(&long_separator);
+ __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ cmp(element, elements_end);
+ __ b(lt, &long_separator_loop); // End while (element < elements_end).
+ ASSERT(result.is(r0));
+ __ b(&done);
+
+ __ bind(&bailout);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ bind(&done);
context()->Plug(r0);
- return;
}
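Stripped of register allocation, the fast path above is a classic two-pass
join: one pass sums the string lengths (bailing out on anything that is not
a flat ASCII string), then the result is allocated once and filled by
copying elements and separators. A high-level C++ sketch of the shape
(illustrative; the smi-overflow and type bailouts are omitted):

    #include <string>
    #include <vector>

    // Two-pass join: measure, allocate once, copy.
    static std::string FastJoin(const std::vector<std::string>& parts,
                                const std::string& separator) {
      if (parts.empty()) return std::string();
      size_t length = separator.size() * (parts.size() - 1);
      for (const std::string& part : parts) length += part.size();  // Pass 1.
      std::string result;
      result.reserve(length);                      // Single allocation.
      for (size_t i = 0; i < parts.size(); ++i) {  // Pass 2.
        if (i > 0) result.append(separator);
        result.append(parts[i]);
      }
      return result;
    }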
@@ -3767,7 +3805,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
__ pop(r1);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3782,7 +3822,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(r1); // Key.
__ pop(r2); // Receiver.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 6c7aa0643a..0fc6818703 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -1400,7 +1400,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1411,11 +1412,16 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
- __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+ __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
+ __ Push(r1, r0);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1470,7 +1476,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// r0: value.
// r1: key.
// r2: receiver.
- GenerateRuntimeSetProperty(masm);
+ GenerateRuntimeSetProperty(masm, strict_mode);
// Check whether the elements object is a pixel array.
// r4: elements map.
@@ -1540,7 +1546,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- Code::ExtraICState extra_ic_state) {
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -1552,7 +1558,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC,
- extra_ic_state);
+ strict_mode);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
// Cache miss: Jump to runtime.
@@ -1646,7 +1652,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -1656,8 +1663,12 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
__ Push(r1, r2, r0);
+ __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ __ mov(r0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(r1, r0);
+
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}
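
Runtime::kSetProperty now takes five arguments instead of three: the two
new Smi-encoded trailing arguments carry the property attributes and the
strict-mode flag. A hedged sketch of the runtime-side handler this calling
convention implies (argument order inferred from the pushes above, helper
names assumed; not verbatim V8 code):

    static MaybeObject* Runtime_SetProperty(Arguments args) {
      ASSERT(args.length() == 5);
      Handle<Object> object = args.at<Object>(0);  // receiver
      Handle<Object> key = args.at<Object>(1);
      Handle<Object> value = args.at<Object>(2);
      PropertyAttributes attributes =
          static_cast<PropertyAttributes>(Smi::cast(args[3])->value());
      StrictModeFlag strict_mode =
          static_cast<StrictModeFlag>(Smi::cast(args[4])->value());
      return Runtime::SetObjectProperty(object, key, value, attributes,
                                        strict_mode);
    }
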
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index d3c9fee8e3..54ed4bace3 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -1154,8 +1154,7 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0), FixedTemp(r4));
- MarkAsSaveDoubles(result);
- return AssignEnvironment(AssignPointerMap(DefineFixed(result, r0)));
+ return MarkAsCall(DefineFixed(result, r0), instr);
}
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index d375617adc..c5e9271760 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -573,7 +573,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
Handle<DeoptimizationInputData> data =
Factory::NewDeoptimizationInputData(length, TENURED);
- data->SetTranslationByteArray(*translations_.CreateByteArray());
+ Handle<ByteArray> translations = translations_.CreateByteArray();
+ data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
@@ -1985,11 +1986,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ BlockConstPoolFor(kAdditionalDelta);
__ mov(temp, Operand(delta * kPointerSize));
__ StoreToSafepointRegisterSlot(temp, temp);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
- ASSERT_EQ(kAdditionalDelta,
- masm_->InstructionsGeneratedSince(&before_push_delta));
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
// Put the result value into the result register slot and
// restore all registers.
__ StoreToSafepointRegisterSlot(result, result);
@@ -2586,41 +2583,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
}
-// Truncates a double using a specific rounding mode.
-// Clears the z flag (ne condition) if an overflow occurs.
-void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode,
- SwVfpRegister result,
- DwVfpRegister double_input,
- Register scratch1,
- Register scratch2) {
- Register prev_fpscr = scratch1;
- Register scratch = scratch2;
-
- // Set custom FPCSR:
- // - Set rounding mode.
- // - Clear vfp cumulative exception flags.
- // - Make sure Flush-to-zero mode control bit is unset.
- __ vmrs(prev_fpscr);
- __ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask |
- kVFPRoundingModeMask |
- kVFPFlushToZeroMask));
- __ orr(scratch, scratch, Operand(rounding_mode));
- __ vmsr(scratch);
-
- // Convert the argument to an integer.
- __ vcvt_s32_f64(result,
- double_input,
- kFPSCRRounding);
-
- // Retrieve FPSCR.
- __ vmrs(scratch);
- // Restore FPSCR.
- __ vmsr(prev_fpscr);
- // Check for vfp exceptions.
- __ tst(scratch, Operand(kVFPExceptionMask));
-}
-
-
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -2628,11 +2590,11 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->TempAt(0));
- EmitVFPTruncate(kRoundToMinusInf,
- single_scratch,
- input,
- scratch1,
- scratch2);
+ __ EmitVFPTruncate(kRoundToMinusInf,
+ single_scratch,
+ input,
+ scratch1,
+ scratch2);
DeoptimizeIf(ne, instr->environment());
// Move the result back to general purpose register r0.
@@ -2654,11 +2616,11 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
Register result = ToRegister(instr->result());
Register scratch1 = scratch0();
Register scratch2 = result;
- EmitVFPTruncate(kRoundToNearest,
- double_scratch0().low(),
- input,
- scratch1,
- scratch2);
+ __ EmitVFPTruncate(kRoundToNearest,
+ double_scratch0().low(),
+ input,
+ scratch1,
+ scratch2);
DeoptimizeIf(ne, instr->environment());
__ vmov(result, double_scratch0().low());
@@ -2863,9 +2825,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic(Builtins::builtin(info_->is_strict()
- ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2907,7 +2869,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3371,11 +3335,12 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->TempAt(0));
- EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_input,
- scratch1,
- scratch2);
+ __ EmitVFPTruncate(kRoundToZero,
+ single_scratch,
+ double_input,
+ scratch1,
+ scratch2);
+
// Deoptimize if we had a vfp invalid exception.
DeoptimizeIf(ne, instr->environment());
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 2d9c6edcb6..a26f6311e2 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -206,11 +206,6 @@ class LCodeGen BASE_EMBEDDED {
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
- void EmitVFPTruncate(VFPRoundingMode rounding_mode,
- SwVfpRegister result,
- DwVfpRegister double_input,
- Register scratch1,
- Register scratch2);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index e0f2916449..9340b61dd8 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -271,6 +271,29 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
}
+void MacroAssembler::Bfi(Register dst,
+ Register src,
+ Register scratch,
+ int lsb,
+ int width,
+ Condition cond) {
+ ASSERT(0 <= lsb && lsb < 32);
+ ASSERT(0 <= width && width < 32);
+ ASSERT(lsb + width < 32);
+ ASSERT(!scratch.is(dst));
+ if (width == 0) return;
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ bic(dst, dst, Operand(mask));
+ and_(scratch, src, Operand((1 << width) - 1));
+ mov(scratch, Operand(scratch, LSL, lsb));
+ orr(dst, dst, scratch);
+ } else {
+ bfi(dst, src, lsb, width, cond);
+ }
+}
+
+
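
The pre-ARMv7 fallback above computes the same bitfield insert that a
single bfi performs. On plain integers (illustration only, not part of the
patch):

    #include <cstdint>

    // Insert the low 'width' bits of src into dst at bit position 'lsb',
    // leaving all other bits of dst unchanged.
    uint32_t BitfieldInsert(uint32_t dst, uint32_t src, int lsb, int width) {
      uint32_t mask = ((1u << width) - 1u) << lsb;  // bits [lsb, lsb+width)
      return (dst & ~mask) | ((src << lsb) & mask);
    }
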
void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
ASSERT(lsb < 32);
if (!CpuFeatures::IsSupported(ARMv7)) {
@@ -1818,9 +1841,9 @@ void MacroAssembler::ConvertToInt32(Register source,
ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
// Get exponent alone in scratch2.
Ubfx(scratch2,
- scratch,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
+ scratch,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
// Load dest with zero. We use this either for the final shift or
// for the answer.
mov(dest, Operand(0, RelocInfo::NONE));
@@ -1883,6 +1906,52 @@ void MacroAssembler::ConvertToInt32(Register source,
}
+void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
+ SwVfpRegister result,
+ DwVfpRegister double_input,
+ Register scratch1,
+ Register scratch2,
+ CheckForInexactConversion check_inexact) {
+ ASSERT(CpuFeatures::IsSupported(VFP3));
+ CpuFeatures::Scope scope(VFP3);
+ Register prev_fpscr = scratch1;
+ Register scratch = scratch2;
+
+ int32_t check_inexact_conversion =
+ (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
+
+ // Set custom FPSCR:
+ // - Set rounding mode.
+ // - Clear vfp cumulative exception flags.
+ // - Make sure Flush-to-zero mode control bit is unset.
+ vmrs(prev_fpscr);
+ bic(scratch,
+ prev_fpscr,
+ Operand(kVFPExceptionMask |
+ check_inexact_conversion |
+ kVFPRoundingModeMask |
+ kVFPFlushToZeroMask));
+ // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
+ if (rounding_mode != kRoundToNearest) {
+ orr(scratch, scratch, Operand(rounding_mode));
+ }
+ vmsr(scratch);
+
+ // Convert the argument to an integer.
+ vcvt_s32_f64(result,
+ double_input,
+ (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
+ : kFPSCRRounding);
+
+ // Retrieve FPSCR.
+ vmrs(scratch);
+ // Restore FPSCR.
+ vmsr(prev_fpscr);
+ // Check for vfp exceptions.
+ tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
+}
+
+
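
EmitVFPTruncate leaves the z flag set only when none of the checked VFP
exceptions fired. Its contract for the kRoundToZero case, restated in host
C++ (a sketch under stated assumptions; VfpTruncateOk is not a V8 function):

    #include <cmath>
    #include <cstdint>

    // Returns true iff the z flag would stay set: the value fits in an
    // int32 and, when requested, converts without loss.
    bool VfpTruncateOk(double input, bool check_inexact, int32_t* out) {
      double rounded = std::trunc(input);  // kRoundToZero
      if (!(rounded >= -2147483648.0 && rounded <= 2147483647.0)) {
        return false;  // overflow or NaN: invalid-operation exception
      }
      *out = static_cast<int32_t>(rounded);
      return !check_inexact || rounded == input;  // inexact exception
    }
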
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
@@ -2389,6 +2458,60 @@ void MacroAssembler::CopyFields(Register dst,
}
+void MacroAssembler::CopyBytes(Register src,
+ Register dst,
+ Register length,
+ Register scratch) {
+ Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+
+ // Align src before copying in word size chunks.
+ bind(&align_loop);
+ cmp(length, Operand(0));
+ b(eq, &done);
+ bind(&align_loop_1);
+ tst(src, Operand(kPointerSize - 1));
+ b(eq, &word_loop);
+ ldrb(scratch, MemOperand(src, 1, PostIndex));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ sub(length, length, Operand(1), SetCC);
+ b(ne, &byte_loop_1);
+
+ // Copy bytes in word size chunks.
+ bind(&word_loop);
+ if (FLAG_debug_code) {
+ tst(src, Operand(kPointerSize - 1));
+ Assert(eq, "Expecting alignment for CopyBytes");
+ }
+ cmp(length, Operand(kPointerSize));
+ b(lt, &byte_loop);
+ ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
+#if CAN_USE_UNALIGNED_ACCESSES
+ str(scratch, MemOperand(dst, kPointerSize, PostIndex));
+#else
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+#endif
+ sub(length, length, Operand(kPointerSize));
+ b(&word_loop);
+
+ // Copy the last bytes if any left.
+ bind(&byte_loop);
+ cmp(length, Operand(0));
+ b(eq, &done);
+ bind(&byte_loop_1);
+ ldrb(scratch, MemOperand(src, 1, PostIndex));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ sub(length, length, Operand(1), SetCC);
+ b(ne, &byte_loop_1);
+ bind(&done);
+}
+
+
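
The same copy written as plain C++ (illustration only). Note that the
alignment loop above branches to byte_loop_1, not back to align_loop_1,
after the first unaligned byte, so a source that starts unaligned appears
to be copied byte by byte throughout; the sketch shows the word-copy
structure the code is built around:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void CopyBytesRef(const uint8_t* src, uint8_t* dst, std::size_t length) {
      const std::size_t kWord = sizeof(uintptr_t);
      // Byte-copy until src is word aligned.
      while (length > 0 &&
             (reinterpret_cast<uintptr_t>(src) & (kWord - 1)) != 0) {
        *dst++ = *src++;
        --length;
      }
      // Copy word-sized chunks from the aligned source; dst may be
      // unaligned, mirroring the str vs. four-strb split above.
      while (length >= kWord) {
        uintptr_t word;
        std::memcpy(&word, src, kWord);
        std::memcpy(dst, &word, kWord);
        src += kWord;
        dst += kWord;
        length -= kWord;
      }
      // Copy any remaining tail bytes.
      while (length > 0) {
        *dst++ = *src++;
        --length;
      }
    }
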
void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
Register source, // Input.
Register scratch) {
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 3e13c783db..acd1d79b7c 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -121,6 +121,15 @@ class MacroAssembler: public Assembler {
Condition cond = al);
void Sbfx(Register dst, Register src, int lsb, int width,
Condition cond = al);
+ // The scratch register is not used on ARMv7.
+ // scratch can be the same register as src (in which case it is clobbered),
+ // but not the same as dst.
+ void Bfi(Register dst,
+ Register src,
+ Register scratch,
+ int lsb,
+ int width,
+ Condition cond = al);
void Bfc(Register dst, int lsb, int width, Condition cond = al);
void Usat(Register dst, int satpos, const Operand& src,
Condition cond = al);
@@ -234,6 +243,17 @@ class MacroAssembler: public Assembler {
}
}
+ // Pop two registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Condition cond = al) {
+ ASSERT(!src1.is(src2));
+ if (src1.code() > src2.code()) {
+ ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
+ } else {
+ ldr(src2, MemOperand(sp, 4, PostIndex), cond);
+ ldr(src1, MemOperand(sp, 4, PostIndex), cond);
+ }
+ }
+
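
ldm always loads the lowest-numbered register from the lowest address, so
the single-instruction path is only valid when the rightmost operand has
the smaller register code; otherwise two ldr instructions preserve pop
order. The stack effect, as pseudo-C (illustration only):

    // Pop(src1, src2) on the full-descending ARM stack:
    //   src2 = *sp++;  // rightmost register, popped first (lower address)
    //   src1 = *sp++;
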
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
@@ -497,6 +517,14 @@ class MacroAssembler: public Assembler {
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst, Register src, RegList temps, int field_count);
+ // Copies a number of bytes from src to dst. All registers are clobbered.
+ // On exit, src and dst point just past the last byte read or written, and
+ // length is zero.
+ void CopyBytes(Register src,
+ Register dst,
+ Register length,
+ Register scratch);
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -613,6 +641,19 @@ class MacroAssembler: public Assembler {
DwVfpRegister double_scratch,
Label *not_int32);
+  // Truncates a double using a specific rounding mode.
+  // Clears the z flag (ne condition) if an overflow occurs.
+  // If check is kCheckForInexactConversion, the z flag is also cleared when
+  // the conversion is inexact, i.e. when the double value cannot be
+  // represented exactly as a 32-bit integer.
+ void EmitVFPTruncate(VFPRoundingMode rounding_mode,
+ SwVfpRegister result,
+ DwVfpRegister double_input,
+ Register scratch1,
+ Register scratch2,
+ CheckForInexactConversion check
+ = kDontCheckForInexactConversion);
+
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer
// for 0 (31 instead of 32). Source and scratch can be the same in which case
@@ -777,11 +818,11 @@ class MacroAssembler: public Assembler {
mov(reg, scratch);
}
- void SmiUntag(Register reg) {
- mov(reg, Operand(reg, ASR, kSmiTagSize));
+ void SmiUntag(Register reg, SBit s = LeaveCC) {
+ mov(reg, Operand(reg, ASR, kSmiTagSize), s);
}
- void SmiUntag(Register dst, Register src) {
- mov(dst, Operand(src, ASR, kSmiTagSize));
+ void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
+ mov(dst, Operand(src, ASR, kSmiTagSize), s);
}
// Jump if the register contains a smi.
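
The new SBit parameter lets a caller fold a zero or sign test into the
untag itself: smis on ARM hold the value shifted left by kSmiTagSize (1),
so the ASR both untags and, with SetCC, sets N and Z from the result. A
usage sketch (assumed call site and labels, not from this patch):

    __ SmiUntag(r0, SetCC);    // r0 = r0 >> 1, condition flags updated
    __ b(eq, &zero_case);      // smi was 0
    __ b(mi, &negative_case);  // smi was negative
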
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 20d51c6af0..f475a18b09 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1005,7 +1005,9 @@ int Simulator::ReadW(int32_t addr, Instruction* instr) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
}
- PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
return 0;
#endif
@@ -1023,7 +1025,9 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
*ptr = value;
return;
}
- PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
#endif
}
@@ -1038,7 +1042,9 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
- PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
return 0;
#endif
@@ -1072,7 +1078,9 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
*ptr = value;
return;
}
- PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
#endif
}
@@ -1089,7 +1097,9 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
*ptr = value;
return;
}
- PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
#endif
}
@@ -2554,6 +2564,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value / dm_value;
+ div_zero_vfp_flag_ = (dm_value == 0);
set_d_register_from_double(vd, dd_value);
} else {
UNIMPLEMENTED(); // Not used by V8.
@@ -2788,14 +2799,17 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
+ double abs_diff =
+ unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
+ : fabs(val - temp);
+
+ inexact_vfp_flag_ = (abs_diff != 0);
+
if (inv_op_vfp_flag_) {
temp = VFPConversionSaturate(val, unsigned_integer);
} else {
switch (mode) {
case RN: {
- double abs_diff =
- unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
- : fabs(val - temp);
int val_sign = (val > 0) ? 1 : -1;
if (abs_diff > 0.5) {
temp += val_sign;
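
Hoisting abs_diff out of the RN case lets the simulator record the inexact
flag for every rounding mode, which is what EmitVFPTruncate's
kCheckForInexactConversion path reads back from the FPSCR. The computation
in isolation (host-side illustration only; IsInexact is not a V8 function):

    #include <cmath>
    #include <cstdint>

    // A conversion is inexact when the rounded integer differs from the
    // original double, e.g. 2.5 -> 2 is inexact while 2.0 -> 2 is exact.
    bool IsInexact(double val, int32_t temp) {
      return std::fabs(val - temp) != 0;
    }
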
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index e2501125a0..60a11f3ced 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -2671,10 +2671,13 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
__ Push(r1, r2, r0); // Receiver, name, value.
+ __ mov(r0, Operand(Smi::FromInt(strict_mode_)));
+ __ push(r0); // strict mode
+
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
- __ TailCallExternalReference(store_ic_property, 3, 1);
+ __ TailCallExternalReference(store_ic_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
@@ -4056,7 +4059,12 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
- __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+ __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ __ mov(r0, Operand(Smi::FromInt(
+ Code::ExtractExtraICStateFromFlags(flags) & kStrictMode)));
+ __ Push(r1, r0);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
return GetCode(flags);
}
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index b4b518cff6..544e405dbb 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -332,9 +332,9 @@ void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
void VirtualFrame::CallStoreIC(Handle<String> name,
bool is_contextual,
StrictModeFlag strict_mode) {
- Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode
- ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
PopToR0();
RelocInfo::Mode mode;
if (is_contextual) {
@@ -359,8 +359,10 @@ void VirtualFrame::CallKeyedLoadIC() {
}
-void VirtualFrame::CallKeyedStoreIC() {
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
+ Handle<Code> ic(Builtins::builtin(
+ (strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
PopToR1R0();
SpillAll();
EmitPop(r2);
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index b6e794a5c0..76470bdc53 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -303,7 +303,7 @@ class VirtualFrame : public ZoneObject {
// Call keyed store IC. Value, key and receiver are on the stack. All three
// are consumed. Result is returned in r0.
- void CallKeyedStoreIC();
+ void CallKeyedStoreIC(StrictModeFlag strict_mode);
// Call into an IC stub given the number of arguments it removes
// from the stack. Register arguments to the IC stub are implicit,