| field | value | date |
|---|---|---|
| author | Ben Noordhuis <info@bnoordhuis.nl> | 2013-04-17 16:10:37 +0200 |
| committer | Ben Noordhuis <info@bnoordhuis.nl> | 2013-04-17 16:10:37 +0200 |
| commit | 9f682265d6631a29457abeb53827d01fa77493c8 (patch) | |
| tree | 92a1eec49b1f280931598a72dcf0cca3d795f210 /deps/v8/src/arm | |
| parent | 951e0b69fa3c8b1a5d708e29de9d6f7d1db79827 (diff) | |
| download | node-9f682265d6631a29457abeb53827d01fa77493c8.tar.gz | |
deps: upgrade v8 to 3.18.0
Diffstat (limited to 'deps/v8/src/arm')
22 files changed, 1325 insertions, 2367 deletions
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 123013b0a..0f9630b34 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -48,29 +48,17 @@ namespace internal {

 int Register::NumAllocatableRegisters() {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    return kMaxNumAllocatableRegisters;
-  } else {
-    return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
-  }
+  return kMaxNumAllocatableRegisters;
 }


 int DwVfpRegister::NumRegisters() {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
-  } else {
-    return 1;
-  }
+  return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
 }


 int DwVfpRegister::NumAllocatableRegisters() {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    return NumRegisters() - kNumReservedRegisters;
-  } else {
-    return 1;
-  }
+  return NumRegisters() - kNumReservedRegisters;
 }

diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 1574d51bb..bc21b6401 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -63,29 +63,21 @@ ExternalReference ExternalReference::cpu_features() {
 static unsigned CpuFeaturesImpliedByCompiler() {
   unsigned answer = 0;
 #ifdef CAN_USE_ARMV7_INSTRUCTIONS
-  answer |= 1u << ARMv7;
+  if (FLAG_enable_armv7) {
+    answer |= 1u << ARMv7;
+  }
 #endif  // CAN_USE_ARMV7_INSTRUCTIONS
 #ifdef CAN_USE_VFP3_INSTRUCTIONS
-  answer |= 1u << VFP3 | 1u << VFP2 | 1u << ARMv7;
+  if (FLAG_enable_vfp3) {
+    answer |= 1u << VFP3 | 1u << ARMv7;
+  }
 #endif  // CAN_USE_VFP3_INSTRUCTIONS
-#ifdef CAN_USE_VFP2_INSTRUCTIONS
-  answer |= 1u << VFP2;
-#endif  // CAN_USE_VFP2_INSTRUCTIONS
 #ifdef CAN_USE_VFP32DREGS
-  answer |= 1u << VFP32DREGS;
+  if (FLAG_enable_32dregs) {
+    answer |= 1u << VFP32DREGS;
+  }
 #endif  // CAN_USE_VFP32DREGS
-
-#ifdef __arm__
-  // If the compiler is allowed to use VFP then we can use VFP too in our code
-  // generation even when generating snapshots.  ARMv7 and hardware floating
-  // point support implies VFPv3, see ARM DDI 0406B, page A1-6.
-#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
-    && !defined(__SOFTFP__)
-  answer |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
-#endif  // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
-        //  && !defined(__SOFTFP__)
-#endif  // _arm__
-  if (answer & (1u << ARMv7)) {
+  if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
     answer |= 1u << UNALIGNED_ACCESSES;
   }

@@ -94,18 +86,13 @@ static unsigned CpuFeaturesImpliedByCompiler() {

 const char* DwVfpRegister::AllocationIndexToString(int index) {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    ASSERT(index >= 0 && index < NumAllocatableRegisters());
-    ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
-           kNumReservedRegisters - 1);
-    if (index >= kDoubleRegZero.code())
-      index += kNumReservedRegisters;
-
-    return VFPRegisters::Name(index, true);
-  } else {
-    ASSERT(index == 0);
-    return "sfpd0";
-  }
+  ASSERT(index >= 0 && index < NumAllocatableRegisters());
+  ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+         kNumReservedRegisters - 1);
+  if (index >= kDoubleRegZero.code())
+    index += kNumReservedRegisters;
+
+  return VFPRegisters::Name(index, true);
 }
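These first hunks make VFP2 an unconditional baseline, so register counts and feature checks reduce to a single bitmask. A minimal standalone sketch of that bitmask pattern (the names below are stand-ins, not V8's actual declarations):

```cpp
#include <cstdint>

// Each CpuFeature names a bit index; a single mask accumulates one bit per
// feature, mirroring supported_ and CpuFeaturesImpliedByCompiler() above.
enum CpuFeature { ARMv7, VFP3, VFP32DREGS, SUDIV, UNALIGNED_ACCESSES };

struct CpuFeatureMask {
  uint64_t supported = 0;

  void Add(CpuFeature f) { supported |= static_cast<uint64_t>(1) << f; }
  bool IsSupported(CpuFeature f) const {
    return (supported & (static_cast<uint64_t>(1) << f)) != 0;
  }
};
```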
@@ -124,6 +111,8 @@ void CpuFeatures::Probe() {

   if (Serializer::enabled()) {
     // No probing for features if we might serialize (generate snapshot).
+    printf(" ");
+    PrintFeatures();
     return;
   }

@@ -133,8 +122,7 @@ void CpuFeatures::Probe() {
   if (FLAG_enable_vfp3) {
     supported_ |= static_cast<uint64_t>(1) << VFP3 |
-                  static_cast<uint64_t>(1) << ARMv7 |
-                  static_cast<uint64_t>(1) << VFP2;
+                  static_cast<uint64_t>(1) << ARMv7;
   }
   // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
   if (FLAG_enable_armv7) {

@@ -153,48 +141,127 @@ void CpuFeatures::Probe() {
     supported_ |= static_cast<uint64_t>(1) << VFP32DREGS;
   }

+  if (FLAG_enable_unaligned_accesses) {
+    supported_ |= static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
+  }
+
 #else  // __arm__
   // Probe for additional features not already known to be available.
-  if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
+  if (!IsSupported(VFP3) && FLAG_enable_vfp3 && OS::ArmCpuHasFeature(VFP3)) {
     // This implementation also sets the VFP flags if runtime
-    // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI
+    // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
     // 0406B, page A1-6.
     found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP3 |
-        static_cast<uint64_t>(1) << ARMv7 |
-        static_cast<uint64_t>(1) << VFP2;
-  } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) {
-    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP2;
+        static_cast<uint64_t>(1) << ARMv7;
   }

-  if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
+  if (!IsSupported(ARMv7) && FLAG_enable_armv7 && OS::ArmCpuHasFeature(ARMv7)) {
     found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
   }

-  if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) {
+  if (!IsSupported(SUDIV) && FLAG_enable_sudiv && OS::ArmCpuHasFeature(SUDIV)) {
     found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;
   }

-  if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) {
+  if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
+      && OS::ArmCpuHasFeature(ARMv7)) {
     found_by_runtime_probing_only_ |=
         static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
   }

   if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER &&
-      OS::ArmCpuHasFeature(ARMv7)) {
+      FLAG_enable_movw_movt && OS::ArmCpuHasFeature(ARMv7)) {
     found_by_runtime_probing_only_ |=
         static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
   }

-  if (!IsSupported(VFP32DREGS) && OS::ArmCpuHasFeature(VFP32DREGS)) {
+  if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs
+      && OS::ArmCpuHasFeature(VFP32DREGS)) {
     found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
   }

   supported_ |= found_by_runtime_probing_only_;
 #endif

-  // Assert that VFP3 implies VFP2 and ARMv7.
-  ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7)));
+  // Assert that VFP3 implies ARMv7.
+  ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
+}
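The rewritten Probe() applies one rule throughout: a feature is advertised only when its --enable_* flag and the compile-time define or OS probe both agree. A hedged sketch of that gate for a single feature (the flag and probe below are placeholders, not V8 symbols):

```cpp
#include <cstdint>

// Stand-ins for FLAG_enable_sudiv and OS::ArmCpuHasFeature(SUDIV).
static bool flag_enable_sudiv = true;
static bool CpuProbeReportsSudiv() { return true; }

// A feature reaches the supported mask only when the flag AND the probe
// agree, so --noenable_sudiv can veto what the hardware advertises.
uint64_t ProbeSudiv(uint64_t supported) {
  const int kSudivBit = 4;  // arbitrary bit index for this sketch
  if (flag_enable_sudiv && CpuProbeReportsSudiv()) {
    supported |= static_cast<uint64_t>(1) << kSudivBit;
  }
  return supported;
}
```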
"hard" : "softfp"; + +#else // __arm__ + + arm_test = " simulator"; +# if defined CAN_USE_VFP3_INSTRUCTIONS +# if defined CAN_USE_VFP32DREGS + arm_fpu = " vfp3"; +# else + arm_fpu = " vfp3-d16"; +# endif +# else + arm_fpu = " vfp2"; +# endif +# if USE_EABI_HARDFLOAT == 1 + arm_float_abi = "hard"; +# else + arm_float_abi = "softfp"; +# endif + +#endif // __arm__ + + printf("target%s %s%s%s %s\n", + arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi); +} + + +void CpuFeatures::PrintFeatures() { + printf( + "ARMv7=%d VFP3=%d VFP32DREGS=%d SUDIV=%d UNALIGNED_ACCESSES=%d " + "MOVW_MOVT_IMMEDIATE_LOADS=%d", + CpuFeatures::IsSupported(ARMv7), + CpuFeatures::IsSupported(VFP3), + CpuFeatures::IsSupported(VFP32DREGS), + CpuFeatures::IsSupported(SUDIV), + CpuFeatures::IsSupported(UNALIGNED_ACCESSES), + CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS)); +#ifdef __arm__ + bool eabi_hardfloat = OS::ArmUsingHardFloat(); +#elif USE_EABI_HARDFLOAT + bool eabi_hardfloat = true; +#else + bool eabi_hardfloat = false; +#endif + printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat); } @@ -1763,7 +1830,6 @@ void Assembler::vldr(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-924. // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) | // Vd(15-12) | 1011(11-8) | offset - ASSERT(IsEnabled(VFP2)); int u = 1; if (offset < 0) { offset = -offset; @@ -1807,7 +1873,6 @@ void Assembler::vldr(const SwVfpRegister dst, // Instruction details available in ARM DDI 0406A, A8-628. // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) | // Vdst(15-12) | 1010(11-8) | offset - ASSERT(IsEnabled(VFP2)); int u = 1; if (offset < 0) { offset = -offset; @@ -1851,7 +1916,6 @@ void Assembler::vstr(const DwVfpRegister src, // Instruction details available in ARM DDI 0406C.b, A8-1082. // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) | // Vd(15-12) | 1011(11-8) | (offset/4) - ASSERT(IsEnabled(VFP2)); int u = 1; if (offset < 0) { offset = -offset; @@ -1895,7 +1959,6 @@ void Assembler::vstr(const SwVfpRegister src, // Instruction details available in ARM DDI 0406A, A8-786. // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) | // Vdst(15-12) | 1010(11-8) | (offset/4) - ASSERT(IsEnabled(VFP2)); int u = 1; if (offset < 0) { offset = -offset; @@ -1938,7 +2001,6 @@ void Assembler::vldm(BlockAddrMode am, // Instruction details available in ARM DDI 0406C.b, A8-922. // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) | // first(15-12) | 1011(11-8) | (count * 2) - ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -1960,7 +2022,6 @@ void Assembler::vstm(BlockAddrMode am, // Instruction details available in ARM DDI 0406C.b, A8-1080. // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) | // first(15-12) | 1011(11-8) | (count * 2) - ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -1981,7 +2042,6 @@ void Assembler::vldm(BlockAddrMode am, // Instruction details available in ARM DDI 0406A, A8-626. // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) | // first(15-12) | 1010(11-8) | (count/2) - ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -2002,7 +2062,6 @@ void Assembler::vstm(BlockAddrMode am, // Instruction details available in ARM DDI 0406A, A8-784. 
@@ -2002,7 +2062,6 @@ void Assembler::vstm(BlockAddrMode am,
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
   // first(15-12) | 1011(11-8) | (count/2)
-  ASSERT(IsEnabled(VFP2));
   ASSERT_LE(first.code(), last.code());
   ASSERT(am == ia || am == ia_w || am == db_w);
   ASSERT(!base.is(pc));

@@ -2016,7 +2075,7 @@ void Assembler::vstm(BlockAddrMode am,

 static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
   uint64_t i;
-  memcpy(&i, &d, 8);
+  OS::MemCopy(&i, &d, 8);

   *lo = i & 0xffffffff;
   *hi = i >> 32;
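DoubleAsTwoUInt32() now routes through OS::MemCopy, but the underlying move is plain type-punning via memcpy, which is the aliasing-safe way to do this in C++. A standalone equivalent:

```cpp
#include <cstdint>
#include <cstring>

// Split a double into its low and high 32-bit halves without violating
// strict aliasing: copying through memcpy is well-defined, unlike a
// reinterpret_cast of the pointer.
void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  std::memcpy(&i, &d, sizeof(i));
  *lo = static_cast<uint32_t>(i & 0xffffffff);
  *hi = static_cast<uint32_t>(i >> 32);
}
```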
@@ -2076,8 +2135,6 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
 void Assembler::vmov(const DwVfpRegister dst,
                      double imm,
                      const Register scratch) {
-  ASSERT(IsEnabled(VFP2));
-
   uint32_t enc;
   if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
     // The double can be encoded in the instruction.

@@ -2148,7 +2205,6 @@ void Assembler::vmov(const SwVfpRegister dst,
                      const Condition cond) {
   // Sd = Sm
   // Instruction details available in ARM DDI 0406B, A8-642.
-  ASSERT(IsEnabled(VFP2));
   int sd, d, sm, m;
   dst.split_code(&sd, &d);
   src.split_code(&sm, &m);

@@ -2163,7 +2219,6 @@ void Assembler::vmov(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406C.b, A8-938.
   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
   // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
-  ASSERT(IsEnabled(VFP2));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;

@@ -2181,7 +2236,6 @@ void Assembler::vmov(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406C.b, A8-940.
   // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
   // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(IsEnabled(VFP2));
   ASSERT(index.index == 0 || index.index == 1);
   int vd, d;
   dst.split_code(&vd, &d);

@@ -2198,7 +2252,6 @@ void Assembler::vmov(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406C.b, A8-948.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
-  ASSERT(IsEnabled(VFP2));
   ASSERT(!src1.is(pc) && !src2.is(pc));
   int vm, m;
   dst.split_code(&vm, &m);

@@ -2215,7 +2268,6 @@ void Assembler::vmov(const Register dst1,
   // Instruction details available in ARM DDI 0406C.b, A8-948.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
-  ASSERT(IsEnabled(VFP2));
   ASSERT(!dst1.is(pc) && !dst2.is(pc));
   int vm, m;
   src.split_code(&vm, &m);

@@ -2231,7 +2283,6 @@ void Assembler::vmov(const SwVfpRegister dst,
   // Instruction details available in ARM DDI 0406A, A8-642.
   // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
   // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(IsEnabled(VFP2));
   ASSERT(!src.is(pc));
   int sn, n;
   dst.split_code(&sn, &n);

@@ -2246,7 +2297,6 @@ void Assembler::vmov(const Register dst,
   // Instruction details available in ARM DDI 0406A, A8-642.
   // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
   // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(IsEnabled(VFP2));
   ASSERT(!dst.is(pc));
   int sn, n;
   src.split_code(&sn, &n);

@@ -2371,7 +2421,6 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(IsEnabled(VFP2));
   emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
 }

@@ -2380,7 +2429,6 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(IsEnabled(VFP2));
   emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
 }

@@ -2389,7 +2437,6 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(IsEnabled(VFP2));
   emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
 }

@@ -2398,7 +2445,6 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(IsEnabled(VFP2));
   emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
 }

@@ -2407,7 +2453,6 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(IsEnabled(VFP2));
   emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
 }

@@ -2416,7 +2461,6 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(IsEnabled(VFP2));
   emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
 }

@@ -2425,7 +2469,6 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(IsEnabled(VFP2));
   emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
 }

@@ -2436,7 +2479,6 @@ void Assembler::vneg(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406C.b, A8-968.
   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
   // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
-  ASSERT(IsEnabled(VFP2));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;

@@ -2453,7 +2495,6 @@ void Assembler::vabs(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406C.b, A8-524.
   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
   // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
-  ASSERT(IsEnabled(VFP2));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;

@@ -2472,7 +2513,6 @@ void Assembler::vadd(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406C.b, A8-830.
   // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
-  ASSERT(IsEnabled(VFP2));
   int vd, d;
   dst.split_code(&vd, &d);
   int vn, n;
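Every emitter in this file follows the same recipe: split each register code into a 4-bit field plus a one-bit extension (the D/N/M bits), then OR the fields into fixed bit positions. A simplified, hand-rolled sketch of that recipe for the vadd encoding documented above (this is illustrative, not V8's actual emit path):

```cpp
#include <cstdint>

// A VFP double register code 0..31 splits into a 4-bit field plus one
// extension bit, mirroring DwVfpRegister::split_code(&vd, &d) in the diff.
void SplitCode(int code, int* vd, int* d) {
  *vd = code & 0xF;        // low four bits go in the Vd field
  *d  = (code >> 4) & 1;   // fifth bit goes in the D bit
}

// Pack cond and the split register fields per the vadd layout above:
// cond | 11100 | D | 11 | Vn | Vd | 101 | sz=1 | N | 0 | M | 0 | Vm.
uint32_t EncodeVadd(uint32_t cond, int dst, int src1, int src2) {
  int vd, d, vn, n, vm, m;
  SplitCode(dst, &vd, &d);
  SplitCode(src1, &vn, &n);
  SplitCode(src2, &vm, &m);
  return cond << 28 | 0x1C << 23 | d << 22 | 0x3 << 20 | vn << 16 |
         vd << 12 | 0x5 << 9 | 1 << 8 | n << 7 | m << 5 | vm;
}
```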
@@ -2493,7 +2533,6 @@ void Assembler::vsub(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406C.b, A8-1086.
   // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
-  ASSERT(IsEnabled(VFP2));
   int vd, d;
   dst.split_code(&vd, &d);
   int vn, n;

@@ -2514,7 +2553,6 @@ void Assembler::vmul(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406C.b, A8-960.
   // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
-  ASSERT(IsEnabled(VFP2));
   int vd, d;
   dst.split_code(&vd, &d);
   int vn, n;

@@ -2571,7 +2609,6 @@ void Assembler::vdiv(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406C.b, A8-882.
   // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
-  ASSERT(IsEnabled(VFP2));
   int vd, d;
   dst.split_code(&vd, &d);
   int vn, n;

@@ -2590,7 +2627,6 @@ void Assembler::vcmp(const DwVfpRegister src1,
   // Instruction details available in ARM DDI 0406C.b, A8-864.
   // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
-  ASSERT(IsEnabled(VFP2));
   int vd, d;
   src1.split_code(&vd, &d);
   int vm, m;

@@ -2607,7 +2643,6 @@ void Assembler::vcmp(const DwVfpRegister src1,
   // Instruction details available in ARM DDI 0406C.b, A8-864.
   // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
-  ASSERT(IsEnabled(VFP2));
   ASSERT(src2 == 0.0);
   int vd, d;
   src1.split_code(&vd, &d);

@@ -2619,7 +2654,6 @@ void Assembler::vmsr(Register dst, Condition cond) {
   // Instruction details available in ARM DDI 0406A, A8-652.
   // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
   // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
-  ASSERT(IsEnabled(VFP2));
   emit(cond | 0xE*B24 | 0xE*B20 | B16 | dst.code()*B12 | 0xA*B8 | B4);
 }

@@ -2629,7 +2663,6 @@ void Assembler::vmrs(Register dst, Condition cond) {
   // Instruction details available in ARM DDI 0406A, A8-652.
   // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
   // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
-  ASSERT(IsEnabled(VFP2));
   emit(cond | 0xE*B24 | 0xF*B20 | B16 | dst.code()*B12 | 0xA*B8 | B4);
 }

@@ -2641,7 +2674,6 @@ void Assembler::vsqrt(const DwVfpRegister dst,
   // Instruction details available in ARM DDI 0406C.b, A8-1058.
   // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
-  ASSERT(IsEnabled(VFP2));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;

@@ -2747,9 +2779,9 @@ void Assembler::GrowBuffer() {
   // Copy the data.
   int pc_delta = desc.buffer - buffer_;
   int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
-  memmove(desc.buffer, buffer_, desc.instr_size);
-  memmove(reloc_info_writer.pos() + rc_delta,
-          reloc_info_writer.pos(), desc.reloc_size);
+  OS::MemMove(desc.buffer, buffer_, desc.instr_size);
+  OS::MemMove(reloc_info_writer.pos() + rc_delta,
+              reloc_info_writer.pos(), desc.reloc_size);

   // Switch buffers.
   DeleteArray(buffer_);

@@ -2998,7 +3030,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
       const double double_data = rinfo.data64();
       uint64_t uint_data = 0;
-      memcpy(&uint_data, &double_data, sizeof(double_data));
+      OS::MemCopy(&uint_data, &double_data, sizeof(double_data));
       emit(uint_data & 0xFFFFFFFF);
       emit(uint_data >> 32);
     }
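GrowBuffer() likewise switches to OS::MemMove, but the copying scheme itself is unchanged: instructions keep their offset from the buffer start, relocation info keeps its offset from the buffer end, and pc_delta/rc_delta are then used to fix up recorded positions. A minimal sketch of the copying step (the struct and names are invented for illustration):

```cpp
#include <cstdlib>
#include <cstring>

// A code buffer that, like V8's Assembler, stores instructions at the
// front and relocation info packed against the back.
struct CodeBuffer {
  char* data;
  int size;
  int instr_size;   // bytes of instructions at the front
  int reloc_size;   // bytes of reloc info at the back
};

// Grow the buffer: instructions keep their offset from the start, reloc
// info keeps its offset from the end. A real assembler would also apply
// the resulting deltas to any recorded positions.
void Grow(CodeBuffer* buf, int new_size) {
  char* new_data = static_cast<char*>(std::malloc(new_size));
  std::memmove(new_data, buf->data, buf->instr_size);
  std::memmove(new_data + new_size - buf->reloc_size,
               buf->data + buf->size - buf->reloc_size,
               buf->reloc_size);
  std::free(buf->data);
  buf->data = new_data;
  buf->size = new_size;
}
```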
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 045638e12..0aecbcdd6 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -55,16 +55,15 @@ class CpuFeatures : public AllStatic {
   // is enabled (snapshots must be portable).
   static void Probe();

+  // Display target use when compiling.
+  static void PrintTarget();
+
+  // Display features.
+  static void PrintFeatures();
+
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
     ASSERT(initialized_);
-    if (f == VFP3 && !FLAG_enable_vfp3) return false;
-    if (f == VFP2 && !FLAG_enable_vfp2) return false;
-    if (f == SUDIV && !FLAG_enable_sudiv) return false;
-    if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
-      return false;
-    }
-    if (f == VFP32DREGS && !FLAG_enable_32dregs) return false;
     return (supported_ & (1u << f)) != 0;
   }

@@ -117,7 +116,6 @@ struct Register {
   static const int kNumRegisters = 16;
   static const int kMaxNumAllocatableRegisters = 8;
   static const int kSizeInBytes = 4;
-  static const int kGPRsPerNonVFP2Double = 2;

   inline static int NumAllocatableRegisters();

@@ -214,6 +212,7 @@ const Register pc = { kRegister_pc_Code };

 // Single word VFP register.
 struct SwVfpRegister {
+  static const int kSizeInBytes = 4;
   bool is_valid() const { return 0 <= code_ && code_ < 32; }
   bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
   int code() const {

@@ -244,6 +243,7 @@ struct DwVfpRegister {
   static const int kNumReservedRegisters = 2;
   static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
       kNumReservedRegisters;
+  static const int kSizeInBytes = 8;

   // Note: the number of registers can be different at snapshot and run-time.
   // Any code included in the snapshot must be able to run both with 16 or 32

@@ -370,9 +370,6 @@ const DwVfpRegister d29 = { 29 };
 const DwVfpRegister d30 = { 30 };
 const DwVfpRegister d31 = { 31 };

-const Register sfpd_lo  = { kRegister_r6_Code };
-const Register sfpd_hi  = { kRegister_r7_Code };
-
 // Aliases for double registers.  Defined using #define instead of
 // "static const DwVfpRegister&" because Clang complains otherwise when a
 // compilation unit that includes this header doesn't use the variables.
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index d982f2706..1db415292 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -30,6 +30,7 @@
 #if defined(V8_TARGET_ARCH_ARM)

 #include "bootstrapper.h"
+#include "builtins-decls.h"
 #include "code-stubs.h"
 #include "regexp-macro-assembler.h"
 #include "stub-cache.h"

@@ -38,6 +39,18 @@ namespace v8 {
 namespace internal {

+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r3, r2, r1 };
+  descriptor->register_param_count_ = 3;
+  descriptor->register_params_ = registers;
+  descriptor->stack_parameter_count_ = NULL;
+  descriptor->deoptimization_handler_ =
+      Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+}
+
+
 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {

@@ -133,7 +146,6 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                     Label* lhs_not_nan,
                                     Label* slow,
                                     bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                            Register lhs,
                                            Register rhs);

@@ -181,9 +193,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {

   __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);

-  int map_index = (language_mode_ == CLASSIC_MODE)
-      ? Context::FUNCTION_MAP_INDEX
-      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
+  int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);

   // Compute the function map in the current native context and set that
   // as the map of the allocated object.
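The map_index computation now comes from Context::FunctionMapIndex(language_mode_, is_generator_). A hedged sketch of the selection that call plausibly performs — the enum names below are illustrative stand-ins, not copied from V8:

```cpp
enum LanguageMode { CLASSIC_MODE, STRICT_MODE };

// Hypothetical map indices: one function map per (strictness, generator)
// combination, generalizing the old two-way CLASSIC/STRICT choice.
enum MapIndex {
  FUNCTION_MAP_INDEX,
  STRICT_MODE_FUNCTION_MAP_INDEX,
  GENERATOR_FUNCTION_MAP_INDEX,
  STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX
};

MapIndex FunctionMapIndex(LanguageMode mode, bool is_generator) {
  if (is_generator) {
    return mode == CLASSIC_MODE ? GENERATOR_FUNCTION_MAP_INDEX
                                : STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX;
  }
  return mode == CLASSIC_MODE ? FUNCTION_MAP_INDEX
                              : STRICT_MODE_FUNCTION_MAP_INDEX;
}
```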
@@ -403,153 +413,6 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
 }


-static void GenerateFastCloneShallowArrayCommon(
-    MacroAssembler* masm,
-    int length,
-    FastCloneShallowArrayStub::Mode mode,
-    AllocationSiteMode allocation_site_mode,
-    Label* fail) {
-  // Registers on entry:
-  //
-  // r3: boilerplate literal array.
-  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
-  // All sizes here are multiples of kPointerSize.
-  int elements_size = 0;
-  if (length > 0) {
-    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-        ? FixedDoubleArray::SizeFor(length)
-        : FixedArray::SizeFor(length);
-  }
-
-  int size = JSArray::kSize;
-  int allocation_info_start = size;
-  if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
-    size += AllocationSiteInfo::kSize;
-  }
-  size += elements_size;
-
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  AllocationFlags flags = TAG_OBJECT;
-  if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
-    flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
-  }
-  __ Allocate(size, r0, r1, r2, fail, flags);
-
-  if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
-    __ mov(r2, Operand(Handle<Map>(masm->isolate()->heap()->
-                                   allocation_site_info_map())));
-    __ str(r2, FieldMemOperand(r0, allocation_info_start));
-    __ str(r3, FieldMemOperand(r0, allocation_info_start + kPointerSize));
-  }
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length == 0)) {
-      __ ldr(r1, FieldMemOperand(r3, i));
-      __ str(r1, FieldMemOperand(r0, i));
-    }
-  }
-
-  if (length > 0) {
-    // Get hold of the elements array of the boilerplate and setup the
-    // elements pointer in the resulting object.
-    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
-    if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
-      __ add(r2, r0, Operand(JSArray::kSize + AllocationSiteInfo::kSize));
-    } else {
-      __ add(r2, r0, Operand(JSArray::kSize));
-    }
-    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
-
-    // Copy the elements array.
-    ASSERT((elements_size % kPointerSize) == 0);
-    __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
-  }
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
-  // Stack layout on entry:
-  //
-  // [sp]: constant elements.
-  // [sp + kPointerSize]: literal index.
-  // [sp + (2 * kPointerSize)]: literals array.
-
-  // Load boilerplate object into r3 and check if we need to create a
-  // boilerplate.
-  Label slow_case;
-  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
-  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
-  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
-  __ b(eq, &slow_case);
-
-  FastCloneShallowArrayStub::Mode mode = mode_;
-  if (mode == CLONE_ANY_ELEMENTS) {
-    Label double_elements, check_fast_elements;
-    __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
-    __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
-    __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
-    __ b(ne, &check_fast_elements);
-    GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
-                                        allocation_site_mode_,
-                                        &slow_case);
-    // Return and remove the on-stack parameters.
-    __ add(sp, sp, Operand(3 * kPointerSize));
-    __ Ret();
-
-    __ bind(&check_fast_elements);
-    __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
-    __ b(ne, &double_elements);
-    GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
-                                        allocation_site_mode_,
-                                        &slow_case);
-    // Return and remove the on-stack parameters.
-    __ add(sp, sp, Operand(3 * kPointerSize));
-    __ Ret();
-
-    __ bind(&double_elements);
-    mode = CLONE_DOUBLE_ELEMENTS;
-    // Fall through to generate the code to handle double elements.
-  }
-
-  if (FLAG_debug_code) {
-    const char* message;
-    Heap::RootListIndex expected_map_index;
-    if (mode == CLONE_ELEMENTS) {
-      message = "Expected (writable) fixed array";
-      expected_map_index = Heap::kFixedArrayMapRootIndex;
-    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
-      message = "Expected (writable) fixed double array";
-      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
-    } else {
-      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
-      message = "Expected copy-on-write fixed array";
-      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
-    }
-    __ push(r3);
-    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
-    __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
-    __ CompareRoot(r3, expected_map_index);
-    __ Assert(eq, message);
-    __ pop(r3);
-  }
-
-  GenerateFastCloneShallowArrayCommon(masm, length_, mode,
-                                      allocation_site_mode_,
-                                      &slow_case);
-
-  // Return and remove the on-stack parameters.
-  __ add(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
-
-  __ bind(&slow_case);
-  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
 // registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
 // 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
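The comment closing the hunk above pins down the double format that the now VFP-only conversion paths produce: 1 sign bit, 11 exponent bits biased by 1023, and 52 fraction bits split 20/32 across the two words. A hedged sketch of that conversion done by hand, as the removed ConvertToDoubleStub did in core registers (the helper name is invented for illustration):

```cpp
#include <cstdint>

// Hand-rolled int32 -> IEEE-754 double bit pattern: 1 sign bit, 11
// exponent bits (biased 1023), 52 fraction bits split 20/32 across the
// hi/lo words. This is the job ConvertToDoubleStub used to perform.
void IntToDoubleWords(int32_t value, uint32_t* hi, uint32_t* lo) {
  *hi = 0;
  *lo = 0;
  if (value == 0) return;                      // +0.0 is the all-zero pattern
  uint32_t sign = value < 0 ? 0x80000000u : 0;
  uint64_t mag = value < 0 ? -static_cast<int64_t>(value) : value;
  int msb = 63;
  while (!(mag >> msb)) msb--;                 // position of the leading 1
  uint64_t frac =
      (mag << (52 - msb)) & ((1ull << 52) - 1);  // drop the implicit 1
  uint64_t bits = (static_cast<uint64_t>(sign) << 32) |
                  (static_cast<uint64_t>(1023 + msb) << 52) | frac;
  *hi = static_cast<uint32_t>(bits >> 32);
  *lo = static_cast<uint32_t>(bits);
}

// e.g. IntToDoubleWords(1, &hi, &lo) yields hi == 0x3FF00000, lo == 0.
```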
@@ -650,30 +513,15 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                    FloatingPointHelper::Destination destination,
                                    Register scratch1,
                                    Register scratch2) {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(masm, VFP2);
-    __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
-    __ vmov(d7.high(), scratch1);
-    __ vcvt_f64_s32(d7, d7.high());
-    __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
-    __ vmov(d6.high(), scratch1);
-    __ vcvt_f64_s32(d6, d6.high());
-    if (destination == kCoreRegisters) {
-      __ vmov(r2, r3, d7);
-      __ vmov(r0, r1, d6);
-    }
-  } else {
-    ASSERT(destination == kCoreRegisters);
-    // Write Smi from r0 to r3 and r2 in double format.
-    __ mov(scratch1, Operand(r0));
-    ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
-    __ push(lr);
-    __ Call(stub1.GetCode(masm->isolate()));
-    // Write Smi from r1 to r1 and r0 in double format.
-    __ mov(scratch1, Operand(r1));
-    ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
-    __ Call(stub2.GetCode(masm->isolate()));
-    __ pop(lr);
+  __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
+  __ vmov(d7.high(), scratch1);
+  __ vcvt_f64_s32(d7, d7.high());
+  __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
+  __ vmov(d6.high(), scratch1);
+  __ vcvt_f64_s32(d6, d6.high());
+  if (destination == kCoreRegisters) {
+    __ vmov(r2, r3, d7);
+    __ vmov(r0, r1, d6);
   }
 }

@@ -700,9 +548,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

   // Handle loading a double from a heap number.
-  if (CpuFeatures::IsSupported(VFP2) &&
-      destination == kVFPRegisters) {
-    CpuFeatureScope scope(masm, VFP2);
+  if (destination == kVFPRegisters) {
     // Load the double from tagged HeapNumber to double register.
     __ sub(scratch1, object, Operand(kHeapObjectTag));
     __ vldr(dst, scratch1, HeapNumber::kValueOffset);

@@ -715,23 +561,12 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,

   // Handle loading a double from a smi.
   __ bind(&is_smi);
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(masm, VFP2);
-    // Convert smi to double using VFP instructions.
-    __ vmov(dst.high(), scratch1);
-    __ vcvt_f64_s32(dst, dst.high());
-    if (destination == kCoreRegisters) {
-      // Load the converted smi to dst1 and dst2 in double format.
-      __ vmov(dst1, dst2, dst);
-    }
-  } else {
-    ASSERT(destination == kCoreRegisters);
-    // Write smi to dst1 and dst2 double format.
-    __ mov(scratch1, Operand(object));
-    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
-    __ push(lr);
-    __ Call(stub.GetCode(masm->isolate()));
-    __ pop(lr);
+  // Convert smi to double using VFP instructions.
+  __ vmov(dst.high(), scratch1);
+  __ vcvt_f64_s32(dst, dst.high());
+  if (destination == kCoreRegisters) {
+    // Load the converted smi to dst1 and dst2 in double format.
+    __ vmov(dst1, dst2, dst);
   }

   __ bind(&done);
@@ -778,62 +613,10 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,

   Label done;

-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(masm, VFP2);
-    __ vmov(single_scratch, int_scratch);
-    __ vcvt_f64_s32(double_dst, single_scratch);
-    if (destination == kCoreRegisters) {
-      __ vmov(dst_mantissa, dst_exponent, double_dst);
-    }
-  } else {
-    Label fewer_than_20_useful_bits;
-    // Expected output:
-    // |       dst_exponent      |       dst_mantissa      |
-    // | s |   exp   |              mantissa               |
-
-    // Check for zero.
-    __ cmp(int_scratch, Operand::Zero());
-    __ mov(dst_exponent, int_scratch);
-    __ mov(dst_mantissa, int_scratch);
-    __ b(eq, &done);
-
-    // Preload the sign of the value.
-    __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
-    // Get the absolute value of the object (as an unsigned integer).
-    __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
-
-    // Get mantissa[51:20].
-
-    // Get the position of the first set bit.
-    __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2);
-    __ rsb(dst_mantissa, dst_mantissa, Operand(31));
-
-    // Set the exponent.
-    __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
-    __ Bfi(dst_exponent, scratch2, scratch2,
-        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-
-    // Clear the first non null bit.
-    __ mov(scratch2, Operand(1));
-    __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa));
-
-    __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
-    // Get the number of bits to set in the lower part of the mantissa.
-    __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord),
-        SetCC);
-    __ b(mi, &fewer_than_20_useful_bits);
-    // Set the higher 20 bits of the mantissa.
-    __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2));
-    __ rsb(scratch2, scratch2, Operand(32));
-    __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2));
-    __ b(&done);
-
-    __ bind(&fewer_than_20_useful_bits);
-    __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
-    __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
-    __ orr(dst_exponent, dst_exponent, scratch2);
-    // Set dst1 to 0.
-    __ mov(dst_mantissa, Operand::Zero());
+  __ vmov(single_scratch, int_scratch);
+  __ vcvt_f64_s32(double_dst, single_scratch);
+  if (destination == kCoreRegisters) {
+    __ vmov(dst_mantissa, dst_exponent, double_dst);
   }
   __ bind(&done);
 }
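With the core-register fallback gone, the int32 paths below lean entirely on exact-conversion checks such as TestDoubleIsInt32 and TryDoubleToInt32Exact. The check itself is essentially a guarded round-trip; a portable sketch:

```cpp
#include <cstdint>

// A double holds an int32 exactly iff truncating and converting back
// reproduces the same value. The negated range test also rejects NaN
// (all comparisons against NaN are false). Note that -0.0 round-trips
// too, even though its sign bit differs from integer zero; V8's helpers
// treat that case separately where it matters.
bool DoubleIsInt32(double value, int32_t* result) {
  if (!(value >= -2147483648.0 && value <= 2147483647.0)) return false;
  int32_t truncated = static_cast<int32_t>(value);
  if (static_cast<double>(truncated) != value) return false;
  *result = truncated;
  return true;
}
```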
@@ -872,65 +655,17 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

   // Load the number.
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(masm, VFP2);
-    // Load the double value.
-    __ sub(scratch1, object, Operand(kHeapObjectTag));
-    __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
-
-    __ TestDoubleIsInt32(double_dst, double_scratch);
-    // Jump to not_int32 if the operation did not succeed.
-    __ b(ne, not_int32);
+  // Load the double value.
+  __ sub(scratch1, object, Operand(kHeapObjectTag));
+  __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);

-    if (destination == kCoreRegisters) {
-      __ vmov(dst_mantissa, dst_exponent, double_dst);
-    }
-
-  } else {
-    ASSERT(!scratch1.is(object) && !scratch2.is(object));
-    // Load the double value in the destination registers.
-    bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
-    if (save_registers) {
-      // Save both output registers, because the other one probably holds
-      // an important value too.
-      __ Push(dst_exponent, dst_mantissa);
-    }
-    __ Ldrd(dst_mantissa, dst_exponent,
-            FieldMemOperand(object, HeapNumber::kValueOffset));
-
-    // Check for 0 and -0.
-    Label zero;
-    __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask));
-    __ orr(scratch1, scratch1, Operand(dst_mantissa));
-    __ cmp(scratch1, Operand::Zero());
-    __ b(eq, &zero);
-
-    // Check that the value can be exactly represented by a 32-bit integer.
-    // Jump to not_int32 if that's not the case.
-    Label restore_input_and_miss;
-    DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
-                         &restore_input_and_miss);
-
-    // dst_* were trashed. Reload the double value.
-    if (save_registers) {
-      __ Pop(dst_exponent, dst_mantissa);
-    }
-    __ Ldrd(dst_mantissa, dst_exponent,
-            FieldMemOperand(object, HeapNumber::kValueOffset));
-    __ b(&done);
-
-    __ bind(&restore_input_and_miss);
-    if (save_registers) {
-      __ Pop(dst_exponent, dst_mantissa);
-    }
-    __ b(not_int32);
+  __ TestDoubleIsInt32(double_dst, double_scratch);
+  // Jump to not_int32 if the operation did not succeed.
+  __ b(ne, not_int32);

-    __ bind(&zero);
-    if (save_registers) {
-      __ Drop(2);
-    }
+  if (destination == kCoreRegisters) {
+    __ vmov(dst_mantissa, dst_exponent, double_dst);
   }
-
   __ bind(&done);
 }

@@ -963,43 +698,13 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,

   // Object is a heap number.
   // Convert the floating point value to a 32-bit integer.
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(masm, VFP2);
-
-    // Load the double value.
-    __ sub(scratch1, object, Operand(kHeapObjectTag));
-    __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset);
+  // Load the double value.
+  __ sub(scratch1, object, Operand(kHeapObjectTag));
+  __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset);

-    __ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1);
-    // Jump to not_int32 if the operation did not succeed.
-    __ b(ne, not_int32);
-  } else {
-    // Load the double value in the destination registers.
-    __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
-    __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
-    // Check for 0 and -0.
-    __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
-    __ orr(dst, scratch2, Operand(dst));
-    __ cmp(dst, Operand::Zero());
-    __ b(eq, &done);
-
-    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
-
-    // Registers state after DoubleIs32BitInteger.
-    // dst: mantissa[51:20].
-    // scratch2: 1
-
-    // Shift back the higher bits of the mantissa.
-    __ mov(dst, Operand(dst, LSR, scratch3));
-    // Set the implicit first bit.
-    __ rsb(scratch3, scratch3, Operand(32));
-    __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
-    // Set the sign.
-    __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
-    __ tst(scratch1, Operand(HeapNumber::kSignMask));
-    __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
-  }
+  __ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1);
+  // Jump to not_int32 if the operation did not succeed.
+  __ b(ne, not_int32);
   __ b(&done);

   __ bind(&maybe_undefined);

@@ -1093,7 +798,6 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
   __ push(lr);
   __ PrepareCallCFunction(0, 2, scratch);
   if (masm->use_eabi_hardfloat()) {
-    CpuFeatureScope scope(masm, VFP2);
     __ vmov(d0, r0, r1);
     __ vmov(d1, r2, r3);
   }

@@ -1105,7 +809,6 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
   // Store answer in the overwritable heap number. Double returned in
   // registers r0 and r1 or in d0.
   if (masm->use_eabi_hardfloat()) {
-    CpuFeatureScope scope(masm, VFP2);
     __ vstr(d0, FieldMemOperand(heap_number_result,
                                 HeapNumber::kValueOffset));
   } else {

@@ -1318,23 +1021,11 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
   }

   // Lhs is a smi, rhs is a number.
-  if (CpuFeatures::IsSupported(VFP2)) {
-    // Convert lhs to a double in d7.
-    CpuFeatureScope scope(masm, VFP2);
-    __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
-    // Load the double from rhs, tagged HeapNumber r0, to d6.
-    __ sub(r7, rhs, Operand(kHeapObjectTag));
-    __ vldr(d6, r7, HeapNumber::kValueOffset);
-  } else {
-    __ push(lr);
-    // Convert lhs to a double in r2, r3.
-    __ mov(r7, Operand(lhs));
-    ConvertToDoubleStub stub1(r3, r2, r7, r6);
-    __ Call(stub1.GetCode(masm->isolate()));
-    // Load rhs to a double in r0, r1.
-    __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
-    __ pop(lr);
-  }
+  // Convert lhs to a double in d7.
+  __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
+  // Load the double from rhs, tagged HeapNumber r0, to d6.
+  __ sub(r7, rhs, Operand(kHeapObjectTag));
+  __ vldr(d6, r7, HeapNumber::kValueOffset);

   // We now have both loaded as doubles but we can skip the lhs nan check
   // since it's a smi.
@@ -1358,23 +1049,11 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
   }

   // Rhs is a smi, lhs is a heap number.
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(masm, VFP2);
-    // Load the double from lhs, tagged HeapNumber r1, to d7.
-    __ sub(r7, lhs, Operand(kHeapObjectTag));
-    __ vldr(d7, r7, HeapNumber::kValueOffset);
-    // Convert rhs to a double in d6 .
-    __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
-  } else {
-    __ push(lr);
-    // Load lhs to a double in r2, r3.
-    __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
-    // Convert rhs to a double in r0, r1.
-    __ mov(r7, Operand(rhs));
-    ConvertToDoubleStub stub2(r1, r0, r7, r6);
-    __ Call(stub2.GetCode(masm->isolate()));
-    __ pop(lr);
-  }
+  // Load the double from lhs, tagged HeapNumber r1, to d7.
+  __ sub(r7, lhs, Operand(kHeapObjectTag));
+  __ vldr(d7, r7, HeapNumber::kValueOffset);
+  // Convert rhs to a double in d6 .
+  __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
   // Fall through to both_loaded_as_doubles.
 }

@@ -1431,60 +1110,6 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {

 // See comment at call site.
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
-                                          Condition cond) {
-  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
-  Register rhs_exponent = exp_first ? r0 : r1;
-  Register lhs_exponent = exp_first ? r2 : r3;
-  Register rhs_mantissa = exp_first ? r1 : r0;
-  Register lhs_mantissa = exp_first ? r3 : r2;
-
-  // r0, r1, r2, r3 have the two doubles.  Neither is a NaN.
-  if (cond == eq) {
-    // Doubles are not equal unless they have the same bit pattern.
-    // Exception: 0 and -0.
-    __ cmp(rhs_mantissa, Operand(lhs_mantissa));
-    __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
-    // Return non-zero if the numbers are unequal.
-    __ Ret(ne);
-
-    __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
-    // If exponents are equal then return 0.
-    __ Ret(eq);
-
-    // Exponents are unequal.  The only way we can return that the numbers
-    // are equal is if one is -0 and the other is 0.  We already dealt
-    // with the case where both are -0 or both are 0.
-    // We start by seeing if the mantissas (that are equal) or the bottom
-    // 31 bits of the rhs exponent are non-zero.  If so we return not
-    // equal.
-    __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
-    __ mov(r0, Operand(r4), LeaveCC, ne);
-    __ Ret(ne);
-    // Now they are equal if and only if the lhs exponent is zero in its
-    // low 31 bits.
-    __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
-    __ Ret();
-  } else {
-    // Call a native function to do a comparison between two non-NaNs.
-    // Call C routine that may not cause GC or other trouble.
-    __ push(lr);
-    __ PrepareCallCFunction(0, 2, r5);
-    if (masm->use_eabi_hardfloat()) {
-      CpuFeatureScope scope(masm, VFP2);
-      __ vmov(d0, r0, r1);
-      __ vmov(d1, r2, r3);
-    }
-
-    AllowExternalCallThatCantCauseGC scope(masm);
-    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
-                     0, 2);
-    __ pop(pc);  // Return.
-  }
-}
-
-
-// See comment at call site.
 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                            Register lhs,
                                            Register rhs) {
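The deleted EmitTwoNonNanDoubleComparison leaned on a bit-level property of IEEE doubles that is worth stating once in plain C++ (a sketch, not the stub's code):

```cpp
#include <cstdint>
#include <cstring>

// Two non-NaN doubles compare == exactly when their bit patterns match,
// with one exception: +0.0 and -0.0 compare equal while differing in the
// sign bit. That exception is the case the removed stub special-cased.
bool BitwiseEqual(double a, double b) {
  uint64_t ia, ib;
  std::memcpy(&ia, &a, sizeof(ia));
  std::memcpy(&ib, &b, sizeof(ib));
  return ia == ib;
}

// BitwiseEqual(0.0, -0.0) is false, yet 0.0 == -0.0 is true.
```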
@@ -1547,16 +1172,10 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,

   // Both are heap numbers.  Load them up then jump to the code we have
   // for that.
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(masm, VFP2);
-    __ sub(r7, rhs, Operand(kHeapObjectTag));
-    __ vldr(d6, r7, HeapNumber::kValueOffset);
-    __ sub(r7, lhs, Operand(kHeapObjectTag));
-    __ vldr(d7, r7, HeapNumber::kValueOffset);
-  } else {
-    __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
-    __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
-  }
+  __ sub(r7, rhs, Operand(kHeapObjectTag));
+  __ vldr(d6, r7, HeapNumber::kValueOffset);
+  __ sub(r7, lhs, Operand(kHeapObjectTag));
+  __ vldr(d7, r7, HeapNumber::kValueOffset);
   __ jmp(both_loaded_as_doubles);
 }

@@ -1637,42 +1256,37 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
   Label load_result_from_cache;
   if (!object_is_smi) {
     __ JumpIfSmi(object, &is_smi);
-    if (CpuFeatures::IsSupported(VFP2)) {
-      CpuFeatureScope scope(masm, VFP2);
-      __ CheckMap(object,
-                  scratch1,
-                  Heap::kHeapNumberMapRootIndex,
-                  not_found,
-                  DONT_DO_SMI_CHECK);
-
-      STATIC_ASSERT(8 == kDoubleSize);
-      __ add(scratch1,
-             object,
-             Operand(HeapNumber::kValueOffset - kHeapObjectTag));
-      __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
-      __ eor(scratch1, scratch1, Operand(scratch2));
-      __ and_(scratch1, scratch1, Operand(mask));
-
-      // Calculate address of entry in string cache: each entry consists
-      // of two pointer sized fields.
-      __ add(scratch1,
-             number_string_cache,
-             Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
-      Register probe = mask;
-      __ ldr(probe,
-             FieldMemOperand(scratch1, FixedArray::kHeaderSize));
-      __ JumpIfSmi(probe, not_found);
-      __ sub(scratch2, object, Operand(kHeapObjectTag));
-      __ vldr(d0, scratch2, HeapNumber::kValueOffset);
-      __ sub(probe, probe, Operand(kHeapObjectTag));
-      __ vldr(d1, probe, HeapNumber::kValueOffset);
-      __ VFPCompareAndSetFlags(d0, d1);
-      __ b(ne, not_found);  // The cache did not contain this value.
-      __ b(&load_result_from_cache);
-    } else {
-      __ b(not_found);
-    }
+    __ CheckMap(object,
+                scratch1,
+                Heap::kHeapNumberMapRootIndex,
+                not_found,
+                DONT_DO_SMI_CHECK);
+
+    STATIC_ASSERT(8 == kDoubleSize);
+    __ add(scratch1,
+           object,
+           Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+    __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
+    __ eor(scratch1, scratch1, Operand(scratch2));
+    __ and_(scratch1, scratch1, Operand(mask));
+
+    // Calculate address of entry in string cache: each entry consists
+    // of two pointer sized fields.
+    __ add(scratch1,
+           number_string_cache,
+           Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+    Register probe = mask;
+    __ ldr(probe,
+           FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+    __ JumpIfSmi(probe, not_found);
+    __ sub(scratch2, object, Operand(kHeapObjectTag));
+    __ vldr(d0, scratch2, HeapNumber::kValueOffset);
+    __ sub(probe, probe, Operand(kHeapObjectTag));
+    __ vldr(d1, probe, HeapNumber::kValueOffset);
+    __ VFPCompareAndSetFlags(d0, d1);
+    __ b(ne, not_found);  // The cache did not contain this value.
+    __ b(&load_result_from_cache);
   }

   __ bind(&is_smi);
@@ -1787,37 +1401,27 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
   // The arguments have been converted to doubles and stored in d6 and d7, if
   // VFP3 is supported, or in r0, r1, r2, and r3.
   Isolate* isolate = masm->isolate();
-  if (CpuFeatures::IsSupported(VFP2)) {
-    __ bind(&lhs_not_nan);
-    CpuFeatureScope scope(masm, VFP2);
-    Label no_nan;
-    // ARMv7 VFP3 instructions to implement double precision comparison.
-    __ VFPCompareAndSetFlags(d7, d6);
-    Label nan;
-    __ b(vs, &nan);
-    __ mov(r0, Operand(EQUAL), LeaveCC, eq);
-    __ mov(r0, Operand(LESS), LeaveCC, lt);
-    __ mov(r0, Operand(GREATER), LeaveCC, gt);
-    __ Ret();
+  __ bind(&lhs_not_nan);
+  Label no_nan;
+  // ARMv7 VFP3 instructions to implement double precision comparison.
+  __ VFPCompareAndSetFlags(d7, d6);
+  Label nan;
+  __ b(vs, &nan);
+  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+  __ mov(r0, Operand(LESS), LeaveCC, lt);
+  __ mov(r0, Operand(GREATER), LeaveCC, gt);
+  __ Ret();

-    __ bind(&nan);
-    // If one of the sides was a NaN then the v flag is set.  Load r0 with
-    // whatever it takes to make the comparison fail, since comparisons with NaN
-    // always fail.
-    if (cc == lt || cc == le) {
-      __ mov(r0, Operand(GREATER));
-    } else {
-      __ mov(r0, Operand(LESS));
-    }
-    __ Ret();
+  __ bind(&nan);
+  // If one of the sides was a NaN then the v flag is set.  Load r0 with
+  // whatever it takes to make the comparison fail, since comparisons with NaN
+  // always fail.
+  if (cc == lt || cc == le) {
+    __ mov(r0, Operand(GREATER));
   } else {
-    // Checks for NaN in the doubles we have loaded.  Can return the answer or
-    // fall through if neither is a NaN.  Also binds lhs_not_nan.
-    EmitNanCheck(masm, &lhs_not_nan, cc);
-    // Compares two doubles in r0, r1, r2, r3 that are not NaNs.  Returns the
-    // answer.  Never falls through.
-    EmitTwoNonNanDoubleComparison(masm, cc);
+    __ mov(r0, Operand(LESS));
   }
+  __ Ret();

   __ bind(&not_smis);
   // At this point we know we are dealing with two different objects,
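The surviving path binds &nan and loads whatever result forces the comparison to fail, because every ordered comparison against NaN is false. The same decision expressed in scalar C++:

```cpp
#include <cmath>

// For cc == lt/le the stub returns GREATER on NaN, otherwise LESS: either
// way the caller's subsequent "result < 0" or "result > 0" test comes out
// false, matching IEEE semantics where NaN is unordered against everything.
int CompareDoubles(double lhs, double rhs) {
  const int kLess = -1, kEqual = 0, kGreater = 1;
  if (std::isnan(lhs) || std::isnan(rhs)) return kGreater;  // makes "<" fail
  if (lhs < rhs) return kLess;
  if (lhs > rhs) return kGreater;
  return kEqual;
}
```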
- __ bind(¬_zero); - // We already zeroed the sign bit, now shift out the mantissa so we only - // have the exponent left. - __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord)); - unsigned int shifted_exponent_mask = - HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord; - __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE32)); - __ b(ne, ¬_nan); // If exponent is not 0x7ff then it can't be a NaN. - - // Reload exponent word. - __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset)); - __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE32)); - // If mantissa is not zero then we have a NaN, so return 0. - __ mov(tos_, Operand::Zero(), LeaveCC, ne); - __ b(ne, &done); - - // Load mantissa word. - __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset)); - __ cmp(temp, Operand::Zero()); - // If mantissa is not zero then we have a NaN, so return 0. - __ mov(tos_, Operand::Zero(), LeaveCC, ne); - __ b(ne, &done); - - __ bind(¬_nan); - __ mov(tos_, Operand(1, RelocInfo::NONE32)); - __ bind(&done); - } + __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset)); + __ VFPCompareAndSetFlags(d1, 0.0); + // "tos_" is a register, and contains a non zero value by default. + // Hence we only need to overwrite "tos_" with zero to return false for + // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. + __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO + __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN __ Ret(); __ bind(¬_heap_number); } @@ -2069,7 +1630,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { const Register scratch = r1; if (save_doubles_ == kSaveFPRegs) { - CpuFeatureScope scope(masm, VFP2); // Check CPU flags for number of registers, setting the Z condition flag. __ CheckFor32DRegs(scratch); @@ -2089,8 +1649,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { ExternalReference::store_buffer_overflow_function(masm->isolate()), argument_count); if (save_doubles_ == kSaveFPRegs) { - CpuFeatureScope scope(masm, VFP2); - // Check CPU flags for number of registers, setting the Z condition flag. __ CheckFor32DRegs(scratch); @@ -2315,19 +1873,10 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, __ bind(&heapnumber_allocated); } - if (CpuFeatures::IsSupported(VFP2)) { - // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. - CpuFeatureScope scope(masm, VFP2); - __ vmov(s0, r1); - __ vcvt_f64_s32(d0, s0); - __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); - __ Ret(); - } else { - // WriteInt32ToHeapNumberStub does not trigger GC, so we do not - // have to set up a frame. - WriteInt32ToHeapNumberStub stub(r1, r0, r2); - __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); - } + __ vmov(s0, r1); + __ vcvt_f64_s32(d0, s0); + __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ Ret(); } @@ -2383,7 +1932,7 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) { void BinaryOpStub::Initialize() { - platform_specific_bit_ = CpuFeatures::IsSupported(VFP2); + platform_specific_bit_ = true; // VFP2 is a base requirement for V8 } @@ -2662,7 +2211,6 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 // depending on whether VFP3 is available or not. FloatingPointHelper::Destination destination = - CpuFeatures::IsSupported(VFP2) && op != Token::MOD ? 
@@ -2069,7 +1630,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
   const Register scratch = r1;

   if (save_doubles_ == kSaveFPRegs) {
-    CpuFeatureScope scope(masm, VFP2);
     // Check CPU flags for number of registers, setting the Z condition flag.
     __ CheckFor32DRegs(scratch);

@@ -2089,8 +1649,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
       ExternalReference::store_buffer_overflow_function(masm->isolate()),
       argument_count);
   if (save_doubles_ == kSaveFPRegs) {
-    CpuFeatureScope scope(masm, VFP2);
-
     // Check CPU flags for number of registers, setting the Z condition flag.
     __ CheckFor32DRegs(scratch);

@@ -2315,19 +1873,10 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
     __ bind(&heapnumber_allocated);
   }

-  if (CpuFeatures::IsSupported(VFP2)) {
-    // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
-    CpuFeatureScope scope(masm, VFP2);
-    __ vmov(s0, r1);
-    __ vcvt_f64_s32(d0, s0);
-    __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
-    __ Ret();
-  } else {
-    // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
-    // have to set up a frame.
-    WriteInt32ToHeapNumberStub stub(r1, r0, r2);
-    __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
-  }
+  __ vmov(s0, r1);
+  __ vcvt_f64_s32(d0, s0);
+  __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+  __ Ret();
 }

@@ -2383,7 +1932,7 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {


 void BinaryOpStub::Initialize() {
-  platform_specific_bit_ = CpuFeatures::IsSupported(VFP2);
+  platform_specific_bit_ = true;  // VFP2 is a base requirement for V8
 }

@@ -2662,7 +2211,6 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
       // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
      // depending on whether VFP3 is available or not.
       FloatingPointHelper::Destination destination =
-          CpuFeatures::IsSupported(VFP2) &&
           op != Token::MOD ?
           FloatingPointHelper::kVFPRegisters :
           FloatingPointHelper::kCoreRegisters;

@@ -2706,7 +2254,6 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
         // Using VFP registers:
         // d6: Left value
         // d7: Right value
-        CpuFeatureScope scope(masm, VFP2);
         switch (op) {
           case Token::ADD:
             __ vadd(d5, d6, d7);

@@ -2797,11 +2344,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
           // The code below for writing into heap numbers isn't capable of
           // writing the register as an unsigned int so we go to slow case if we
           // hit this case.
-          if (CpuFeatures::IsSupported(VFP2)) {
-            __ b(mi, &result_not_a_smi);
-          } else {
-            __ b(mi, not_numbers);
-          }
+          __ b(mi, &result_not_a_smi);
           break;
         case Token::SHL:
           // Use only the 5 least significant bits of the shift count.

@@ -2837,25 +2380,17 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
       // result.
       __ mov(r0, Operand(r5));

-      if (CpuFeatures::IsSupported(VFP2)) {
-        // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
-        // mentioned above SHR needs to always produce a positive result.
-        CpuFeatureScope scope(masm, VFP2);
-        __ vmov(s0, r2);
-        if (op == Token::SHR) {
-          __ vcvt_f64_u32(d0, s0);
-        } else {
-          __ vcvt_f64_s32(d0, s0);
-        }
-        __ sub(r3, r0, Operand(kHeapObjectTag));
-        __ vstr(d0, r3, HeapNumber::kValueOffset);
-        __ Ret();
+      // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
+      // mentioned above SHR needs to always produce a positive result.
+      __ vmov(s0, r2);
+      if (op == Token::SHR) {
+        __ vcvt_f64_u32(d0, s0);
       } else {
-        // Tail call that writes the int32 in r2 to the heap number in r0, using
-        // r3 as scratch. r0 is preserved and returned.
-        WriteInt32ToHeapNumberStub stub(r2, r0, r3);
-        __ TailCallStub(&stub);
+        __ vcvt_f64_s32(d0, s0);
       }
+      __ sub(r3, r0, Operand(kHeapObjectTag));
+      __ vstr(d0, r3, HeapNumber::kValueOffset);
+      __ Ret();
       break;
     }
     default:

@@ -3001,8 +2536,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       // Load both operands and check that they are 32-bit integer.
       // Jump to type transition if they are not. The registers r0 and r1 (right
       // and left) are preserved for the runtime call.
-      FloatingPointHelper::Destination destination =
-          (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD)
+      FloatingPointHelper::Destination destination = (op_ != Token::MOD)
               ? FloatingPointHelper::kVFPRegisters
               : FloatingPointHelper::kCoreRegisters;

@@ -3032,7 +2566,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
                                                    &transition);

       if (destination == FloatingPointHelper::kVFPRegisters) {
-        CpuFeatureScope scope(masm, VFP2);
         Label return_heap_number;
         switch (op_) {
           case Token::ADD:

@@ -3200,17 +2733,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
           // We only get a negative result if the shift value (r2) is 0.
          // This result cannot be respresented as a signed 32-bit integer, try
           // to return a heap number if we can.
-          // The non vfp2 code does not support this special case, so jump to
-          // runtime if we don't support it.
-          if (CpuFeatures::IsSupported(VFP2)) {
-            __ b(mi, (result_type_ <= BinaryOpIC::INT32)
-                      ? &transition
-                      : &return_heap_number);
-          } else {
-            __ b(mi, (result_type_ <= BinaryOpIC::INT32)
-                      ? &transition
-                      : &call_runtime);
-          }
+          __ b(mi, (result_type_ <= BinaryOpIC::INT32)
+                    ? &transition
+                    : &return_heap_number);
           break;
         case Token::SHL:
           __ and_(r2, r2, Operand(0x1f));
&transition + : &return_heap_number); break; case Token::SHL: __ and_(r2, r2, Operand(0x1f)); @@ -3238,31 +2763,22 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { &call_runtime, mode_); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - if (op_ != Token::SHR) { - // Convert the result to a floating point value. - __ vmov(double_scratch.low(), r2); - __ vcvt_f64_s32(double_scratch, double_scratch.low()); - } else { - // The result must be interpreted as an unsigned 32-bit integer. - __ vmov(double_scratch.low(), r2); - __ vcvt_f64_u32(double_scratch, double_scratch.low()); - } - - // Store the result. - __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); - __ vstr(double_scratch, r0, HeapNumber::kValueOffset); - __ mov(r0, heap_number_result); - __ Ret(); + if (op_ != Token::SHR) { + // Convert the result to a floating point value. + __ vmov(double_scratch.low(), r2); + __ vcvt_f64_s32(double_scratch, double_scratch.low()); } else { - // Tail call that writes the int32 in r2 to the heap number in r0, using - // r3 as scratch. r0 is preserved and returned. - __ mov(r0, r5); - WriteInt32ToHeapNumberStub stub(r2, r0, r3); - __ TailCallStub(&stub); + // The result must be interpreted as an unsigned 32-bit integer. + __ vmov(double_scratch.low(), r2); + __ vcvt_f64_u32(double_scratch, double_scratch.low()); } + // Store the result. + __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); + __ vstr(double_scratch, r0, HeapNumber::kValueOffset); + __ mov(r0, heap_number_result); + __ Ret(); + break; } @@ -3441,100 +2957,96 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { const Register cache_entry = r0; const bool tagged = (argument_type_ == TAGGED); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - if (tagged) { - // Argument is a number and is on stack and in r0. - // Load argument and check if it is a smi. - __ JumpIfNotSmi(r0, &input_not_smi); - - // Input is a smi. Convert to double and load the low and high words - // of the double into r2, r3. - __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); - __ b(&loaded); - - __ bind(&input_not_smi); - // Check if input is a HeapNumber. - __ CheckMap(r0, - r1, - Heap::kHeapNumberMapRootIndex, - &calculate, - DONT_DO_SMI_CHECK); - // Input is a HeapNumber. Load it to a double register and store the - // low and high words into r2, r3. - __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); - __ vmov(r2, r3, d0); - } else { - // Input is untagged double in d2. Output goes to d2. - __ vmov(r2, r3, d2); - } - __ bind(&loaded); - // r2 = low 32 bits of double value - // r3 = high 32 bits of double value - // Compute hash (the shifts are arithmetic): - // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); - __ eor(r1, r2, Operand(r3)); - __ eor(r1, r1, Operand(r1, ASR, 16)); - __ eor(r1, r1, Operand(r1, ASR, 8)); - ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); - __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); - - // r2 = low 32 bits of double value. - // r3 = high 32 bits of double value. - // r1 = TranscendentalCache::hash(double value). - Isolate* isolate = masm->isolate(); - ExternalReference cache_array = - ExternalReference::transcendental_cache_array_address(isolate); - __ mov(cache_entry, Operand(cache_array)); - // cache_entry points to cache array. 
- int cache_array_index - = type_ * sizeof(isolate->transcendental_cache()->caches_[0]); - __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); - // r0 points to the cache for the type type_. - // If NULL, the cache hasn't been initialized yet, so go through runtime. - __ cmp(cache_entry, Operand::Zero()); - __ b(eq, &invalid_cache); + if (tagged) { + // Argument is a number and is on stack and in r0. + // Load argument and check if it is a smi. + __ JumpIfNotSmi(r0, &input_not_smi); + + // Input is a smi. Convert to double and load the low and high words + // of the double into r2, r3. + __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); + __ b(&loaded); + + __ bind(&input_not_smi); + // Check if input is a HeapNumber. + __ CheckMap(r0, + r1, + Heap::kHeapNumberMapRootIndex, + &calculate, + DONT_DO_SMI_CHECK); + // Input is a HeapNumber. Load it to a double register and store the + // low and high words into r2, r3. + __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ vmov(r2, r3, d0); + } else { + // Input is untagged double in d2. Output goes to d2. + __ vmov(r2, r3, d2); + } + __ bind(&loaded); + // r2 = low 32 bits of double value + // r3 = high 32 bits of double value + // Compute hash (the shifts are arithmetic): + // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); + __ eor(r1, r2, Operand(r3)); + __ eor(r1, r1, Operand(r1, ASR, 16)); + __ eor(r1, r1, Operand(r1, ASR, 8)); + ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); + __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); + + // r2 = low 32 bits of double value. + // r3 = high 32 bits of double value. + // r1 = TranscendentalCache::hash(double value). + Isolate* isolate = masm->isolate(); + ExternalReference cache_array = + ExternalReference::transcendental_cache_array_address(isolate); + __ mov(cache_entry, Operand(cache_array)); + // cache_entry points to cache array. + int cache_array_index + = type_ * sizeof(isolate->transcendental_cache()->caches_[0]); + __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); + // r0 points to the cache for the type type_. + // If NULL, the cache hasn't been initialized yet, so go through runtime. + __ cmp(cache_entry, Operand::Zero()); + __ b(eq, &invalid_cache); #ifdef DEBUG - // Check that the layout of cache elements match expectations. - { TranscendentalCache::SubCache::Element test_elem[2]; - char* elem_start = reinterpret_cast<char*>(&test_elem[0]); - char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); - char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); - char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); - char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); - CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. - CHECK_EQ(0, elem_in0 - elem_start); - CHECK_EQ(kIntSize, elem_in1 - elem_start); - CHECK_EQ(2 * kIntSize, elem_out - elem_start); - } + // Check that the layout of cache elements match expectations. + { TranscendentalCache::SubCache::Element test_elem[2]; + char* elem_start = reinterpret_cast<char*>(&test_elem[0]); + char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); + char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); + char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); + char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); + CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. 
+ CHECK_EQ(0, elem_in0 - elem_start); + CHECK_EQ(kIntSize, elem_in1 - elem_start); + CHECK_EQ(2 * kIntSize, elem_out - elem_start); + } #endif - // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. - __ add(r1, r1, Operand(r1, LSL, 1)); - __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); - // Check if cache matches: Double value is stored in uint32_t[2] array. - __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); - __ cmp(r2, r4); - __ cmp(r3, r5, eq); - __ b(ne, &calculate); - // Cache hit. Load result, cleanup and return. - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter( - counters->transcendental_cache_hit(), 1, scratch0, scratch1); - if (tagged) { - // Pop input value from stack and load result into r0. - __ pop(); - __ mov(r0, Operand(r6)); - } else { - // Load result into d2. - __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); - } - __ Ret(); - } // if (CpuFeatures::IsSupported(VFP3)) + // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. + __ add(r1, r1, Operand(r1, LSL, 1)); + __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); + // Check if cache matches: Double value is stored in uint32_t[2] array. + __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); + __ cmp(r2, r4); + __ cmp(r3, r5, eq); + __ b(ne, &calculate); + // Cache hit. Load result, cleanup and return. + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter( + counters->transcendental_cache_hit(), 1, scratch0, scratch1); + if (tagged) { + // Pop input value from stack and load result into r0. + __ pop(); + __ mov(r0, Operand(r6)); + } else { + // Load result into d2. + __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); + } + __ Ret(); __ bind(&calculate); - Counters* counters = masm->isolate()->counters(); __ IncrementCounter( counters->transcendental_cache_miss(), 1, scratch0, scratch1); if (tagged) { @@ -3543,9 +3055,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { ExternalReference(RuntimeFunction(), masm->isolate()); __ TailCallExternalReference(runtime_function, 1, 1); } else { - ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatureScope scope(masm, VFP2); - Label no_update; Label skip_cache; @@ -3605,7 +3114,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, Register scratch) { - ASSERT(masm->IsEnabled(VFP2)); Isolate* isolate = masm->isolate(); __ push(lr); @@ -3666,7 +3174,6 @@ void InterruptStub::Generate(MacroAssembler* masm) { void MathPowStub::Generate(MacroAssembler* masm) { - CpuFeatureScope vfp2_scope(masm, VFP2); const Register base = r1; const Register exponent = r2; const Register heapnumbermap = r5; @@ -3879,14 +3386,13 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { CEntryStub::GenerateAheadOfTime(isolate); WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); + StubFailureTrampolineStub::GenerateAheadOfTime(isolate); RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); } void CodeStub::GenerateFPStubs(Isolate* isolate) { - SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2) - ? 
kSaveFPRegs - : kDontSaveFPRegs; + SaveFPRegsMode mode = kSaveFPRegs; CEntryStub save_doubles(1, mode); StoreBufferOverflowStub stub(mode); // These stubs might already be in the snapshot, detect that and don't @@ -3895,11 +3401,13 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) { Code* save_doubles_code; if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { save_doubles_code = *save_doubles.GetCode(isolate); - save_doubles_code->set_is_pregenerated(true); - - Code* store_buffer_overflow_code = *stub.GetCode(isolate); - store_buffer_overflow_code->set_is_pregenerated(true); } + Code* store_buffer_overflow_code; + if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) { + store_buffer_overflow_code = *stub.GetCode(isolate); + } + save_doubles_code->set_is_pregenerated(true); + store_buffer_overflow_code->set_is_pregenerated(true); isolate->set_fp_stubs_generated(true); } @@ -3989,6 +3497,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, masm->Jump(r5); } + __ VFPEnsureFPSCRState(r2); + if (always_allocate) { // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 // though (contain the result). @@ -4024,11 +3534,18 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // Special handling of out of memory exceptions. JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception); - // Retrieve the pending exception and clear the variable. - __ mov(r3, Operand(isolate->factory()->the_hole_value())); + // Retrieve the pending exception. __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ ldr(r0, MemOperand(ip)); + + // See if we just retrieved an OOM exception. + JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception); + + // Clear the pending exception. + __ mov(r3, Operand(isolate->factory()->the_hole_value())); + __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, + isolate))); __ str(r3, MemOperand(ip)); // Special handling of termination exceptions which are uncatchable @@ -4146,13 +3663,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Save callee-saved registers (incl. cp and fp), sp, and lr __ stm(db_w, sp, kCalleeSaved | lr.bit()); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - // Save callee-saved vfp registers. - __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); - // Set up the reserved register for 0.0. - __ vmov(kDoubleRegZero, 0.0); - } + // Save callee-saved vfp registers. + __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); + // Set up the reserved register for 0.0. + __ vmov(kDoubleRegZero, 0.0); + __ VFPEnsureFPSCRState(r4); // Get address of argv, see stm above. // r0: code entry @@ -4162,9 +3677,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Set up argv in r4. int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; - if (CpuFeatures::IsSupported(VFP2)) { - offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; - } + offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; __ ldr(r4, MemOperand(sp, offset_to_argv)); // Push a frame with special values setup to mark it as an entry frame. @@ -4300,11 +3813,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { } #endif - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - // Restore callee-saved vfp registers. - __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); - } + // Restore callee-saved vfp registers. 
+ __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); } @@ -4948,7 +4458,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX))); // Copy the JS object part. - __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize); + __ CopyFields(r0, r4, d0, s0, JSObject::kHeaderSize / kPointerSize); // Get the length (smi tagged) and set that as an in-object property too. STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); @@ -7009,50 +6519,46 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { } // Inlining the double comparison and falling back to the general compare - // stub if NaN is involved or VFP2 is unsupported. - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - - // Load left and right operand. - Label done, left, left_smi, right_smi; - __ JumpIfSmi(r0, &right_smi); - __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, - DONT_DO_SMI_CHECK); - __ sub(r2, r0, Operand(kHeapObjectTag)); - __ vldr(d1, r2, HeapNumber::kValueOffset); - __ b(&left); - __ bind(&right_smi); - __ SmiUntag(r2, r0); // Can't clobber r0 yet. - SwVfpRegister single_scratch = d2.low(); - __ vmov(single_scratch, r2); - __ vcvt_f64_s32(d1, single_scratch); - - __ bind(&left); - __ JumpIfSmi(r1, &left_smi); - __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, - DONT_DO_SMI_CHECK); - __ sub(r2, r1, Operand(kHeapObjectTag)); - __ vldr(d0, r2, HeapNumber::kValueOffset); - __ b(&done); - __ bind(&left_smi); - __ SmiUntag(r2, r1); // Can't clobber r1 yet. - single_scratch = d3.low(); - __ vmov(single_scratch, r2); - __ vcvt_f64_s32(d0, single_scratch); + // stub if NaN is involved. + // Load left and right operand. + Label done, left, left_smi, right_smi; + __ JumpIfSmi(r0, &right_smi); + __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, + DONT_DO_SMI_CHECK); + __ sub(r2, r0, Operand(kHeapObjectTag)); + __ vldr(d1, r2, HeapNumber::kValueOffset); + __ b(&left); + __ bind(&right_smi); + __ SmiUntag(r2, r0); // Can't clobber r0 yet. + SwVfpRegister single_scratch = d2.low(); + __ vmov(single_scratch, r2); + __ vcvt_f64_s32(d1, single_scratch); + + __ bind(&left); + __ JumpIfSmi(r1, &left_smi); + __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, + DONT_DO_SMI_CHECK); + __ sub(r2, r1, Operand(kHeapObjectTag)); + __ vldr(d0, r2, HeapNumber::kValueOffset); + __ b(&done); + __ bind(&left_smi); + __ SmiUntag(r2, r1); // Can't clobber r1 yet. + single_scratch = d3.low(); + __ vmov(single_scratch, r2); + __ vcvt_f64_s32(d0, single_scratch); - __ bind(&done); - // Compare operands. - __ VFPCompareAndSetFlags(d0, d1); + __ bind(&done); + // Compare operands. + __ VFPCompareAndSetFlags(d0, d1); - // Don't base result on status bits when a NaN is involved. - __ b(vs, &unordered); + // Don't base result on status bits when a NaN is involved. + __ b(vs, &unordered); - // Return a result of -1, 0, or 1, based on status bits. - __ mov(r0, Operand(EQUAL), LeaveCC, eq); - __ mov(r0, Operand(LESS), LeaveCC, lt); - __ mov(r0, Operand(GREATER), LeaveCC, gt); - __ Ret(); - } + // Return a result of -1, 0, or 1, based on status bits. 
+ __ mov(r0, Operand(EQUAL), LeaveCC, eq); + __ mov(r0, Operand(LESS), LeaveCC, lt); + __ mov(r0, Operand(GREATER), LeaveCC, gt); + __ Ret(); __ bind(&unordered); __ bind(&generic_stub); @@ -7343,6 +6849,7 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, __ Jump(target); // Call the C++ function. ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta, masm->SizeOfCodeGeneratedSince(&start)); + __ VFPEnsureFPSCRState(r2); } @@ -7662,11 +7169,6 @@ bool RecordWriteStub::IsPregenerated() { } -bool StoreBufferOverflowStub::IsPregenerated() { - return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated(); -} - - void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( Isolate* isolate) { StoreBufferOverflowStub stub1(kDontSaveFPRegs); @@ -7689,7 +7191,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) { bool CodeStub::CanUseFPRegisters() { - return CpuFeatures::IsSupported(VFP2); + return true; // VFP2 is a base requirement for V8 } @@ -7948,16 +7450,14 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); __ StoreNumberToDoubleElements(r0, r3, // Overwrites all regs after this. - r5, r6, r7, r9, r2, + r5, r9, r6, r7, r2, &slow_elements); __ Ret(); } void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { - ASSERT(!Serializer::enabled()); - bool save_fp_regs = CpuFeatures::IsSupported(VFP2); - CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs); + CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); int parameter_count_offset = StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index 61ecc975f..741ff9ca8 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -61,11 +61,11 @@ class TranscendentalCacheStub: public PlatformCodeStub { class StoreBufferOverflowStub: public PlatformCodeStub { public: explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) - : save_doubles_(save_fp) { } + : save_doubles_(save_fp) {} void Generate(MacroAssembler* masm); - virtual bool IsPregenerated(); + virtual bool IsPregenerated() { return true; } static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); virtual bool SometimesSetsUpAFrame() { return false; } @@ -471,7 +471,6 @@ class RecordWriteStub: public PlatformCodeStub { if (mode == kSaveFPRegs) { // Number of d-regs not known at snapshot time. ASSERT(!Serializer::enabled()); - CpuFeatureScope scope(masm, VFP2); masm->sub(sp, sp, Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1))); @@ -489,7 +488,6 @@ class RecordWriteStub: public PlatformCodeStub { if (mode == kSaveFPRegs) { // Number of d-regs not known at snapshot time. ASSERT(!Serializer::enabled()); - CpuFeatureScope scope(masm, VFP2); // Restore all VFP registers except d0. // TODO(hans): We should probably restore d0 too. And maybe use vldm. 
for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) { diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 1c829469c..9d773d4cc 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -62,7 +62,6 @@ double fast_exp_simulator(double x) { UnaryMathFunction CreateExpFunction() { - if (!CpuFeatures::IsSupported(VFP2)) return &exp; if (!FLAG_fast_math) return &exp; size_t actual_size; byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); @@ -72,7 +71,6 @@ UnaryMathFunction CreateExpFunction() { MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); { - CpuFeatureScope use_vfp(&masm, VFP2); DwVfpRegister input = d0; DwVfpRegister result = d1; DwVfpRegister double_scratch1 = d2; @@ -185,7 +183,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // -- r4 : scratch (elements) // ----------------------------------- Label loop, entry, convert_hole, gc_required, only_change_map, done; - bool vfp2_supported = CpuFeatures::IsSupported(VFP2); if (mode == TRACK_ALLOCATION_SITE) { __ TestJSArrayForAllocationSiteInfo(r2, r4); @@ -248,7 +245,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // r5: kHoleNanUpper32 // r6: end of destination FixedDoubleArray, not tagged // r7: begin of FixedDoubleArray element fields, not tagged - if (!vfp2_supported) __ Push(r1, r0); __ b(&entry); @@ -276,23 +272,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole); // Normal smi, convert to double and store. - if (vfp2_supported) { - CpuFeatureScope scope(masm, VFP2); - __ vmov(s0, r9); - __ vcvt_f64_s32(d0, s0); - __ vstr(d0, r7, 0); - __ add(r7, r7, Operand(8)); - } else { - FloatingPointHelper::ConvertIntToDouble(masm, - r9, - FloatingPointHelper::kCoreRegisters, - d0, - r0, - r1, - lr, - s0); - __ Strd(r0, r1, MemOperand(r7, 8, PostIndex)); - } + __ vmov(s0, r9); + __ vcvt_f64_s32(d0, s0); + __ vstr(d0, r7, 0); + __ add(r7, r7, Operand(8)); __ b(&entry); // Hole found, store the-hole NaN. 
@@ -310,7 +293,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( __ cmp(r7, r6); __ b(lt, &loop); - if (!vfp2_supported) __ Pop(r1, r0); __ pop(lr); __ bind(&done); } diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc index cdca1f531..a13048476 100644 --- a/deps/v8/src/arm/constants-arm.cc +++ b/deps/v8/src/arm/constants-arm.cc @@ -51,7 +51,7 @@ double Instruction::DoubleImmedVmov() const { uint64_t imm = high16 << 48; double d; - memcpy(&d, &imm, 8); + OS::MemCopy(&d, &imm, 8); return d; } diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 004165ac3..747dc5627 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -44,21 +44,25 @@ defined(__ARM_ARCH_7R__) || \ defined(__ARM_ARCH_7__) # define CAN_USE_ARMV7_INSTRUCTIONS 1 +#ifndef CAN_USE_VFP3_INSTRUCTIONS +# define CAN_USE_VFP3_INSTRUCTIONS +#endif #endif -#if defined(__ARM_ARCH_6__) || \ - defined(__ARM_ARCH_6J__) || \ - defined(__ARM_ARCH_6K__) || \ - defined(__ARM_ARCH_6Z__) || \ +#if defined(__ARM_ARCH_6__) || \ + defined(__ARM_ARCH_6J__) || \ + defined(__ARM_ARCH_6K__) || \ + defined(__ARM_ARCH_6Z__) || \ defined(__ARM_ARCH_6ZK__) || \ defined(__ARM_ARCH_6T2__) || \ defined(CAN_USE_ARMV7_INSTRUCTIONS) # define CAN_USE_ARMV6_INSTRUCTIONS 1 #endif -#if defined(__ARM_ARCH_5T__) || \ - defined(__ARM_ARCH_5TE__) || \ - defined(__ARM_ARCH_5TEJ__) || \ +#if defined(__ARM_ARCH_5__) || \ + defined(__ARM_ARCH_5T__) || \ + defined(__ARM_ARCH_5TE__) || \ + defined(__ARM_ARCH_5TEJ__) || \ defined(CAN_USE_ARMV6_INSTRUCTIONS) # define CAN_USE_ARMV5_INSTRUCTIONS 1 # define CAN_USE_THUMB_INSTRUCTIONS 1 @@ -403,6 +407,7 @@ const uint32_t kVFPOverflowExceptionBit = 1 << 2; const uint32_t kVFPUnderflowExceptionBit = 1 << 3; const uint32_t kVFPInexactExceptionBit = 1 << 4; const uint32_t kVFPFlushToZeroMask = 1 << 24; +const uint32_t kVFPDefaultNaNModeControlBit = 1 << 25; const uint32_t kVFPNConditionFlagBit = 1 << 31; const uint32_t kVFPZConditionFlagBit = 1 << 30; diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index 9bcc1ac14..25ad85c4b 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -117,45 +117,39 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList( static const int32_t kBranchBeforeInterrupt = 0x5a000004; - -void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, - Address pc_after, - Code* check_code, - Code* replacement_code) { - const int kInstrSize = Assembler::kInstrSize; - // The back edge bookkeeping code matches the pattern: - // - // <decrement profiling counter> - // 2a 00 00 01 bpl ok - // e5 9f c? ?? ldr ip, [pc, <stack guard address>] - // e1 2f ff 3c blx ip - ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp); - ASSERT(Assembler::IsLdrPcImmediateOffset( - Assembler::instr_at(pc_after - 2 * kInstrSize))); - ASSERT_EQ(kBranchBeforeInterrupt, - Memory::int32_at(pc_after - 3 * kInstrSize)); - - // We patch the code to the following form: - // - // <decrement profiling counter> - // e1 a0 00 00 mov r0, r0 (NOP) - // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>] - // e1 2f ff 3c blx ip - // and overwrite the constant containing the - // address of the stack check stub. - - // Replace conditional jump with NOP. +// The back edge bookkeeping code matches the pattern: +// +// <decrement profiling counter> +// 2a 00 00 01 bpl ok +// e5 9f c? ?? 
ldr ip, [pc, <interrupt stub address>] +// e1 2f ff 3c blx ip +// ok-label +// +// We patch the code to the following form: +// +// <decrement profiling counter> +// e1 a0 00 00 mov r0, r0 (NOP) +// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>] +// e1 2f ff 3c blx ip +// ok-label + +void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code, + Address pc_after, + Code* interrupt_code, + Code* replacement_code) { + ASSERT(!InterruptCodeIsPatched(unoptimized_code, + pc_after, + interrupt_code, + replacement_code)); + static const int kInstrSize = Assembler::kInstrSize; + // Turn the jump into nops. CodePatcher patcher(pc_after - 3 * kInstrSize, 1); patcher.masm()->nop(); - - // Replace the stack check address in the constant pool - // with the entry address of the replacement code. - uint32_t stack_check_address_offset = Memory::uint16_at(pc_after - + // Replace the call address. + uint32_t interrupt_address_offset = Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff; - Address stack_check_address_pointer = pc_after + stack_check_address_offset; - ASSERT(Memory::uint32_at(stack_check_address_pointer) == - reinterpret_cast<uint32_t>(check_code->entry())); - Memory::uint32_at(stack_check_address_pointer) = + Address interrupt_address_pointer = pc_after + interrupt_address_offset; + Memory::uint32_at(interrupt_address_pointer) = reinterpret_cast<uint32_t>(replacement_code->entry()); unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( @@ -163,34 +157,61 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, } -void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code, +void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code, + Address pc_after, + Code* interrupt_code, + Code* replacement_code) { + ASSERT(InterruptCodeIsPatched(unoptimized_code, + pc_after, + interrupt_code, + replacement_code)); + static const int kInstrSize = Assembler::kInstrSize; + // Restore the original jump. + CodePatcher patcher(pc_after - 3 * kInstrSize, 1); + patcher.masm()->b(4 * kInstrSize, pl); // ok-label is 4 instructions later. + ASSERT_EQ(kBranchBeforeInterrupt, + Memory::int32_at(pc_after - 3 * kInstrSize)); + // Restore the original call address. + uint32_t interrupt_address_offset = Memory::uint16_at(pc_after - + 2 * kInstrSize) & 0xfff; + Address interrupt_address_pointer = pc_after + interrupt_address_offset; + Memory::uint32_at(interrupt_address_pointer) = + reinterpret_cast<uint32_t>(interrupt_code->entry()); + + interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( + unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code); +} + + +#ifdef DEBUG +bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code, Address pc_after, - Code* check_code, + Code* interrupt_code, Code* replacement_code) { - const int kInstrSize = Assembler::kInstrSize; + static const int kInstrSize = Assembler::kInstrSize; ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp); - ASSERT(Assembler::IsLdrPcImmediateOffset( - Assembler::instr_at(pc_after - 2 * kInstrSize))); - // Replace NOP with conditional jump. 
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1); - patcher.masm()->b(+16, pl); - ASSERT_EQ(kBranchBeforeInterrupt, - Memory::int32_at(pc_after - 3 * kInstrSize)); + uint32_t interrupt_address_offset = + Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff; + Address interrupt_address_pointer = pc_after + interrupt_address_offset; - // Replace the stack check address in the constant pool - // with the entry address of the replacement code. - uint32_t stack_check_address_offset = Memory::uint16_at(pc_after - - 2 * kInstrSize) & 0xfff; - Address stack_check_address_pointer = pc_after + stack_check_address_offset; - ASSERT(Memory::uint32_at(stack_check_address_pointer) == - reinterpret_cast<uint32_t>(replacement_code->entry())); - Memory::uint32_at(stack_check_address_pointer) = - reinterpret_cast<uint32_t>(check_code->entry()); - - check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( - unoptimized_code, pc_after - 2 * kInstrSize, check_code); + if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) { + ASSERT(Assembler::IsLdrPcImmediateOffset( + Assembler::instr_at(pc_after - 2 * kInstrSize))); + ASSERT(reinterpret_cast<uint32_t>(replacement_code->entry()) == + Memory::uint32_at(interrupt_address_pointer)); + return true; + } else { + ASSERT(Assembler::IsLdrPcImmediateOffset( + Assembler::instr_at(pc_after - 2 * kInstrSize))); + ASSERT_EQ(kBranchBeforeInterrupt, + Memory::int32_at(pc_after - 3 * kInstrSize)); + ASSERT(reinterpret_cast<uint32_t>(interrupt_code->entry()) == + Memory::uint32_at(interrupt_address_pointer)); + return false; + } } +#endif // DEBUG static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) { @@ -594,23 +615,18 @@ void Deoptimizer::EntryGenerator::Generate() { const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters; - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - // Save all allocatable VFP registers before messing with them. - ASSERT(kDoubleRegZero.code() == 14); - ASSERT(kScratchDoubleReg.code() == 15); + // Save all allocatable VFP registers before messing with them. + ASSERT(kDoubleRegZero.code() == 14); + ASSERT(kScratchDoubleReg.code() == 15); - // Check CPU flags for number of registers, setting the Z condition flag. - __ CheckFor32DRegs(ip); + // Check CPU flags for number of registers, setting the Z condition flag. + __ CheckFor32DRegs(ip); - // Push registers d0-d13, and possibly d16-d31, on the stack. - // If d16-d31 are not pushed, decrease the stack pointer instead. - __ vstm(db_w, sp, d16, d31, ne); - __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); - __ vstm(db_w, sp, d0, d13); - } else { - __ sub(sp, sp, Operand(kDoubleRegsSize)); - } + // Push registers d0-d13, and possibly d16-d31, on the stack. + // If d16-d31 are not pushed, decrease the stack pointer instead. + __ vstm(db_w, sp, d16, d31, ne); + __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); + __ vstm(db_w, sp, d0, d13); // Push all 16 registers (needed to populate FrameDescription::registers_). 
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps @@ -669,17 +685,14 @@ void Deoptimizer::EntryGenerator::Generate() { __ str(r2, MemOperand(r1, offset)); } - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - // Copy VFP registers to - // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters] - int double_regs_offset = FrameDescription::double_registers_offset(); - for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) { - int dst_offset = i * kDoubleSize + double_regs_offset; - int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; - __ vldr(d0, sp, src_offset); - __ vstr(d0, r1, dst_offset); - } + // Copy VFP registers to + // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters] + int double_regs_offset = FrameDescription::double_registers_offset(); + for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) { + int dst_offset = i * kDoubleSize + double_regs_offset; + int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; + __ vldr(d0, sp, src_offset); + __ vstr(d0, r1, dst_offset); } // Remove the bailout id, eventually return address, and the saved registers @@ -749,21 +762,18 @@ void Deoptimizer::EntryGenerator::Generate() { __ cmp(r4, r1); __ b(lt, &outer_push_loop); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - // Check CPU flags for number of registers, setting the Z condition flag. - __ CheckFor32DRegs(ip); + // Check CPU flags for number of registers, setting the Z condition flag. + __ CheckFor32DRegs(ip); - __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset())); - int src_offset = FrameDescription::double_registers_offset(); - for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) { - if (i == kDoubleRegZero.code()) continue; - if (i == kScratchDoubleReg.code()) continue; + __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset())); + int src_offset = FrameDescription::double_registers_offset(); + for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) { + if (i == kDoubleRegZero.code()) continue; + if (i == kScratchDoubleReg.code()) continue; - const DwVfpRegister reg = DwVfpRegister::from_code(i); - __ vldr(reg, r1, src_offset, i < 16 ? al : ne); - src_offset += kDoubleSize; - } + const DwVfpRegister reg = DwVfpRegister::from_code(i); + __ vldr(reg, r1, src_offset, i < 16 ? al : ne); + src_offset += kDoubleSize; } // Push state, pc, and continuation from the last output frame. 
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index dec62b341..b84d35535 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -1561,8 +1561,9 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) { buffer[0] = '\0'; byte* prev_pc = pc; pc += d.InstructionDecode(buffer, pc); - fprintf(f, "%p %08x %s\n", - prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start()); + v8::internal::PrintF( + f, "%p %08x %s\n", + prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start()); } } diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h index 30f4057fa..19b29b855 100644 --- a/deps/v8/src/arm/frames-arm.h +++ b/deps/v8/src/arm/frames-arm.h @@ -100,18 +100,6 @@ const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved; // ---------------------------------------------------- -class StackHandlerConstants : public AllStatic { - public: - static const int kNextOffset = 0 * kPointerSize; - static const int kCodeOffset = 1 * kPointerSize; - static const int kStateOffset = 2 * kPointerSize; - static const int kContextOffset = 3 * kPointerSize; - static const int kFPOffset = 4 * kPointerSize; - - static const int kSize = kFPOffset + kPointerSize; -}; - - class EntryFrameConstants : public AllStatic { public: static const int kCallerFPOffset = -3 * kPointerSize; diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 6086645db..ba0f14128 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -162,8 +162,6 @@ void FullCodeGenerator::Generate() { // the frame (that is done below). FrameScope frame_scope(masm_, StackFrame::MANUAL); - int locals_count = info->scope()->num_stack_slots(); - info->set_prologue_offset(masm_->pc_offset()); { PredictableCodeSizeScope predictible_code_size_scope( @@ -179,6 +177,9 @@ void FullCodeGenerator::Generate() { } { Comment cmnt(masm_, "[ Allocate locals"); + int locals_count = info->scope()->num_stack_slots(); + // Generators allocate locals, if any, in context slots. + ASSERT(!info->function()->is_generator() || locals_count == 0); for (int i = 0; i < locals_count; i++) { __ push(ip); } @@ -313,7 +314,7 @@ void FullCodeGenerator::Generate() { EmitReturnSequence(); // Force emit the constant pool, so it doesn't get emitted in the middle - // of the stack check table. + // of the back edge table. masm()->CheckConstPool(true, false); } @@ -350,7 +351,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() { void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, Label* back_edge_target) { Comment cmnt(masm_, "[ Back edge bookkeeping"); - // Block literal pools whilst emitting stack check code. + // Block literal pools whilst emitting back edge code. Assembler::BlockConstPoolScope block_const_pool(masm_); Label ok; @@ -1268,7 +1269,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info, !pretenure && scope()->is_function_scope() && info->num_literals() == 0) { - FastNewClosureStub stub(info->language_mode()); + FastNewClosureStub stub(info->language_mode(), info->is_generator()); __ mov(r0, Operand(info)); __ push(r0); __ CallStub(&stub); @@ -1562,7 +1563,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { // r0: Newly allocated regexp. // r5: Materialized regexp. // r2: temp. 
- __ CopyFields(r0, r5, r2.bit(), size / kPointerSize); + __ CopyFields(r0, r5, d0, s0, size / kPointerSize); context()->Plug(r0); } @@ -1727,7 +1728,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset)); __ mov(r2, Operand(Smi::FromInt(expr->literal_index()))); __ mov(r1, Operand(constant_elements)); - __ Push(r3, r2, r1); if (has_fast_elements && constant_elements_values->map() == isolate()->heap()->fixed_cow_array_map()) { FastCloneShallowArrayStub stub( @@ -1738,8 +1738,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { __ IncrementCounter( isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2); } else if (expr->depth() > 1) { + __ Push(r3, r2, r1); __ CallRuntime(Runtime::kCreateArrayLiteral, 3); - } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { + } else if (Serializer::enabled() || + length > FastCloneShallowArrayStub::kMaximumClonedLength) { + __ Push(r3, r2, r1); __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); } else { ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || @@ -3024,37 +3027,26 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) { // Convert 32 random bits in r0 to 0.(32 random bits) in a double // by computing: // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). - if (CpuFeatures::IsSupported(VFP2)) { - __ PrepareCallCFunction(1, r0); - __ ldr(r0, - ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX)); - __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset)); - __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); - - CpuFeatureScope scope(masm(), VFP2); - // 0x41300000 is the top half of 1.0 x 2^20 as a double. - // Create this constant using mov/orr to avoid PC relative load. - __ mov(r1, Operand(0x41000000)); - __ orr(r1, r1, Operand(0x300000)); - // Move 0x41300000xxxxxxxx (x = random bits) to VFP. - __ vmov(d7, r0, r1); - // Move 0x4130000000000000 to VFP. - __ mov(r0, Operand::Zero()); - __ vmov(d8, r0, r1); - // Subtract and store the result in the heap number. - __ vsub(d7, d7, d8); - __ sub(r0, r4, Operand(kHeapObjectTag)); - __ vstr(d7, r0, HeapNumber::kValueOffset); - __ mov(r0, r4); - } else { - __ PrepareCallCFunction(2, r0); - __ ldr(r1, - ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX)); - __ mov(r0, Operand(r4)); - __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset)); - __ CallCFunction( - ExternalReference::fill_heap_number_with_random_function(isolate()), 2); - } + __ PrepareCallCFunction(1, r0); + __ ldr(r0, + ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX)); + __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset)); + __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); + + // 0x41300000 is the top half of 1.0 x 2^20 as a double. + // Create this constant using mov/orr to avoid PC relative load. + __ mov(r1, Operand(0x41000000)); + __ orr(r1, r1, Operand(0x300000)); + // Move 0x41300000xxxxxxxx (x = random bits) to VFP. + __ vmov(d7, r0, r1); + // Move 0x4130000000000000 to VFP. + __ mov(r0, Operand::Zero()); + __ vmov(d8, r0, r1); + // Subtract and store the result in the heap number. 
+ __ vsub(d7, d7, d8); + __ sub(r0, r4, Operand(kHeapObjectTag)); + __ vstr(d7, r0, HeapNumber::kValueOffset); + __ mov(r0, r4); context()->Plug(r0); } @@ -3191,12 +3183,8 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { ASSERT(args->length() == 2); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - if (CpuFeatures::IsSupported(VFP2)) { - MathPowStub stub(MathPowStub::ON_STACK); - __ CallStub(&stub); - } else { - __ CallRuntime(Runtime::kMath_pow, 2); - } + MathPowStub stub(MathPowStub::ON_STACK); + __ CallStub(&stub); context()->Plug(r0); } diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index f2b65efe8..f6029b514 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -302,17 +302,6 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) { } -void LUnaryMathOperation::PrintDataTo(StringStream* stream) { - stream->Add("/%s ", hydrogen()->OpName()); - value()->PrintTo(stream); -} - - -void LMathExp::PrintDataTo(StringStream* stream) { - value()->PrintTo(stream); -} - - void LLoadContextSlot::PrintDataTo(StringStream* stream) { context()->PrintTo(stream); stream->Add("[%d]", slot_index()); @@ -1124,47 +1113,101 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { - BuiltinFunctionId op = instr->op(); - if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) { - LOperand* input = UseFixedDouble(instr->value(), d2); - LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL); - return MarkAsCall(DefineFixedDouble(result, d2), instr); - } else if (op == kMathExp) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->value()->representation().IsDouble()); - LOperand* input = UseTempRegister(instr->value()); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll. - LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2); - return DefineAsRegister(result); - } else if (op == kMathPowHalf) { - LOperand* input = UseFixedDouble(instr->value(), d2); - LOperand* temp = FixedTemp(d3); - LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp); - return DefineFixedDouble(result, d2); - } else { - LOperand* input = UseRegister(instr->value()); - - LOperand* temp = (op == kMathRound) ? 
FixedTemp(d3) : NULL; - LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp); - switch (op) { - case kMathAbs: - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); - case kMathFloor: - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); - case kMathSqrt: - return DefineAsRegister(result); - case kMathRound: - return AssignEnvironment(DefineAsRegister(result)); - default: - UNREACHABLE(); - return NULL; - } + switch (instr->op()) { + case kMathFloor: return DoMathFloor(instr); + case kMathRound: return DoMathRound(instr); + case kMathAbs: return DoMathAbs(instr); + case kMathLog: return DoMathLog(instr); + case kMathSin: return DoMathSin(instr); + case kMathCos: return DoMathCos(instr); + case kMathTan: return DoMathTan(instr); + case kMathExp: return DoMathExp(instr); + case kMathSqrt: return DoMathSqrt(instr); + case kMathPowHalf: return DoMathPowHalf(instr); + default: + UNREACHABLE(); + return NULL; } } +LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) { + LOperand* input = UseRegister(instr->value()); + LMathFloor* result = new(zone()) LMathFloor(input); + return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); +} + + +LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { + LOperand* input = UseRegister(instr->value()); + LOperand* temp = FixedTemp(d3); + LMathRound* result = new(zone()) LMathRound(input, temp); + return AssignEnvironment(DefineAsRegister(result)); +} + + +LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { + LOperand* input = UseRegister(instr->value()); + LMathAbs* result = new(zone()) LMathAbs(input); + return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); +} + + +LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { + LOperand* input = UseFixedDouble(instr->value(), d2); + LMathLog* result = new(zone()) LMathLog(input); + return MarkAsCall(DefineFixedDouble(result, d2), instr); +} + + +LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) { + LOperand* input = UseFixedDouble(instr->value(), d2); + LMathSin* result = new(zone()) LMathSin(input); + return MarkAsCall(DefineFixedDouble(result, d2), instr); +} + + +LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) { + LOperand* input = UseFixedDouble(instr->value(), d2); + LMathCos* result = new(zone()) LMathCos(input); + return MarkAsCall(DefineFixedDouble(result, d2), instr); +} + + +LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) { + LOperand* input = UseFixedDouble(instr->value(), d2); + LMathTan* result = new(zone()) LMathTan(input); + return MarkAsCall(DefineFixedDouble(result, d2), instr); +} + + +LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { + ASSERT(instr->representation().IsDouble()); + ASSERT(instr->value()->representation().IsDouble()); + LOperand* input = UseTempRegister(instr->value()); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); + LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll. 
+ LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2); + return DefineAsRegister(result); +} + + +LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { + LOperand* input = UseRegister(instr->value()); + LMathSqrt* result = new(zone()) LMathSqrt(input); + return DefineAsRegister(result); +} + + +LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { + LOperand* input = UseFixedDouble(instr->value(), d2); + LOperand* temp = FixedTemp(d3); + LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp); + return DefineFixedDouble(result, d2); +} + + LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) { ASSERT(instr->key()->representation().IsTagged()); argument_count_ -= instr->argument_count(); @@ -1933,7 +1976,7 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) { LUnallocated* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2); - return AssignEnvironment(Define(result, temp1)); + return AssignEnvironment(result); } @@ -2133,16 +2176,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { (instr->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - // float->double conversion on non-VFP2 requires an extra scratch - // register. For convenience, just mark the elements register as "UseTemp" - // so that it can be used as a temp during the float->double conversion - // after it's no longer needed after the float load. - bool needs_temp = - !CpuFeatures::IsSupported(VFP2) && - (elements_kind == EXTERNAL_FLOAT_ELEMENTS); - LOperand* external_pointer = needs_temp - ? UseTempRegister(instr->elements()) - : UseRegister(instr->elements()); + LOperand* external_pointer = UseRegister(instr->elements()); result = new(zone()) LLoadKeyed(external_pointer, key); } @@ -2341,11 +2375,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { } -LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) { - return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, r0), instr); -} - - LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) { return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, r0), instr); } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 207faf46e..6486cad2b 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -97,7 +97,6 @@ class LCodeGen; V(DoubleToI) \ V(DummyUse) \ V(ElementsKind) \ - V(FastLiteral) \ V(FixedArrayBaseLength) \ V(FunctionLiteral) \ V(GetCachedArrayIndex) \ @@ -134,9 +133,18 @@ class LCodeGen; V(LoadNamedFieldPolymorphic) \ V(LoadNamedGeneric) \ V(MapEnumLength) \ + V(MathAbs) \ + V(MathCos) \ V(MathExp) \ + V(MathFloor) \ V(MathFloorOfDiv) \ + V(MathLog) \ V(MathMinMax) \ + V(MathPowHalf) \ + V(MathRound) \ + V(MathSin) \ + V(MathSqrt) \ + V(MathTan) \ V(ModI) \ V(MulI) \ V(MultiplyAddD) \ @@ -181,7 +189,6 @@ class LCodeGen; V(TrapAllocationMemento) \ V(Typeof) \ V(TypeofIsAndBranch) \ - V(UnaryMathOperation) \ V(UnknownOSRValue) \ V(ValueOf) \ V(ForInPrepareMap) \ @@ -703,9 +710,22 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> { }; -class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> { +class LMathFloor: public LTemplateInstruction<1, 1, 0> { public: - LUnaryMathOperation(LOperand* value, LOperand* temp) { + explicit LMathFloor(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return 
inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor") + DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) +}; + + +class LMathRound: public LTemplateInstruction<1, 1, 1> { + public: + LMathRound(LOperand* value, LOperand* temp) { inputs_[0] = value; temps_[0] = temp; } @@ -713,11 +733,69 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> { LOperand* value() { return inputs_[0]; } LOperand* temp() { return temps_[0]; } - DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation") + DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round") DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) +}; - virtual void PrintDataTo(StringStream* stream); - BuiltinFunctionId op() const { return hydrogen()->op(); } + +class LMathAbs: public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathAbs(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs") + DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) +}; + + +class LMathLog: public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathLog(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log") +}; + + +class LMathSin: public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathSin(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin") +}; + + +class LMathCos: public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathCos(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos") +}; + + +class LMathTan: public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathTan(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan") }; @@ -740,8 +818,32 @@ class LMathExp: public LTemplateInstruction<1, 1, 3> { LOperand* double_temp() { return temps_[2]; } DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") +}; - virtual void PrintDataTo(StringStream* stream); + +class LMathSqrt: public LTemplateInstruction<1, 1, 0> { + public: + explicit LMathSqrt(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt") +}; + + +class LMathPowHalf: public LTemplateInstruction<1, 1, 1> { + public: + LMathPowHalf(LOperand* value, LOperand* temp) { + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half") }; @@ -1310,7 +1412,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> { LOperand* left() { return inputs_[0]; } LOperand* right() { return inputs_[1]; } - DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max") + DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max") DECLARE_HYDROGEN_ACCESSOR(MathMinMax) }; @@ -2074,7 +2176,13 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) virtual void PrintDataTo(StringStream* stream); - bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } + bool NeedsCanonicalization() { + if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() || + hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) { + return false; + } + return 
hydrogen()->NeedsCanonicalization(); + } uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -2239,7 +2347,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> { }; -class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> { +class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> { public: LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) { temps_[0] = temp; @@ -2355,13 +2463,6 @@ class LAllocate: public LTemplateInstruction<1, 2, 2> { }; -class LFastLiteral: public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal") - DECLARE_HYDROGEN_ACCESSOR(FastLiteral) -}; - - class LArrayLiteral: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal") @@ -2611,6 +2712,17 @@ class LChunkBuilder BASE_EMBEDDED { static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val); static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val); + LInstruction* DoMathFloor(HUnaryMathOperation* instr); + LInstruction* DoMathRound(HUnaryMathOperation* instr); + LInstruction* DoMathAbs(HUnaryMathOperation* instr); + LInstruction* DoMathLog(HUnaryMathOperation* instr); + LInstruction* DoMathSin(HUnaryMathOperation* instr); + LInstruction* DoMathCos(HUnaryMathOperation* instr); + LInstruction* DoMathTan(HUnaryMathOperation* instr); + LInstruction* DoMathExp(HUnaryMathOperation* instr); + LInstruction* DoMathSqrt(HUnaryMathOperation* instr); + LInstruction* DoMathPowHalf(HUnaryMathOperation* instr); + private: enum Status { UNUSED, diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 7bb3535ff..a19015d80 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -113,7 +113,7 @@ void LCodeGen::Comment(const char* format, ...) { // issues when the stack allocated buffer goes out of scope. size_t length = builder.position(); Vector<char> copy = Vector<char>::New(length + 1); - memcpy(copy.start(), builder.Finalize(), copy.length()); + OS::MemCopy(copy.start(), builder.Finalize(), copy.length()); masm()->RecordComment(copy.start()); } @@ -195,8 +195,7 @@ bool LCodeGen::GeneratePrologue() { } } - if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); + if (info()->saves_caller_doubles()) { Comment(";;; Save clobbered callee double registers"); int count = 0; BitVector* doubles = chunk()->allocated_double_registers(); @@ -852,7 +851,9 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { } ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM. - if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) { + if (FLAG_deopt_every_n_times == 1 && + !info()->IsStub() && + info()->opt_count() == id) { __ Jump(entry, RelocInfo::RUNTIME_ENTRY); return; } @@ -1209,8 +1210,6 @@ void LCodeGen::DoModI(LModI* instr) { Label vfp_modulo, both_positive, right_negative; - CpuFeatureScope scope(masm(), VFP2); - // Check for x % 0. 
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { __ cmp(right, Operand::Zero()); @@ -1615,7 +1614,6 @@ void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map, LOperand* left_argument, LOperand* right_argument, Token::Value op) { - CpuFeatureScope vfp_scope(masm(), VFP2); Register left = ToRegister(left_argument); Register right = ToRegister(right_argument); @@ -1901,7 +1899,6 @@ void LCodeGen::DoConstantI(LConstantI* instr) { void LCodeGen::DoConstantD(LConstantD* instr) { ASSERT(instr->result()->IsDoubleRegister()); DwVfpRegister result = ToDoubleRegister(instr->result()); - CpuFeatureScope scope(masm(), VFP2); double v = instr->value(); __ Vmov(result, v, scratch0()); } @@ -2072,7 +2069,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); } else { ASSERT(instr->hydrogen()->representation().IsDouble()); - CpuFeatureScope scope(masm(), VFP2); DwVfpRegister left_reg = ToDoubleRegister(left); DwVfpRegister right_reg = ToDoubleRegister(right); DwVfpRegister result_reg = ToDoubleRegister(instr->result()); @@ -2118,7 +2114,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - CpuFeatureScope scope(masm(), VFP2); DwVfpRegister left = ToDoubleRegister(instr->left()); DwVfpRegister right = ToDoubleRegister(instr->right()); DwVfpRegister result = ToDoubleRegister(instr->result()); @@ -2209,7 +2204,6 @@ void LCodeGen::DoBranch(LBranch* instr) { __ cmp(reg, Operand::Zero()); EmitBranch(true_block, false_block, ne); } else if (r.IsDouble()) { - CpuFeatureScope scope(masm(), VFP2); DwVfpRegister reg = ToDoubleRegister(instr->value()); Register scratch = scratch0(); @@ -2301,7 +2295,6 @@ void LCodeGen::DoBranch(LBranch* instr) { } if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { - CpuFeatureScope scope(masm(), VFP2); // heap number -> false iff +0, -0, or NaN. DwVfpRegister dbl_scratch = double_scratch0(); Label not_heap_number; @@ -2381,7 +2374,6 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { EmitGoto(next_block); } else { if (instr->is_double()) { - CpuFeatureScope scope(masm(), VFP2); // Compare left and right operands as doubles and load the // resulting flags into the normal status register. __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); @@ -2936,8 +2928,7 @@ void LCodeGen::DoReturn(LReturn* instr) { __ push(r0); __ CallRuntime(Runtime::kTraceExit, 1); } - if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); + if (info()->saves_caller_doubles()) { ASSERT(NeedsEagerFrame()); BitVector* doubles = chunk()->allocated_double_registers(); BitVector::Iterator save_iterator(doubles); @@ -3319,58 +3310,11 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { ? Operand(constant_key << element_size_shift) : Operand(key, LSL, shift_size); __ add(scratch0(), external_pointer, operand); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset); - __ vcvt_f64_f32(result, kScratchDoubleReg.low()); - } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ vldr(result, scratch0(), additional_offset); - } - } else { - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - Register value = external_pointer; - __ ldr(value, MemOperand(scratch0(), additional_offset)); - __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask)); - - __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits)); - __ and_(scratch0(), scratch0(), - Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); - - Label exponent_rebiased; - __ teq(scratch0(), Operand(0x00)); - __ b(eq, &exponent_rebiased); - - __ teq(scratch0(), Operand(0xff)); - __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq); - __ b(eq, &exponent_rebiased); - - // Rebias exponent. - __ add(scratch0(), - scratch0(), - Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); - - __ bind(&exponent_rebiased); - __ and_(sfpd_hi, value, Operand(kBinary32SignMask)); - __ orr(sfpd_hi, sfpd_hi, - Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord)); - - // Shift mantissa. - static const int kMantissaShiftForHiWord = - kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; - - static const int kMantissaShiftForLoWord = - kBitsPerInt - kMantissaShiftForHiWord; - - __ orr(sfpd_hi, sfpd_hi, - Operand(sfpd_lo, LSR, kMantissaShiftForHiWord)); - __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord)); - - } else { - __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset)); - __ ldr(sfpd_hi, MemOperand(scratch0(), - additional_offset + kPointerSize)); - } + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { + __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset); + __ vcvt_f64_f32(result, kScratchDoubleReg.low()); + } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS + __ vldr(result, scratch0(), additional_offset); } } else { Register result = ToRegister(instr->result()); @@ -3444,23 +3388,12 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { if (!key_is_constant) { __ add(elements, elements, Operand(key, LSL, shift_size)); } - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - __ add(elements, elements, Operand(base_offset)); - __ vldr(result, elements, 0); - if (instr->hydrogen()->RequiresHoleCheck()) { - __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); - __ cmp(scratch, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr->environment()); - } - } else { - __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize)); - __ ldr(sfpd_lo, MemOperand(elements, base_offset)); - if (instr->hydrogen()->RequiresHoleCheck()) { - ASSERT(kPointerSize == sizeof(kHoleNanLower32)); - __ cmp(sfpd_hi, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr->environment()); - } + __ add(elements, elements, Operand(base_offset)); + __ vldr(result, elements, 0); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + __ cmp(scratch, Operand(kHoleNanUpper32)); + DeoptimizeIf(eq, instr->environment()); } } @@ -3821,7 +3754,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { } -void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { +void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); @@ -3887,7 +3820,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { } -void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { +void 
LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); __ cmp(input, Operand::Zero()); @@ -3901,20 +3834,18 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { } -void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { - CpuFeatureScope scope(masm(), VFP2); +void LCodeGen::DoMathAbs(LMathAbs* instr) { // Class for deferred case. class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { public: - DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, - LUnaryMathOperation* instr) + DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); } virtual LInstruction* instr() { return instr_; } private: - LUnaryMathOperation* instr_; + LMathAbs* instr_; }; Representation r = instr->hydrogen()->value()->representation(); @@ -3938,8 +3869,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { } -void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { - CpuFeatureScope scope(masm(), VFP2); +void LCodeGen::DoMathFloor(LMathFloor* instr) { DwVfpRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); Register input_high = scratch0(); @@ -3961,8 +3891,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { } -void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { - CpuFeatureScope scope(masm(), VFP2); +void LCodeGen::DoMathRound(LMathRound* instr) { DwVfpRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); @@ -4001,16 +3930,14 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { } -void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { - CpuFeatureScope scope(masm(), VFP2); +void LCodeGen::DoMathSqrt(LMathSqrt* instr) { DwVfpRegister input = ToDoubleRegister(instr->value()); DwVfpRegister result = ToDoubleRegister(instr->result()); __ vsqrt(result, input); } -void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { - CpuFeatureScope scope(masm(), VFP2); +void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { DwVfpRegister input = ToDoubleRegister(instr->value()); DwVfpRegister result = ToDoubleRegister(instr->result()); DwVfpRegister temp = ToDoubleRegister(instr->temp()); @@ -4032,7 +3959,6 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { void LCodeGen::DoPower(LPower* instr) { - CpuFeatureScope scope(masm(), VFP2); Representation exponent_type = instr->hydrogen()->right()->representation(); // Having marked this as a call, we can use any registers. // Just make sure that the input/output registers are the expected ones. 
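
// Aside on DoMathPowHalf above: x ** 0.5 cannot be lowered to a bare
// vsqrt because ECMA-262 requires Math.pow(-Infinity, 0.5) == +Infinity,
// while a hardware square root of -Infinity yields NaN. A host-side
// sketch of the required semantics (the -Infinity guard is elided from
// the hunk above, so treat it as an assumption about the full method;
// the signed-zero subtlety is glossed over as well):

#include <cmath>
#include <limits>

static double PowHalf(double x) {
  if (x == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();
  }
  return std::sqrt(x);  // a single vsqrt on VFP
}
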
@@ -4065,7 +3991,6 @@ void LCodeGen::DoPower(LPower* instr) { void LCodeGen::DoRandom(LRandom* instr) { - CpuFeatureScope scope(masm(), VFP2); class DeferredDoRandom: public LDeferredCode { public: DeferredDoRandom(LCodeGen* codegen, LRandom* instr) @@ -4144,7 +4069,6 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) { void LCodeGen::DoMathExp(LMathExp* instr) { - CpuFeatureScope scope(masm(), VFP2); DwVfpRegister input = ToDoubleRegister(instr->value()); DwVfpRegister result = ToDoubleRegister(instr->result()); DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); @@ -4158,7 +4082,7 @@ void LCodeGen::DoMathExp(LMathExp* instr) { } -void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { +void LCodeGen::DoMathLog(LMathLog* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); TranscendentalCacheStub stub(TranscendentalCache::LOG, TranscendentalCacheStub::UNTAGGED); @@ -4166,7 +4090,7 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { } -void LCodeGen::DoMathTan(LUnaryMathOperation* instr) { +void LCodeGen::DoMathTan(LMathTan* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); TranscendentalCacheStub stub(TranscendentalCache::TAN, TranscendentalCacheStub::UNTAGGED); @@ -4174,7 +4098,7 @@ void LCodeGen::DoMathTan(LUnaryMathOperation* instr) { } -void LCodeGen::DoMathCos(LUnaryMathOperation* instr) { +void LCodeGen::DoMathCos(LMathCos* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); TranscendentalCacheStub stub(TranscendentalCache::COS, TranscendentalCacheStub::UNTAGGED); @@ -4182,7 +4106,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) { } -void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { +void LCodeGen::DoMathSin(LMathSin* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); TranscendentalCacheStub stub(TranscendentalCache::SIN, TranscendentalCacheStub::UNTAGGED); @@ -4190,42 +4114,6 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { } -void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { - switch (instr->op()) { - case kMathAbs: - DoMathAbs(instr); - break; - case kMathFloor: - DoMathFloor(instr); - break; - case kMathRound: - DoMathRound(instr); - break; - case kMathSqrt: - DoMathSqrt(instr); - break; - case kMathPowHalf: - DoMathPowHalf(instr); - break; - case kMathCos: - DoMathCos(instr); - break; - case kMathSin: - DoMathSin(instr); - break; - case kMathTan: - DoMathTan(instr); - break; - case kMathLog: - DoMathLog(instr); - break; - default: - Abort("Unimplemented type of LUnaryMathOperation."); - UNREACHABLE(); - } -} - - void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { ASSERT(ToRegister(instr->function()).is(r1)); ASSERT(instr->HasPointerMap()); @@ -4442,7 +4330,6 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { - CpuFeatureScope scope(masm(), VFP2); Register external_pointer = ToRegister(instr->elements()); Register key = no_reg; ElementsKind elements_kind = instr->elements_kind(); @@ -4463,7 +4350,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - CpuFeatureScope scope(masm(), VFP3); DwVfpRegister value(ToDoubleRegister(instr->value())); Operand operand(key_is_constant ? 
Operand(constant_key << element_size_shift) @@ -4513,7 +4399,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { - CpuFeatureScope scope(masm(), VFP2); DwVfpRegister value = ToDoubleRegister(instr->value()); Register elements = ToRegister(instr->elements()); Register key = no_reg; @@ -4545,18 +4430,14 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { } if (instr->NeedsCanonicalization()) { - // Check for NaN. All NaNs must be canonicalized. - __ VFPCompareAndSetFlags(value, value); - Label after_canonicalization; - - // Only load canonical NaN if the comparison above set the overflow. - __ b(vc, &after_canonicalization); - __ Vmov(value, - FixedDoubleArray::canonical_not_the_hole_nan_as_double()); - - __ bind(&after_canonicalization); + // Force a canonical NaN. + if (masm()->emit_debug_code()) { + __ vmrs(ip); + __ tst(ip, Operand(kVFPDefaultNaNModeControlBit)); + __ Assert(ne, "Default NaN mode not set"); + } + __ VFPCanonicalizeNaN(value); } - __ vstr(value, scratch, instr->additional_index() << element_size_shift); } @@ -4814,7 +4695,6 @@ void LCodeGen::DoStringLength(LStringLength* instr) { void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - CpuFeatureScope scope(masm(), VFP2); LOperand* input = instr->value(); ASSERT(input->IsRegister() || input->IsStackSlot()); LOperand* output = instr->result(); @@ -4832,7 +4712,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - CpuFeatureScope scope(masm(), VFP2); LOperand* input = instr->value(); LOperand* output = instr->result(); @@ -4894,43 +4773,6 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) { } -// Convert unsigned integer with specified number of leading zeroes in binary -// representation to IEEE 754 double. -// Integer to convert is passed in register src. -// Resulting double is returned in registers hiword:loword. -// This functions does not work correctly for 0. -static void GenerateUInt2Double(MacroAssembler* masm, - Register src, - Register hiword, - Register loword, - Register scratch, - int leading_zeroes) { - const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; - const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; - - const int mantissa_shift_for_hi_word = - meaningful_bits - HeapNumber::kMantissaBitsInTopWord; - const int mantissa_shift_for_lo_word = - kBitsPerInt - mantissa_shift_for_hi_word; - masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); - if (mantissa_shift_for_hi_word > 0) { - masm->mov(loword, Operand(src, LSL, mantissa_shift_for_lo_word)); - masm->orr(hiword, scratch, - Operand(src, LSR, mantissa_shift_for_hi_word)); - } else { - masm->mov(loword, Operand::Zero()); - masm->orr(hiword, scratch, - Operand(src, LSL, -mantissa_shift_for_hi_word)); - } - - // If least significant bit of biased exponent was not 1 it was corrupted - // by most significant bit of mantissa so we should fix that. 
- if (!(biased_exponent & 1)) { - masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); - } -} - - void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, LOperand* value, IntegerSignedness signedness) { @@ -4952,35 +4794,11 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, __ SmiUntag(src, dst); __ eor(src, src, Operand(0x80000000)); } - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - __ vmov(flt_scratch, src); - __ vcvt_f64_s32(dbl_scratch, flt_scratch); - } else { - FloatingPointHelper::Destination dest = - FloatingPointHelper::kCoreRegisters; - FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0, - sfpd_lo, sfpd_hi, - scratch0(), s0); - } + __ vmov(flt_scratch, src); + __ vcvt_f64_s32(dbl_scratch, flt_scratch); } else { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - __ vmov(flt_scratch, src); - __ vcvt_f64_u32(dbl_scratch, flt_scratch); - } else { - Label no_leading_zero, convert_done; - __ tst(src, Operand(0x80000000)); - __ b(ne, &no_leading_zero); - - // Integer has one leading zeros. - GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, r9, 1); - __ b(&convert_done); - - __ bind(&no_leading_zero); - GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, r9, 0); - __ bind(&convert_done); - } + __ vmov(flt_scratch, src); + __ vcvt_f64_u32(dbl_scratch, flt_scratch); } if (FLAG_inline_new) { @@ -4996,30 +4814,16 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, // TODO(3095996): Put a valid pointer value in the stack slot where the result // register is stored, as this register is in the pointer map, but contains an // integer value. - if (!CpuFeatures::IsSupported(VFP2)) { - // Preserve sfpd_lo. - __ mov(r9, sfpd_lo); - } __ mov(ip, Operand::Zero()); __ StoreToSafepointRegisterSlot(ip, dst); CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); __ Move(dst, r0); - if (!CpuFeatures::IsSupported(VFP2)) { - // Restore sfpd_lo. - __ mov(sfpd_lo, r9); - } __ sub(dst, dst, Operand(kHeapObjectTag)); // Done. Put the value in dbl_scratch into the value of the allocated heap // number. 
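
// Aside on the GenerateUInt2Double helper removed above: it built the
// IEEE 754 encoding of a uint32 directly in two 32-bit words for the
// soft-float path. A host-side model of the same bit manipulation
// (constants follow HeapNumber's double layout; as in the original,
// 'leading_zeroes' must be exact and this does not work for 0):

#include <cstdint>

static void UInt2Double(uint32_t src, int leading_zeroes,
                        uint32_t* hiword, uint32_t* loword) {
  const int kExponentBias = 1023;
  const int kExponentShift = 20;           // exponent field in hi word
  const int kMantissaBitsInTopWord = 20;
  const int meaningful_bits = 32 - leading_zeroes - 1;
  const uint32_t biased_exponent =
      static_cast<uint32_t>(kExponentBias + meaningful_bits);
  const int hi_shift = meaningful_bits - kMantissaBitsInTopWord;
  const uint32_t exponent = biased_exponent << kExponentShift;
  if (hi_shift > 0) {
    *loword = src << (32 - hi_shift);
    *hiword = exponent | (src >> hi_shift);
  } else {
    *loword = 0;
    *hiword = exponent | (src << -hi_shift);
  }
  // The mantissa's implicit leading 1 lands on the exponent's least
  // significant bit; clear it again when that bit should be 0.
  if (!(biased_exponent & 1)) {
    *hiword &= ~(1u << kExponentShift);
  }
}
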
__ bind(&done); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); - } else { - __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset)); - __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset)); - } + __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); __ add(dst, dst, Operand(kHeapObjectTag)); __ StoreToSafepointRegisterSlot(dst, dst); } @@ -5052,45 +4856,16 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { Label no_special_nan_handling; Label done; if (convert_hole) { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - DwVfpRegister input_reg = ToDoubleRegister(instr->value()); - __ VFPCompareAndSetFlags(input_reg, input_reg); - __ b(vc, &no_special_nan_handling); - __ vmov(reg, scratch0(), input_reg); - __ cmp(scratch0(), Operand(kHoleNanUpper32)); - Label canonicalize; - __ b(ne, &canonicalize); - __ Move(reg, factory()->the_hole_value()); - __ b(&done); - __ bind(&canonicalize); - __ Vmov(input_reg, - FixedDoubleArray::canonical_not_the_hole_nan_as_double(), - no_reg); - } else { - Label not_hole; - __ cmp(sfpd_hi, Operand(kHoleNanUpper32)); - __ b(ne, ¬_hole); - __ Move(reg, factory()->the_hole_value()); - __ b(&done); - __ bind(¬_hole); - __ and_(scratch, sfpd_hi, Operand(0x7ff00000)); - __ cmp(scratch, Operand(0x7ff00000)); - __ b(ne, &no_special_nan_handling); - Label special_nan_handling; - __ tst(sfpd_hi, Operand(0x000FFFFF)); - __ b(ne, &special_nan_handling); - __ cmp(sfpd_lo, Operand(0)); - __ b(eq, &no_special_nan_handling); - __ bind(&special_nan_handling); - double canonical_nan = - FixedDoubleArray::canonical_not_the_hole_nan_as_double(); - uint64_t casted_nan = BitCast<uint64_t>(canonical_nan); - __ mov(sfpd_lo, - Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF))); - __ mov(sfpd_hi, - Operand(static_cast<uint32_t>(casted_nan >> 32))); - } + DwVfpRegister input_reg = ToDoubleRegister(instr->value()); + __ VFPCompareAndSetFlags(input_reg, input_reg); + __ b(vc, &no_special_nan_handling); + __ vmov(scratch, input_reg.high()); + __ cmp(scratch, Operand(kHoleNanUpper32)); + // If not the hole NaN, force the NaN to be canonical. 
+ __ VFPCanonicalizeNaN(input_reg, ne); + __ b(ne, &no_special_nan_handling); + __ Move(reg, factory()->the_hole_value()); + __ b(&done); } __ bind(&no_special_nan_handling); @@ -5104,13 +4879,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { __ jmp(deferred->entry()); } __ bind(deferred->exit()); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - __ vstr(input_reg, reg, HeapNumber::kValueOffset); - } else { - __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset)); - __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize)); - } + __ vstr(input_reg, reg, HeapNumber::kValueOffset); // Now that we have finished with the object's real address tag it __ add(reg, reg, Operand(kHeapObjectTag)); __ bind(&done); @@ -5160,7 +4929,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, Register scratch = scratch0(); SwVfpRegister flt_scratch = double_scratch0().low(); ASSERT(!result_reg.is(double_scratch0())); - CpuFeatureScope scope(masm(), VFP2); Label load_smi, heap_number, done; @@ -5249,7 +5017,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ cmp(scratch1, Operand(ip)); if (instr->truncating()) { - CpuFeatureScope scope(masm(), VFP2); Register scratch3 = ToRegister(instr->temp2()); ASSERT(!scratch3.is(input_reg) && !scratch3.is(scratch1) && @@ -5270,11 +5037,10 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ sub(scratch1, input_reg, Operand(kHeapObjectTag)); __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset); - __ ECMAToInt32VFP(input_reg, double_scratch2, double_scratch, - scratch1, scratch2, scratch3); + __ ECMAToInt32(input_reg, double_scratch2, double_scratch, + scratch1, scratch2, scratch3); } else { - CpuFeatureScope scope(masm(), VFP3); // Deoptimize if we don't have a heap number. DeoptimizeIf(ne, instr->environment()); @@ -5369,8 +5135,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { if (instr->truncating()) { Register scratch3 = ToRegister(instr->temp2()); - __ ECMAToInt32VFP(result_reg, double_input, double_scratch, - scratch1, scratch2, scratch3); + __ ECMAToInt32(result_reg, double_input, double_scratch, + scratch1, scratch2, scratch3); } else { __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); // Deoptimize if the input wasn't a int32 (inside a double). 
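
// Aside on TryDoubleToInt32Exact used above: the "exact" check is a
// round trip through int32 followed by an equality compare, which
// rejects NaN, out-of-range and fractional inputs in one go. Host-side
// sketch (illustrative only: vcvt_s32_f64 saturates on overflow,
// whereas this C++ cast is undefined out of range, and a -0.0 input
// needs a separate sign-bit check as the codegen does elsewhere):

#include <cstdint>

static bool DoubleToInt32Exact(double input, int32_t* result) {
  int32_t truncated = static_cast<int32_t>(input);    // vcvt_s32_f64
  double round_trip = static_cast<double>(truncated); // vcvt_f64_s32
  *result = truncated;
  return round_trip == input;  // VFPCompareAndSetFlags, then eq
}
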
@@ -5486,7 +5252,6 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - CpuFeatureScope vfp_scope(masm(), VFP2); DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); @@ -5495,7 +5260,6 @@ void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - CpuFeatureScope scope(masm(), VFP2); Register unclamped_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); __ ClampUint8(result_reg, unclamped_reg); @@ -5503,7 +5267,6 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - CpuFeatureScope scope(masm(), VFP2); Register scratch = scratch0(); Register input_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); @@ -5541,7 +5304,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { - ASSERT(instr->temp()->Equals(instr->result())); Register prototype_reg = ToRegister(instr->temp()); Register map_reg = ToRegister(instr->temp2()); @@ -5554,8 +5316,6 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { for (int i = 0; i < maps->length(); i++) { prototype_maps_.Add(maps->at(i), info()->zone()); } - __ LoadHeapObject(prototype_reg, - prototypes->at(prototypes->length() - 1)); } else { for (int i = 0; i < prototypes->length(); i++) { __ LoadHeapObject(prototype_reg, prototypes->at(i)); @@ -5671,11 +5431,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) { if (instr->hydrogen()->MustAllocateDoubleAligned()) { flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); } + if (instr->hydrogen()->CanAllocateInOldPointerSpace()) { + flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); + } if (instr->size()->IsConstantOperand()) { int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - if (instr->hydrogen()->CanAllocateInOldPointerSpace()) { - flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); - } __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); } else { Register size = ToRegister(instr->size()); @@ -5703,7 +5463,13 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); __ SmiTag(size, size); __ push(size); - CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); + if (instr->hydrogen()->CanAllocateInOldPointerSpace()) { + CallRuntimeFromDeferred( + Runtime::kAllocateInOldPointerSpace, 1, instr); + } else { + CallRuntimeFromDeferred( + Runtime::kAllocateInNewSpace, 1, instr); + } __ StoreToSafepointRegisterSlot(r0, result); } @@ -5737,7 +5503,6 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { // Boilerplate already exists, constant elements are never accessed. // Pass an empty fixed array. __ mov(r1, Operand(isolate()->factory()->empty_fixed_array())); - __ Push(r3, r2, r1); // Pick the right runtime function or stub to call. 
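
// Aside on the DoAllocate/DoDeferredAllocate hunks above: the
// pretenure flag is hoisted out of the constant-size branch so that
// register-sized allocations are pretenured too, and the deferred
// (slow) path now picks the matching runtime entry. Sketch of the
// flag composition; the bit values here are made up for illustration:

#include <cstdint>

enum AllocationFlags : uint32_t {
  NO_ALLOCATION_FLAGS         = 0,
  TAG_OBJECT                  = 1u << 0,  // assumed encoding
  DOUBLE_ALIGNMENT            = 1u << 1,  // assumed encoding
  PRETENURE_OLD_POINTER_SPACE = 1u << 2,  // assumed encoding
};

static AllocationFlags ComposeAllocationFlags(bool double_aligned,
                                              bool pretenure_old_pointer) {
  uint32_t flags = TAG_OBJECT;
  if (double_aligned) flags |= DOUBLE_ALIGNMENT;
  // Now applied before the constant/register size split, so both
  // __ Allocate() paths see the pretenure bit.
  if (pretenure_old_pointer) flags |= PRETENURE_OLD_POINTER_SPACE;
  return static_cast<AllocationFlags>(flags);
}
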
int length = instr->hydrogen()->length(); @@ -5748,8 +5513,10 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else if (instr->hydrogen()->depth() > 1) { + __ Push(r3, r2, r1); CallRuntime(Runtime::kCreateArrayLiteral, 3, instr); } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { + __ Push(r3, r2, r1); CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr); } else { FastCloneShallowArrayStub::Mode mode = @@ -5762,170 +5529,6 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { } -void LCodeGen::EmitDeepCopy(Handle<JSObject> object, - Register result, - Register source, - int* offset, - AllocationSiteMode mode) { - ASSERT(!source.is(r2)); - ASSERT(!result.is(r2)); - - bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE && - object->map()->CanTrackAllocationSite(); - - // Only elements backing stores for non-COW arrays need to be copied. - Handle<FixedArrayBase> elements(object->elements()); - bool has_elements = elements->length() > 0 && - elements->map() != isolate()->heap()->fixed_cow_array_map(); - - // Increase the offset so that subsequent objects end up right after - // this object and its backing store. - int object_offset = *offset; - int object_size = object->map()->instance_size(); - int elements_size = has_elements ? elements->Size() : 0; - int elements_offset = *offset + object_size; - if (create_allocation_site_info) { - elements_offset += AllocationSiteInfo::kSize; - *offset += AllocationSiteInfo::kSize; - } - - *offset += object_size + elements_size; - - // Copy object header. - ASSERT(object->properties()->length() == 0); - int inobject_properties = object->map()->inobject_properties(); - int header_size = object_size - inobject_properties * kPointerSize; - for (int i = 0; i < header_size; i += kPointerSize) { - if (has_elements && i == JSObject::kElementsOffset) { - __ add(r2, result, Operand(elements_offset)); - } else { - __ ldr(r2, FieldMemOperand(source, i)); - } - __ str(r2, FieldMemOperand(result, object_offset + i)); - } - - // Copy in-object properties. - for (int i = 0; i < inobject_properties; i++) { - int total_offset = object_offset + object->GetInObjectPropertyOffset(i); - Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i), - isolate()); - if (value->IsJSObject()) { - Handle<JSObject> value_object = Handle<JSObject>::cast(value); - __ add(r2, result, Operand(*offset)); - __ str(r2, FieldMemOperand(result, total_offset)); - __ LoadHeapObject(source, value_object); - EmitDeepCopy(value_object, result, source, offset, - DONT_TRACK_ALLOCATION_SITE); - } else if (value->IsHeapObject()) { - __ LoadHeapObject(r2, Handle<HeapObject>::cast(value)); - __ str(r2, FieldMemOperand(result, total_offset)); - } else { - __ mov(r2, Operand(value)); - __ str(r2, FieldMemOperand(result, total_offset)); - } - } - - // Build Allocation Site Info if desired - if (create_allocation_site_info) { - __ mov(r2, Operand(Handle<Map>(isolate()->heap()-> - allocation_site_info_map()))); - __ str(r2, FieldMemOperand(result, object_size)); - __ str(source, FieldMemOperand(result, object_size + kPointerSize)); - } - - if (has_elements) { - // Copy elements backing store header. 
- __ LoadHeapObject(source, elements); - for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) { - __ ldr(r2, FieldMemOperand(source, i)); - __ str(r2, FieldMemOperand(result, elements_offset + i)); - } - - // Copy elements backing store content. - int elements_length = has_elements ? elements->length() : 0; - if (elements->IsFixedDoubleArray()) { - Handle<FixedDoubleArray> double_array = - Handle<FixedDoubleArray>::cast(elements); - for (int i = 0; i < elements_length; i++) { - int64_t value = double_array->get_representation(i); - // We only support little endian mode... - int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF); - int32_t value_high = static_cast<int32_t>(value >> 32); - int total_offset = - elements_offset + FixedDoubleArray::OffsetOfElementAt(i); - __ mov(r2, Operand(value_low)); - __ str(r2, FieldMemOperand(result, total_offset)); - __ mov(r2, Operand(value_high)); - __ str(r2, FieldMemOperand(result, total_offset + 4)); - } - } else if (elements->IsFixedArray()) { - Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements); - for (int i = 0; i < elements_length; i++) { - int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); - Handle<Object> value(fast_elements->get(i), isolate()); - if (value->IsJSObject()) { - Handle<JSObject> value_object = Handle<JSObject>::cast(value); - __ add(r2, result, Operand(*offset)); - __ str(r2, FieldMemOperand(result, total_offset)); - __ LoadHeapObject(source, value_object); - EmitDeepCopy(value_object, result, source, offset, - DONT_TRACK_ALLOCATION_SITE); - } else if (value->IsHeapObject()) { - __ LoadHeapObject(r2, Handle<HeapObject>::cast(value)); - __ str(r2, FieldMemOperand(result, total_offset)); - } else { - __ mov(r2, Operand(value)); - __ str(r2, FieldMemOperand(result, total_offset)); - } - } - } else { - UNREACHABLE(); - } - } -} - - -void LCodeGen::DoFastLiteral(LFastLiteral* instr) { - int size = instr->hydrogen()->total_size(); - ElementsKind boilerplate_elements_kind = - instr->hydrogen()->boilerplate()->GetElementsKind(); - - // Deopt if the array literal boilerplate ElementsKind is of a type different - // than the expected one. The check isn't necessary if the boilerplate has - // already been converted to TERMINAL_FAST_ELEMENTS_KIND. - if (CanTransitionToMoreGeneralFastElementsKind( - boilerplate_elements_kind, true)) { - __ LoadHeapObject(r1, instr->hydrogen()->boilerplate()); - // Load map into r2. - __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); - // Load the map's "bit field 2". - __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset)); - // Retrieve elements_kind from bit field 2. - __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount); - __ cmp(r2, Operand(boilerplate_elements_kind)); - DeoptimizeIf(ne, instr->environment()); - } - - // Allocate all objects that are part of the literal in one big - // allocation. This avoids multiple limit checks. 
- Label allocated, runtime_allocate; - __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); - __ jmp(&allocated); - - __ bind(&runtime_allocate); - __ mov(r0, Operand(Smi::FromInt(size))); - __ push(r0); - CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); - - __ bind(&allocated); - int offset = 0; - __ LoadHeapObject(r1, instr->hydrogen()->boilerplate()); - EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset, - instr->hydrogen()->allocation_site_mode()); - ASSERT_EQ(size, offset); -} - - void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { Handle<FixedArray> literals(instr->environment()->closure()->literals()); Handle<FixedArray> constant_properties = @@ -6002,17 +5605,8 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { __ bind(&allocated); // Copy the content into the newly allocated memory. - // (Unroll copy loop once for better throughput). - for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) { - __ ldr(r3, FieldMemOperand(r1, i)); - __ ldr(r2, FieldMemOperand(r1, i + kPointerSize)); - __ str(r3, FieldMemOperand(r0, i)); - __ str(r2, FieldMemOperand(r0, i + kPointerSize)); - } - if ((size % (2 * kPointerSize)) != 0) { - __ ldr(r3, FieldMemOperand(r1, size - kPointerSize)); - __ str(r3, FieldMemOperand(r0, size - kPointerSize)); - } + __ CopyFields(r0, r1, double_scratch0(), double_scratch0().low(), + size / kPointerSize); } @@ -6022,7 +5616,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { Handle<SharedFunctionInfo> shared_info = instr->shared_info(); bool pretenure = instr->hydrogen()->pretenure(); if (!pretenure && shared_info->num_literals() == 0) { - FastNewClosureStub stub(shared_info->language_mode()); + FastNewClosureStub stub(shared_info->language_mode(), + shared_info->is_generator()); __ mov(r1, Operand(shared_info)); __ push(r1); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index 686241db7..3e24dae54 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -137,7 +137,7 @@ class LCodeGen BASE_EMBEDDED { IntegerSignedness signedness); void DoDeferredTaggedToI(LTaggedToI* instr); - void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr); + void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr); void DoDeferredStackCheck(LStackCheck* instr); void DoDeferredRandom(LRandom* instr); void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); @@ -294,17 +294,7 @@ class LCodeGen BASE_EMBEDDED { Register ToRegister(int index) const; DwVfpRegister ToDoubleRegister(int index) const; - // Specific math operations - used from DoUnaryMathOperation. - void EmitIntegerMathAbs(LUnaryMathOperation* instr); - void DoMathAbs(LUnaryMathOperation* instr); - void DoMathFloor(LUnaryMathOperation* instr); - void DoMathRound(LUnaryMathOperation* instr); - void DoMathSqrt(LUnaryMathOperation* instr); - void DoMathPowHalf(LUnaryMathOperation* instr); - void DoMathLog(LUnaryMathOperation* instr); - void DoMathTan(LUnaryMathOperation* instr); - void DoMathCos(LUnaryMathOperation* instr); - void DoMathSin(LUnaryMathOperation* instr); + void EmitIntegerMathAbs(LMathAbs* instr); // Support for recording safepoint and position information. 
void RecordSafepoint(LPointerMap* pointers, diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc index a65ab7e7d..596d58f47 100644 --- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc +++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc @@ -171,10 +171,8 @@ void LGapResolver::BreakCycle(int index) { } else if (source->IsStackSlot()) { __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source)); } else if (source->IsDoubleRegister()) { - CpuFeatureScope scope(cgen_->masm(), VFP2); __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source)); } else if (source->IsDoubleStackSlot()) { - CpuFeatureScope scope(cgen_->masm(), VFP2); __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source)); } else { UNREACHABLE(); @@ -194,10 +192,8 @@ void LGapResolver::RestoreValue() { } else if (saved_destination_->IsStackSlot()) { __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_)); } else if (saved_destination_->IsDoubleRegister()) { - CpuFeatureScope scope(cgen_->masm(), VFP2); __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg); } else if (saved_destination_->IsDoubleStackSlot()) { - CpuFeatureScope scope(cgen_->masm(), VFP2); __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_)); } else { UNREACHABLE(); @@ -233,8 +229,7 @@ void LGapResolver::EmitMove(int index) { MemOperand destination_operand = cgen_->ToMemOperand(destination); if (in_cycle_) { if (!destination_operand.OffsetIsUint12Encodable()) { - CpuFeatureScope scope(cgen_->masm(), VFP2); - // ip is overwritten while saving the value to the destination. + // ip is overwritten while saving the value to the destination. // Therefore we can't use ip. It is OK if the read from the source // destroys ip, since that happens before the value is read. __ vldr(kScratchDoubleReg.low(), source_operand); @@ -272,7 +267,6 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleRegister()) { - CpuFeatureScope scope(cgen_->masm(), VFP2); DwVfpRegister source_register = cgen_->ToDoubleRegister(source); if (destination->IsDoubleRegister()) { __ vmov(cgen_->ToDoubleRegister(destination), source_register); @@ -282,8 +276,7 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleStackSlot()) { - CpuFeatureScope scope(cgen_->masm(), VFP2); - MemOperand source_operand = cgen_->ToMemOperand(source); + MemOperand source_operand = cgen_->ToMemOperand(source); if (destination->IsDoubleRegister()) { __ vldr(cgen_->ToDoubleRegister(destination), source_operand); } else { diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index bacf570c3..7df0c0a1f 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -291,8 +291,6 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) { void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) { - ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatureScope scope(this, VFP2); if (!dst.is(src)) { vmov(dst, src); } @@ -775,6 +773,23 @@ void MacroAssembler::Strd(Register src1, Register src2, } +void MacroAssembler::VFPEnsureFPSCRState(Register scratch) { + // If needed, restore wanted bits of FPSCR. 
+ Label fpscr_done; + vmrs(scratch); + tst(scratch, Operand(kVFPDefaultNaNModeControlBit)); + b(ne, &fpscr_done); + orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit)); + vmsr(scratch); + bind(&fpscr_done); +} + +void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister value, + const Condition cond) { + vsub(value, value, kDoubleRegZero, cond); +} + + void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond) { @@ -811,7 +826,6 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, void MacroAssembler::Vmov(const DwVfpRegister dst, const double imm, const Register scratch) { - ASSERT(IsEnabled(VFP2)); static const DoubleRepresentation minus_zero(-0.0); static const DoubleRepresentation zero(0.0); DoubleRepresentation value(imm); @@ -873,7 +887,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { // Optionally save all double registers. if (save_doubles) { - CpuFeatureScope scope(this, VFP2); // Check CPU flags for number of registers, setting the Z condition flag. CheckFor32DRegs(ip); @@ -938,7 +951,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count) { // Optionally restore all double registers. if (save_doubles) { - CpuFeatureScope scope(this, VFP2); // Calculate the stack location of the saved doubles and restore them. const int offset = 2 * kPointerSize; sub(r3, fp, @@ -975,7 +987,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, } void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) { - ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { Move(dst, d0); } else { @@ -1402,7 +1413,6 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, // Check the context is a native context. if (emit_debug_code()) { - // TODO(119): avoid push(holder_reg)/pop(holder_reg) // Cannot use ip as a temporary in this verification code. Due to the fact // that ip is clobbered as part of cmp with an object Operand. push(holder_reg); // Temporarily save holder on the stack. @@ -1421,7 +1431,6 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, // Check the context is a native context. if (emit_debug_code()) { - // TODO(119): avoid push(holder_reg)/pop(holder_reg) // Cannot use ip as a temporary in this verification code. Due to the fact // that ip is clobbered as part of cmp with an object Operand. push(holder_reg); // Temporarily save holder on the stack. @@ -1991,7 +2000,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, Register scratch4, Label* fail, int elements_offset) { - Label smi_value, maybe_nan, have_double_value, is_nan, done; + Label smi_value, store; Register mantissa_reg = scratch2; Register exponent_reg = scratch3; @@ -2005,73 +2014,28 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, fail, DONT_DO_SMI_CHECK); - // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 - // in the exponent. 
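
// Aside on VFPEnsureFPSCRState/VFPCanonicalizeNaN defined above: with
// the FPSCR default-NaN bit set, any VFP arithmetic involving a NaN
// produces the single hardware default NaN, so "vsub(v, v, 0.0)" is
// the identity on every number (including -0.0) but collapses all NaN
// payloads, guaranteeing the hole NaN pattern can never escape into a
// FixedDoubleArray. Host-side model; the bit pattern is the standard
// quiet-NaN encoding, assumed to match V8's canonical NaN:

#include <cmath>
#include <cstdint>
#include <cstring>

static double DefaultNaN() {
  const uint64_t kBits = 0x7FF8000000000000ULL;  // sign 0, quiet, payload 0
  double d;
  std::memcpy(&d, &kBits, sizeof d);
  return d;
}

static double CanonicalizeNaN(double value) {
  return std::isnan(value) ? DefaultNaN() : value;  // what vsub achieves
}
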
- mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32)); - ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); - cmp(exponent_reg, scratch1); - b(ge, &maybe_nan); - - ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); - - bind(&have_double_value); - add(scratch1, elements_reg, - Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - str(mantissa_reg, FieldMemOperand( - scratch1, FixedDoubleArray::kHeaderSize - elements_offset)); - uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset + - sizeof(kHoleNanLower32); - str(exponent_reg, FieldMemOperand(scratch1, offset)); - jmp(&done); - - bind(&maybe_nan); - // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise - // it's an Infinity, and the non-NaN code path applies. - b(gt, &is_nan); - ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); - cmp(mantissa_reg, Operand::Zero()); - b(eq, &have_double_value); - bind(&is_nan); - // Load canonical NaN for storing into the double array. - uint64_t nan_int64 = BitCast<uint64_t>( - FixedDoubleArray::canonical_not_the_hole_nan_as_double()); - mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64))); - mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32))); - jmp(&have_double_value); + vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); + // Force a canonical NaN. + if (emit_debug_code()) { + vmrs(ip); + tst(ip, Operand(kVFPDefaultNaNModeControlBit)); + Assert(ne, "Default NaN mode not set"); + } + VFPCanonicalizeNaN(d0); + b(&store); bind(&smi_value); + Register untagged_value = scratch1; + SmiUntag(untagged_value, value_reg); + FloatingPointHelper::ConvertIntToDouble( + this, untagged_value, FloatingPointHelper::kVFPRegisters, d0, + mantissa_reg, exponent_reg, scratch4, s2); + + bind(&store); add(scratch1, elements_reg, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag - - elements_offset)); - add(scratch1, scratch1, Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - // scratch1 is now effective address of the double element - - FloatingPointHelper::Destination destination; - if (CpuFeatures::IsSupported(VFP2)) { - destination = FloatingPointHelper::kVFPRegisters; - } else { - destination = FloatingPointHelper::kCoreRegisters; - } - - Register untagged_value = elements_reg; - SmiUntag(untagged_value, value_reg); - FloatingPointHelper::ConvertIntToDouble(this, - untagged_value, - destination, - d0, - mantissa_reg, - exponent_reg, - scratch4, - s2); - if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatureScope scope(this, VFP2); - vstr(d0, scratch1, 0); - } else { - str(mantissa_reg, MemOperand(scratch1, 0)); - str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); - } - bind(&done); + vstr(d0, FieldMemOperand(scratch1, + FixedDoubleArray::kHeaderSize - elements_offset)); } @@ -2425,9 +2389,6 @@ void MacroAssembler::SmiToDoubleVFPRegister(Register smi, void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input, DwVfpRegister double_scratch) { ASSERT(!double_input.is(double_scratch)); - ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatureScope scope(this, VFP2); - vcvt_s32_f64(double_scratch.low(), double_input); vcvt_f64_s32(double_scratch, double_scratch.low()); VFPCompareAndSetFlags(double_input, double_scratch); @@ -2438,9 +2399,6 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result, DwVfpRegister double_input, DwVfpRegister double_scratch) { ASSERT(!double_input.is(double_scratch)); - 
ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatureScope scope(this, VFP2); - vcvt_s32_f64(double_scratch.low(), double_input); vmov(result, double_scratch.low()); vcvt_f64_s32(double_scratch, double_scratch.low()); @@ -2456,8 +2414,6 @@ void MacroAssembler::TryInt32Floor(Register result, Label* exact) { ASSERT(!result.is(input_high)); ASSERT(!double_input.is(double_scratch)); - ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatureScope scope(this, VFP2); Label negative, exception; // Test for NaN and infinities. @@ -2502,26 +2458,18 @@ void MacroAssembler::ECMAConvertNumberToInt32(Register source, Register scratch, DwVfpRegister double_scratch1, DwVfpRegister double_scratch2) { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(this, VFP2); - vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset)); - ECMAToInt32VFP(result, double_scratch1, double_scratch2, - scratch, input_high, input_low); - } else { - Ldrd(input_low, input_high, - FieldMemOperand(source, HeapNumber::kValueOffset)); - ECMAToInt32NoVFP(result, scratch, input_high, input_low); - } + vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset)); + ECMAToInt32(result, double_scratch1, double_scratch2, + scratch, input_high, input_low); } -void MacroAssembler::ECMAToInt32VFP(Register result, - DwVfpRegister double_input, - DwVfpRegister double_scratch, - Register scratch, - Register input_high, - Register input_low) { - CpuFeatureScope scope(this, VFP2); +void MacroAssembler::ECMAToInt32(Register result, + DwVfpRegister double_input, + DwVfpRegister double_scratch, + Register scratch, + Register input_high, + Register input_low) { ASSERT(!input_high.is(result)); ASSERT(!input_low.is(result)); ASSERT(!input_low.is(input_high)); @@ -2561,58 +2509,6 @@ void MacroAssembler::ECMAToInt32VFP(Register result, } -void MacroAssembler::ECMAToInt32NoVFP(Register result, - Register scratch, - Register input_high, - Register input_low) { - ASSERT(!result.is(scratch)); - ASSERT(!result.is(input_high)); - ASSERT(!result.is(input_low)); - ASSERT(!scratch.is(input_high)); - ASSERT(!scratch.is(input_low)); - ASSERT(!input_high.is(input_low)); - - Label both, out_of_range, negate, done; - - Ubfx(scratch, input_high, - HeapNumber::kExponentShift, HeapNumber::kExponentBits); - // Load scratch with exponent. - sub(scratch, scratch, Operand(HeapNumber::kExponentBias)); - // If exponent is negative, 0 < input < 1, the result is 0. - // If exponent is greater than or equal to 84, the 32 less significant - // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits), - // the result is 0. - // This test also catch Nan and infinities which also return 0. - cmp(scratch, Operand(84)); - // We do an unsigned comparison so negative numbers are treated as big - // positive number and the two tests above are done in one test. - b(hs, &out_of_range); - - // Load scratch with 20 - exponent. - rsb(scratch, scratch, Operand(20), SetCC); - b(mi, &both); - - // Test 0 and -0. - bic(result, input_high, Operand(HeapNumber::kSignMask)); - orr(result, result, Operand(input_low), SetCC); - b(eq, &done); - // 0 <= exponent <= 20, shift only input_high. - // Scratch contains: 20 - exponent. - Ubfx(result, input_high, - 0, HeapNumber::kMantissaBitsInTopWord); - // Set the implicit 1 before the mantissa part in input_high. 
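
// Aside on ECMAToInt32 (the renamed truncating conversion above):
// ECMA-262 9.5 defines ToInt32 as truncation toward zero, reduction
// modulo 2^32, then reinterpretation as a signed 32-bit value. A
// host-side reference implementation:

#include <cmath>
#include <cstdint>

static int32_t EcmaToInt32(double value) {
  if (!std::isfinite(value)) return 0;   // NaN and +/-Infinity map to 0
  double truncated = std::trunc(value);  // round toward zero
  double m = std::fmod(truncated, 4294967296.0);  // reduce mod 2^32
  if (m < 0) m += 4294967296.0;
  // m is now integral in [0, 2^32): exact as uint32, then the cast
  // wraps it into the signed range.
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}
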
- orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord)); - mov(result, Operand(result, LSR, scratch)); - b(&negate); - - bind(&both); - // Restore scratch to exponent - 1 to be consistent with ECMAToInt32VFP. - rsb(scratch, scratch, Operand(19)); - ECMAToInt32Tail(result, scratch, input_high, input_low, - &out_of_range, &negate, &done); -} - - void MacroAssembler::ECMAToInt32Tail(Register result, Register scratch, Register input_high, @@ -2715,10 +2611,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); mov(r0, Operand(function->nargs)); mov(r1, Operand(ExternalReference(function, isolate()))); - SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2) - ? kSaveFPRegs - : kDontSaveFPRegs; - CEntryStub stub(1, mode); + CEntryStub stub(1, kSaveFPRegs); CallStub(&stub); } @@ -3244,27 +3137,24 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result, // Copies a fixed number of fields of heap objects from src to dst. void MacroAssembler::CopyFields(Register dst, Register src, - RegList temps, + DwVfpRegister double_scratch, + SwVfpRegister single_scratch, int field_count) { - // At least one bit set in the first 15 registers. - ASSERT((temps & ((1 << 15) - 1)) != 0); - ASSERT((temps & dst.bit()) == 0); - ASSERT((temps & src.bit()) == 0); - // Primitive implementation using only one temporary register. - - Register tmp = no_reg; - // Find a temp register in temps list. - for (int i = 0; i < 15; i++) { - if ((temps & (1 << i)) != 0) { - tmp.set_code(i); - break; - } + int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize); + for (int i = 0; i < double_count; i++) { + vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes)); + vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes)); } - ASSERT(!tmp.is(no_reg)); - for (int i = 0; i < field_count; i++) { - ldr(tmp, FieldMemOperand(src, i * kPointerSize)); - str(tmp, FieldMemOperand(dst, i * kPointerSize)); + STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize); + STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes); + + int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize); + if (remain != 0) { + vldr(single_scratch, + FieldMemOperand(src, (field_count - 1) * kPointerSize)); + vstr(single_scratch, + FieldMemOperand(dst, (field_count - 1) * kPointerSize)); } } @@ -3463,7 +3353,6 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) { - ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { Move(d0, dreg); } else { @@ -3474,7 +3363,6 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) { void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2) { - ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { if (dreg2.is(d0)) { ASSERT(!dreg1.is(d1)); @@ -3493,7 +3381,6 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1, void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg, Register reg) { - ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { Move(d0, dreg); Move(r0, reg); diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 958fcacb3..86ae8f22d 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -460,6 +460,19 @@ class MacroAssembler: public Assembler { const 
MemOperand& dst, Condition cond = al); + // Ensure that FPSCR contains values needed by JavaScript. + // We need the NaNModeControlBit to be sure that operations like + // vadd and vsub generate the Canonical NaN (if a NaN must be generated). + // In VFP3 it will be always the Canonical NaN. + // In VFP2 it will be either the Canonical NaN or the negative version + // of the Canonical NaN. It doesn't matter if we have two values. The aim + // is to be sure to never generate the hole NaN. + void VFPEnsureFPSCRState(Register scratch); + + // If the value is a NaN, canonicalize the value else, do nothing. + void VFPCanonicalizeNaN(const DwVfpRegister value, + const Condition cond = al); + // Compare double values and move the result to the normal condition flags. void VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2, @@ -743,7 +756,11 @@ class MacroAssembler: public Assembler { Label* gc_required); // Copies a fixed number of fields of heap objects from src to dst. - void CopyFields(Register dst, Register src, RegList temps, int field_count); + void CopyFields(Register dst, + Register src, + DwVfpRegister double_scratch, + SwVfpRegister single_scratch, + int field_count); // Copies a number of bytes from src to dst. All registers are clobbered. On // exit src and dst will point to the place just after where the last byte was @@ -969,20 +986,12 @@ class MacroAssembler: public Assembler { // Performs a truncating conversion of a floating point number as used by // the JS bitwise operations. See ECMA-262 9.5: ToInt32. // Exits with 'result' holding the answer and all other registers clobbered. - void ECMAToInt32VFP(Register result, - DwVfpRegister double_input, - DwVfpRegister double_scratch, - Register scratch, - Register input_high, - Register input_low); - - // Performs a truncating conversion of a floating point number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. - // Exits with 'result' holding the answer. - void ECMAToInt32NoVFP(Register result, - Register scratch, - Register input_high, - Register input_low); + void ECMAToInt32(Register result, + DwVfpRegister double_input, + DwVfpRegister double_scratch, + Register scratch, + Register input_high, + Register input_low); // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz // instruction. On pre-ARM5 hardware this routine gives the wrong answer @@ -1140,7 +1149,9 @@ class MacroAssembler: public Assembler { // EABI variant for double arguments in use. bool use_eabi_hardfloat() { -#if USE_EABI_HARDFLOAT +#ifdef __arm__ + return OS::ArmUsingHardFloat(); +#elif USE_EABI_HARDFLOAT return true; #else return false; diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 2551e14e4..ad4d77df2 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -721,7 +721,7 @@ void Simulator::CheckICache(v8::internal::HashMap* i_cache, Instruction::kInstrSize) == 0); } else { // Cache miss. Load memory into the cache. 
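
// Aside on the CopyFields rewrite above: instead of scavenging a core
// scratch register out of a RegList, fields are now copied in 8-byte
// chunks through a VFP double register, with one single-precision
// load/store for an odd trailing word. Host-side model:

#include <cstdint>
#include <cstring>

static void CopyFields(uint8_t* dst, const uint8_t* src, int field_count) {
  const int kPointerSize = 4;  // 32-bit ARM
  const int kDoubleSize = 8;
  const int fields_per_double = kDoubleSize / kPointerSize;
  for (int i = 0; i < field_count / fields_per_double; i++) {
    // vldr/vstr through double_scratch
    std::memcpy(dst + i * kDoubleSize, src + i * kDoubleSize, kDoubleSize);
  }
  if (field_count % fields_per_double != 0) {
    // vldr/vstr through single_scratch for the last word
    const int last = (field_count - 1) * kPointerSize;
    std::memcpy(dst + last, src + last, kPointerSize);
  }
}
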
- memcpy(cached_line, line, CachePage::kLineLength); + OS::MemCopy(cached_line, line, CachePage::kLineLength); *cache_valid_byte = CachePage::LINE_VALID; } } @@ -773,6 +773,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) { c_flag_FPSCR_ = false; v_flag_FPSCR_ = false; FPSCR_rounding_mode_ = RZ; + FPSCR_default_NaN_mode_ = true; inv_op_vfp_flag_ = false; div_zero_vfp_flag_ = false; @@ -902,8 +903,8 @@ double Simulator::get_double_from_register_pair(int reg) { // Read the bits from the unsigned integer register_[] array // into the double precision floating point value and return it. char buffer[2 * sizeof(vfp_registers_[0])]; - memcpy(buffer, ®isters_[reg], 2 * sizeof(registers_[0])); - memcpy(&dm_val, buffer, 2 * sizeof(registers_[0])); + OS::MemCopy(buffer, ®isters_[reg], 2 * sizeof(registers_[0])); + OS::MemCopy(&dm_val, buffer, 2 * sizeof(registers_[0])); return(dm_val); } @@ -953,9 +954,9 @@ void Simulator::SetVFPRegister(int reg_index, const InputType& value) { if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters()); char buffer[register_size * sizeof(vfp_registers_[0])]; - memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0])); - memcpy(&vfp_registers_[reg_index * register_size], buffer, - register_size * sizeof(vfp_registers_[0])); + OS::MemCopy(buffer, &value, register_size * sizeof(vfp_registers_[0])); + OS::MemCopy(&vfp_registers_[reg_index * register_size], buffer, + register_size * sizeof(vfp_registers_[0])); } @@ -967,64 +968,34 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) { ReturnType value = 0; char buffer[register_size * sizeof(vfp_registers_[0])]; - memcpy(buffer, &vfp_registers_[register_size * reg_index], - register_size * sizeof(vfp_registers_[0])); - memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0])); + OS::MemCopy(buffer, &vfp_registers_[register_size * reg_index], + register_size * sizeof(vfp_registers_[0])); + OS::MemCopy(&value, buffer, register_size * sizeof(vfp_registers_[0])); return value; } -// For use in calls that take two double values, constructed either +// Runtime FP routines take up to two double arguments and zero +// or one integer arguments. All are constructed here, // from r0-r3 or d0 and d1. -void Simulator::GetFpArgs(double* x, double* y) { +void Simulator::GetFpArgs(double* x, double* y, int32_t* z) { if (use_eabi_hardfloat()) { *x = vfp_registers_[0]; *y = vfp_registers_[1]; + *z = registers_[1]; } else { // We use a char buffer to get around the strict-aliasing rules which // otherwise allow the compiler to optimize away the copy. char buffer[sizeof(*x)]; // Registers 0 and 1 -> x. - memcpy(buffer, registers_, sizeof(*x)); - memcpy(x, buffer, sizeof(*x)); + OS::MemCopy(buffer, registers_, sizeof(*x)); + OS::MemCopy(x, buffer, sizeof(*x)); // Registers 2 and 3 -> y. - memcpy(buffer, registers_ + 2, sizeof(*y)); - memcpy(y, buffer, sizeof(*y)); - } -} - -// For use in calls that take one double value, constructed either -// from r0 and r1 or d0. -void Simulator::GetFpArgs(double* x) { - if (use_eabi_hardfloat()) { - *x = vfp_registers_[0]; - } else { - // We use a char buffer to get around the strict-aliasing rules which - // otherwise allow the compiler to optimize away the copy. - char buffer[sizeof(*x)]; - // Registers 0 and 1 -> x. - memcpy(buffer, registers_, sizeof(*x)); - memcpy(x, buffer, sizeof(*x)); - } -} - - -// For use in calls that take one double value constructed either -// from r0 and r1 or d0 and one integer value. 
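
// Aside on the GetFpArgs consolidation above: in the soft-float ABI a
// double argument arrives in two core registers, and the simulator
// assembles it through a char buffer precisely because a direct
// pointer cast between int32_t and double storage would violate
// strict aliasing. Minimal model of that pattern:

#include <cstdint>
#include <cstring>

static double DoubleFromRegisterPair(const int32_t regs[2]) {
  char buffer[sizeof(double)];
  std::memcpy(buffer, regs, sizeof buffer);  // e.g. r0/r1 -> buffer
  double d;
  std::memcpy(&d, buffer, sizeof d);         // buffer -> double
  return d;
}
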
-void Simulator::GetFpArgs(double* x, int32_t* y) { - if (use_eabi_hardfloat()) { - *x = vfp_registers_[0]; - *y = registers_[1]; - } else { - // We use a char buffer to get around the strict-aliasing rules which - // otherwise allow the compiler to optimize away the copy. - char buffer[sizeof(*x)]; - // Registers 0 and 1 -> x. - memcpy(buffer, registers_, sizeof(*x)); - memcpy(x, buffer, sizeof(*x)); - // Register 2 -> y. - memcpy(buffer, registers_ + 2, sizeof(*y)); - memcpy(y, buffer, sizeof(*y)); + OS::MemCopy(buffer, registers_ + 2, sizeof(*y)); + OS::MemCopy(y, buffer, sizeof(*y)); + // Register 2 -> z. + OS::MemCopy(buffer, registers_ + 2, sizeof(*z)); + OS::MemCopy(z, buffer, sizeof(*z)); } } @@ -1033,14 +1004,14 @@ void Simulator::GetFpArgs(double* x, int32_t* y) { void Simulator::SetFpResult(const double& result) { if (use_eabi_hardfloat()) { char buffer[2 * sizeof(vfp_registers_[0])]; - memcpy(buffer, &result, sizeof(buffer)); + OS::MemCopy(buffer, &result, sizeof(buffer)); // Copy result to d0. - memcpy(vfp_registers_, buffer, sizeof(buffer)); + OS::MemCopy(vfp_registers_, buffer, sizeof(buffer)); } else { char buffer[2 * sizeof(registers_[0])]; - memcpy(buffer, &result, sizeof(buffer)); + OS::MemCopy(buffer, &result, sizeof(buffer)); // Copy result to r0 and r1. - memcpy(registers_, buffer, sizeof(buffer)); + OS::MemCopy(registers_, buffer, sizeof(buffer)); } } @@ -1619,12 +1590,12 @@ void Simulator::HandleVList(Instruction* instr) { ReadW(reinterpret_cast<int32_t>(address + 1), instr) }; double d; - memcpy(&d, data, 8); + OS::MemCopy(&d, data, 8); set_d_register_from_double(reg, d); } else { int32_t data[2]; double d = get_double_from_d_register(reg); - memcpy(data, &d, 8); + OS::MemCopy(data, &d, 8); WriteW(reinterpret_cast<int32_t>(address), data[0], instr); WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr); } @@ -1647,10 +1618,12 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0, int32_t arg3, int32_t arg4, int32_t arg5); -typedef double (*SimulatorRuntimeFPCall)(int32_t arg0, - int32_t arg1, - int32_t arg2, - int32_t arg3); + +// These prototypes handle the four types of FP calls. +typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1); +typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1); +typedef double (*SimulatorRuntimeFPCall)(double darg0); +typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0); // This signature supports direct call in to API function native callback // (refer to InvocationCallback in v8.h). 
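
// Aside on the four FP-call prototypes above: the simulator now keys
// the call on the redirection type, so each host function is invoked
// through a correctly typed pointer instead of one generic signature.
// Sketch of the dispatch, with names abbreviated from the typedefs:

#include <cstdint>

typedef int64_t (*CompareCall)(double, double);
typedef double (*FPFPCall)(double, double);
typedef double (*FPCall)(double);
typedef double (*FPIntCall)(double, int32_t);

enum FPCallType { kCompare, kFPFP, kFP, kFPInt };

static void DispatchFPCall(FPCallType type, void* external,
                           double d0, double d1, int32_t i0,
                           int64_t* iresult, double* dresult) {
  switch (type) {
    case kCompare:  // integer result, returned in a register pair
      *iresult = reinterpret_cast<CompareCall>(external)(d0, d1);
      break;
    case kFPFP:
      *dresult = reinterpret_cast<FPFPCall>(external)(d0, d1);
      break;
    case kFP:
      *dresult = reinterpret_cast<FPCall>(external)(d0);
      break;
    case kFPInt:
      *dresult = reinterpret_cast<FPIntCall>(external)(d0, i0);
      break;
  }
}
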
@@ -1716,27 +1689,27 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { intptr_t external = reinterpret_cast<intptr_t>(redirection->external_function()); if (fp_call) { + double dval0, dval1; // one or two double parameters + int32_t ival; // zero or one integer parameters + int64_t iresult = 0; // integer return value + double dresult = 0; // double return value + GetFpArgs(&dval0, &dval1, &ival); if (::v8::internal::FLAG_trace_sim || !stack_aligned) { - SimulatorRuntimeFPCall target = - reinterpret_cast<SimulatorRuntimeFPCall>(external); - double dval0, dval1; - int32_t ival; + SimulatorRuntimeCall generic_target = + reinterpret_cast<SimulatorRuntimeCall>(external); switch (redirection->type()) { case ExternalReference::BUILTIN_FP_FP_CALL: case ExternalReference::BUILTIN_COMPARE_CALL: - GetFpArgs(&dval0, &dval1); PrintF("Call to host function at %p with args %f, %f", - FUNCTION_ADDR(target), dval0, dval1); + FUNCTION_ADDR(generic_target), dval0, dval1); break; case ExternalReference::BUILTIN_FP_CALL: - GetFpArgs(&dval0); PrintF("Call to host function at %p with arg %f", - FUNCTION_ADDR(target), dval0); + FUNCTION_ADDR(generic_target), dval0); break; case ExternalReference::BUILTIN_FP_INT_CALL: - GetFpArgs(&dval0, &ival); PrintF("Call to host function at %p with args %f, %d", - FUNCTION_ADDR(target), dval0, ival); + FUNCTION_ADDR(generic_target), dval0, ival); break; default: UNREACHABLE(); @@ -1748,22 +1721,54 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { PrintF("\n"); } CHECK(stack_aligned); - if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) { + switch (redirection->type()) { + case ExternalReference::BUILTIN_COMPARE_CALL: { + SimulatorRuntimeCompareCall target = + reinterpret_cast<SimulatorRuntimeCompareCall>(external); + iresult = target(dval0, dval1); + set_register(r0, static_cast<int32_t>(iresult)); + set_register(r1, static_cast<int32_t>(iresult >> 32)); + break; + } + case ExternalReference::BUILTIN_FP_FP_CALL: { + SimulatorRuntimeFPFPCall target = + reinterpret_cast<SimulatorRuntimeFPFPCall>(external); + dresult = target(dval0, dval1); + SetFpResult(dresult); + break; + } + case ExternalReference::BUILTIN_FP_CALL: { SimulatorRuntimeFPCall target = - reinterpret_cast<SimulatorRuntimeFPCall>(external); - double result = target(arg0, arg1, arg2, arg3); - SetFpResult(result); - } else { - SimulatorRuntimeCall target = - reinterpret_cast<SimulatorRuntimeCall>(external); - int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5); - int32_t lo_res = static_cast<int32_t>(result); - int32_t hi_res = static_cast<int32_t>(result >> 32); - if (::v8::internal::FLAG_trace_sim) { - PrintF("Returned %08x\n", lo_res); + reinterpret_cast<SimulatorRuntimeFPCall>(external); + dresult = target(dval0); + SetFpResult(dresult); + break; + } + case ExternalReference::BUILTIN_FP_INT_CALL: { + SimulatorRuntimeFPIntCall target = + reinterpret_cast<SimulatorRuntimeFPIntCall>(external); + dresult = target(dval0, ival); + SetFpResult(dresult); + break; + } + default: + UNREACHABLE(); + break; + } + if (::v8::internal::FLAG_trace_sim || !stack_aligned) { + switch (redirection->type()) { + case ExternalReference::BUILTIN_COMPARE_CALL: + PrintF("Returned %08x\n", static_cast<int32_t>(iresult)); + break; + case ExternalReference::BUILTIN_FP_FP_CALL: + case ExternalReference::BUILTIN_FP_CALL: + case ExternalReference::BUILTIN_FP_INT_CALL: + PrintF("Returned %f\n", dresult); + break; + default: + UNREACHABLE(); + break; } - set_register(r0, lo_res); - set_register(r1, 
hi_res); } } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) { SimulatorRuntimeDirectApiCall target = @@ -1864,6 +1869,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { } +double Simulator::canonicalizeNaN(double value) { + return (FPSCR_default_NaN_mode_ && isnan(value)) ? + FixedDoubleArray::canonical_not_the_hole_nan_as_double() : value; +} + // Stop helper functions. bool Simulator::isStopInstruction(Instruction* instr) { return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode); @@ -2724,11 +2734,13 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { // vabs double dm_value = get_double_from_d_register(vm); double dd_value = fabs(dm_value); + dd_value = canonicalizeNaN(dd_value); set_d_register_from_double(vd, dd_value); } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) { // vneg double dm_value = get_double_from_d_register(vm); double dd_value = -dm_value; + dd_value = canonicalizeNaN(dd_value); set_d_register_from_double(vd, dd_value); } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) { DecodeVCVTBetweenDoubleAndSingle(instr); @@ -2744,6 +2756,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { // vsqrt double dm_value = get_double_from_d_register(vm); double dd_value = sqrt(dm_value); + dd_value = canonicalizeNaN(dd_value); set_d_register_from_double(vd, dd_value); } else if (instr->Opc3Value() == 0x0) { // vmov immediate. @@ -2765,12 +2778,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { double dn_value = get_double_from_d_register(vn); double dm_value = get_double_from_d_register(vm); double dd_value = dn_value - dm_value; + dd_value = canonicalizeNaN(dd_value); set_d_register_from_double(vd, dd_value); } else { // vadd double dn_value = get_double_from_d_register(vn); double dm_value = get_double_from_d_register(vm); double dd_value = dn_value + dm_value; + dd_value = canonicalizeNaN(dd_value); set_d_register_from_double(vd, dd_value); } } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) { @@ -2782,6 +2797,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { double dn_value = get_double_from_d_register(vn); double dm_value = get_double_from_d_register(vm); double dd_value = dn_value * dm_value; + dd_value = canonicalizeNaN(dd_value); set_d_register_from_double(vd, dd_value); } else if ((instr->Opc1Value() == 0x0)) { // vmla, vmls @@ -2799,9 +2815,13 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { // result with too high precision. set_d_register_from_double(vd, dn_val * dm_val); if (is_vmls) { - set_d_register_from_double(vd, dd_val - get_double_from_d_register(vd)); + set_d_register_from_double( + vd, + canonicalizeNaN(dd_val - get_double_from_d_register(vd))); } else { - set_d_register_from_double(vd, dd_val + get_double_from_d_register(vd)); + set_d_register_from_double( + vd, + canonicalizeNaN(dd_val + get_double_from_d_register(vd))); } } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) { // vdiv @@ -2813,6 +2833,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { double dm_value = get_double_from_d_register(vm); double dd_value = dn_value / dm_value; div_zero_vfp_flag_ = (dm_value == 0); + dd_value = canonicalizeNaN(dd_value); set_d_register_from_double(vd, dd_value); } else { UNIMPLEMENTED(); // Not used by V8. 
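The simulator changes above wire the FPSCR default-NaN (DN) bit through every VFP arithmetic result (vabs, vneg, vsqrt, vadd, vsub, vmul, vmla, vmls, vdiv): when DN mode is set, any NaN result is replaced by one canonical quiet NaN, so arbitrary NaN payloads never reach V8's heap doubles. A hedged sketch of the check, with std::numeric_limits<double>::quiet_NaN() standing in for the specific bit pattern V8 uses (FixedDoubleArray::canonical_not_the_hole_nan_as_double()):

    #include <cmath>
    #include <limits>

    // default_nan_mode models the FPSCR DN bit tracked by the simulator.
    double CanonicalizeNaN(bool default_nan_mode, double value) {
      if (default_nan_mode && std::isnan(value)) {
        // V8 substitutes one fixed quiet-NaN pattern, chosen so it cannot
        // collide with its "hole" NaN; quiet_NaN() is only a stand-in.
        return std::numeric_limits<double>::quiet_NaN();
      }
      return value;
    }

Usage mirrors the hunks above, e.g. dd_value = CanonicalizeNaN(dn_bit, dn_value + dm_value) before the result is written back to the register.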
@@ -2828,9 +2849,9 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4); double dd_value = get_double_from_d_register(vd); int32_t data[2]; - memcpy(data, &dd_value, 8); + OS::MemCopy(data, &dd_value, 8); data[instr->Bit(21)] = get_register(instr->RtValue()); - memcpy(&dd_value, data, 8); + OS::MemCopy(&dd_value, data, 8); set_d_register_from_double(vd, dd_value); } else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x0) && @@ -2846,6 +2867,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { (z_flag_FPSCR_ << 30) | (c_flag_FPSCR_ << 29) | (v_flag_FPSCR_ << 28) | + (FPSCR_default_NaN_mode_ << 25) | (inexact_vfp_flag_ << 4) | (underflow_vfp_flag_ << 3) | (overflow_vfp_flag_ << 2) | @@ -2868,6 +2890,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { z_flag_FPSCR_ = (rt_value >> 30) & 1; c_flag_FPSCR_ = (rt_value >> 29) & 1; v_flag_FPSCR_ = (rt_value >> 28) & 1; + FPSCR_default_NaN_mode_ = (rt_value >> 25) & 1; inexact_vfp_flag_ = (rt_value >> 4) & 1; underflow_vfp_flag_ = (rt_value >> 3) & 1; overflow_vfp_flag_ = (rt_value >> 2) & 1; @@ -3179,13 +3202,13 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) { if (instr->HasL()) { int32_t data[2]; double d = get_double_from_d_register(vm); - memcpy(data, &d, 8); + OS::MemCopy(data, &d, 8); set_register(rt, data[0]); set_register(rn, data[1]); } else { int32_t data[] = { get_register(rt), get_register(rn) }; double d; - memcpy(&d, data, 8); + OS::MemCopy(&d, data, 8); set_d_register_from_double(vm, d); } } @@ -3208,13 +3231,13 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) { ReadW(address + 4, instr) }; double val; - memcpy(&val, data, 8); + OS::MemCopy(&val, data, 8); set_d_register_from_double(vd, val); } else { // Store double to memory: vstr. int32_t data[2]; double val = get_double_from_d_register(vd); - memcpy(data, &val, 8); + OS::MemCopy(data, &val, 8); WriteW(address, data[0], instr); WriteW(address + 4, data[1], instr); } @@ -3437,9 +3460,9 @@ double Simulator::CallFP(byte* entry, double d0, double d1) { } else { int buffer[2]; ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0)); - memcpy(buffer, &d0, sizeof(d0)); + OS::MemCopy(buffer, &d0, sizeof(d0)); set_dw_register(0, buffer); - memcpy(buffer, &d1, sizeof(d1)); + OS::MemCopy(buffer, &d1, sizeof(d1)); set_dw_register(2, buffer); } CallInternal(entry); diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index b918ecf96..45ae999b5 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -274,6 +274,7 @@ class Simulator { // Support for VFP. void Compute_FPSCR_Flags(double val1, double val2); void Copy_FPSCR_to_APSR(); + inline double canonicalizeNaN(double value); // Helper functions to decode common "addressing" modes int32_t GetShiftRm(Instruction* instr, bool* carry_out); @@ -347,10 +348,8 @@ class Simulator { void* external_function, v8::internal::ExternalReference::Type type); - // For use in calls that take double value arguments. - void GetFpArgs(double* x, double* y); - void GetFpArgs(double* x); - void GetFpArgs(double* x, int32_t* y); + // Handle arguments and return value for runtime FP functions. + void GetFpArgs(double* x, double* y, int32_t* z); void SetFpResult(const double& result); void TrashCallerSaveRegisters(); @@ -381,6 +380,7 @@ class Simulator { // VFP rounding mode. See ARM DDI 0406B Page A2-29. VFPRoundingMode FPSCR_rounding_mode_; + bool FPSCR_default_NaN_mode_; // VFP FP exception flags architecture state. 
bool inv_op_vfp_flag_; diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index f2d45e190..f22acb470 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -417,30 +417,48 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, } -// Generate StoreField code, value is passed in r0 register. +// Generate code to check that a global property cell is empty. Create +// the property cell at compilation time if no cell exists for the +// property. +static void GenerateCheckPropertyCell(MacroAssembler* masm, + Handle<GlobalObject> global, + Handle<Name> name, + Register scratch, + Label* miss) { + Handle<JSGlobalPropertyCell> cell = + GlobalObject::EnsurePropertyCell(global, name); + ASSERT(cell->value()->IsTheHole()); + __ mov(scratch, Operand(cell)); + __ ldr(scratch, + FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(scratch, ip); + __ b(ne, miss); +} + + +// Generate StoreTransition code, value is passed in r0 register. // When leaving generated code after success, the receiver_reg and name_reg // may be clobbered. Upon branch to miss_label, the receiver and name // registers have their original values. -void StubCompiler::GenerateStoreField(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - Handle<Name> name, - Register receiver_reg, - Register name_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Label* miss_label, - Label* miss_restore_name) { +void StubCompiler::GenerateStoreTransition(MacroAssembler* masm, + Handle<JSObject> object, + LookupResult* lookup, + Handle<Map> transition, + Handle<Name> name, + Register receiver_reg, + Register name_reg, + Register value_reg, + Register scratch1, + Register scratch2, + Label* miss_label, + Label* miss_restore_name) { // r0 : value Label exit; // Check that the map of the object hasn't changed. - CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS - : REQUIRE_EXACT_MAP; __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label, - DO_SMI_CHECK, mode); + DO_SMI_CHECK, REQUIRE_EXACT_MAP); // Perform global security token check if needed. if (object->IsJSGlobalProxy()) { @@ -448,7 +466,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, } // Check that we are allowed to write this. - if (!transition.is_null() && object->GetPrototype()->IsJSObject()) { + if (object->GetPrototype()->IsJSObject()) { JSObject* holder; // holder == object indicates that no property was found. if (lookup->holder() != *object) { @@ -466,12 +484,18 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // If no property was found, and the holder (the last object in the // prototype chain) is in slow mode, we need to do a negative lookup on the // holder. 
- if (lookup->holder() == *object && - !holder->HasFastProperties() && - !holder->IsJSGlobalProxy() && - !holder->IsJSGlobalObject()) { - GenerateDictionaryNegativeLookup( - masm, miss_restore_name, holder_reg, name, scratch1, scratch2); + if (lookup->holder() == *object) { + if (holder->IsJSGlobalObject()) { + GenerateCheckPropertyCell( + masm, + Handle<GlobalObject>(GlobalObject::cast(holder)), + name, + scratch1, + miss_restore_name); + } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) { + GenerateDictionaryNegativeLookup( + masm, miss_restore_name, holder_reg, name, scratch1, scratch2); + } } } @@ -480,7 +504,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); // Perform map transition for the receiver if necessary. - if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) { + if (object->map()->unused_property_fields() == 0) { // The properties must be extended before we can store the value. // We jump to a runtime call that extends the properties array. __ push(receiver_reg); @@ -494,33 +518,113 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, return; } - int index; - if (!transition.is_null()) { - // Update the map of the object. - __ mov(scratch1, Operand(transition)); - __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); + // Update the map of the object. + __ mov(scratch1, Operand(transition)); + __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); + + // Update the write barrier for the map field and pass the now unused + // name_reg as scratch register. + __ RecordWriteField(receiver_reg, + HeapObject::kMapOffset, + scratch1, + name_reg, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + + int index = transition->instance_descriptors()->GetFieldIndex( + transition->LastAdded()); - // Update the write barrier for the map field and pass the now unused - // name_reg as scratch register. + // Adjust for the number of properties stored in the object. Even in the + // face of a transition we can use the old map here because the size of the + // object and the number of in-object properties is not going to change. + index -= object->map()->inobject_properties(); + + // TODO(verwaest): Share this code as a code stub. + if (index < 0) { + // Set the property straight into the object. + int offset = object->map()->instance_size() + (index * kPointerSize); + __ str(value_reg, FieldMemOperand(receiver_reg, offset)); + + // Skip updating write barrier if storing a smi. + __ JumpIfSmi(value_reg, &exit); + + // Update the write barrier for the array address. + // Pass the now unused name_reg as a scratch register. + __ mov(name_reg, value_reg); __ RecordWriteField(receiver_reg, - HeapObject::kMapOffset, - scratch1, + offset, name_reg, + scratch1, kLRHasNotBeenSaved, - kDontSaveFPRegs, - OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - index = transition->instance_descriptors()->GetFieldIndex( - transition->LastAdded()); + kDontSaveFPRegs); } else { - index = lookup->GetFieldIndex().field_index(); + // Write to the properties array. + int offset = index * kPointerSize + FixedArray::kHeaderSize; + // Get the properties array + __ ldr(scratch1, + FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); + __ str(value_reg, FieldMemOperand(scratch1, offset)); + + // Skip updating write barrier if storing a smi. 
+ __ JumpIfSmi(value_reg, &exit); + + // Update the write barrier for the array address. + // Ok to clobber receiver_reg and name_reg, since we return. + __ mov(name_reg, value_reg); + __ RecordWriteField(scratch1, + offset, + name_reg, + receiver_reg, + kLRHasNotBeenSaved, + kDontSaveFPRegs); } + // Return the value (register r0). + ASSERT(value_reg.is(r0)); + __ bind(&exit); + __ Ret(); +} + + +// Generate StoreField code, value is passed in r0 register. +// When leaving generated code after success, the receiver_reg and name_reg +// may be clobbered. Upon branch to miss_label, the receiver and name +// registers have their original values. +void StubCompiler::GenerateStoreField(MacroAssembler* masm, + Handle<JSObject> object, + LookupResult* lookup, + Register receiver_reg, + Register name_reg, + Register value_reg, + Register scratch1, + Register scratch2, + Label* miss_label) { + // r0 : value + Label exit; + + // Check that the map of the object hasn't changed. + __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label, + DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); + + // Perform global security token check if needed. + if (object->IsJSGlobalProxy()) { + __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label); + } + + // Stub never generated for non-global objects that require access + // checks. + ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); + + int index = lookup->GetFieldIndex().field_index(); + // Adjust for the number of properties stored in the object. Even in the // face of a transition we can use the old map here because the size of the // object and the number of in-object properties is not going to change. index -= object->map()->inobject_properties(); + // TODO(verwaest): Share this code as a code stub. if (index < 0) { // Set the property straight into the object. int offset = object->map()->instance_size() + (index * kPointerSize); @@ -926,26 +1030,6 @@ class CallInterceptorCompiler BASE_EMBEDDED { }; -// Generate code to check that a global property cell is empty. Create -// the property cell at compilation time if no cell exists for the -// property. -static void GenerateCheckPropertyCell(MacroAssembler* masm, - Handle<GlobalObject> global, - Handle<Name> name, - Register scratch, - Label* miss) { - Handle<JSGlobalPropertyCell> cell = - GlobalObject::EnsurePropertyCell(global, name); - ASSERT(cell->value()->IsTheHole()); - __ mov(scratch, Operand(cell)); - __ ldr(scratch, - FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(scratch, ip); - __ b(ne, miss); -} - - // Calls GenerateCheckPropertyCell for each global object in the prototype chain // from object to (but not including) holder. static void GenerateCheckPropertyCells(MacroAssembler* masm, @@ -975,66 +1059,11 @@ static void StoreIntAsFloat(MacroAssembler* masm, Register dst, Register wordoffset, Register ival, - Register fval, - Register scratch1, - Register scratch2) { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - __ vmov(s0, ival); - __ add(scratch1, dst, Operand(wordoffset, LSL, 2)); - __ vcvt_f32_s32(s0, s0); - __ vstr(s0, scratch1, 0); - } else { - Label not_special, done; - // Move sign bit from source to destination. This works because the sign - // bit in the exponent word of the double has the same position and polarity - // as the 2's complement sign bit in a Smi. 
- ASSERT(kBinary32SignMask == 0x80000000u); - - __ and_(fval, ival, Operand(kBinary32SignMask), SetCC); - // Negate value if it is negative. - __ rsb(ival, ival, Operand::Zero(), LeaveCC, ne); - - // We have -1, 0 or 1, which we treat specially. Register ival contains - // absolute value: it is either equal to 1 (special case of -1 and 1), - // greater than 1 (not a special case) or less than 1 (special case of 0). - __ cmp(ival, Operand(1)); - __ b(gt, &not_special); - - // For 1 or -1 we need to or in the 0 exponent (biased). - static const uint32_t exponent_word_for_1 = - kBinary32ExponentBias << kBinary32ExponentShift; - - __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq); - __ b(&done); - - __ bind(&not_special); - // Count leading zeros. - // Gets the wrong answer for 0, but we already checked for that case above. - Register zeros = scratch2; - __ CountLeadingZeros(zeros, ival, scratch1); - - // Compute exponent and or it into the exponent register. - __ rsb(scratch1, - zeros, - Operand((kBitsPerInt - 1) + kBinary32ExponentBias)); - - __ orr(fval, - fval, - Operand(scratch1, LSL, kBinary32ExponentShift)); - - // Shift up the source chopping the top bit off. - __ add(zeros, zeros, Operand(1)); - // This wouldn't work for 1 and -1 as the shift would be 32 which means 0. - __ mov(ival, Operand(ival, LSL, zeros)); - // And the top (top 20 bits). - __ orr(fval, - fval, - Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); - - __ bind(&done); - __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); - } + Register scratch1) { + __ vmov(s0, ival); + __ add(scratch1, dst, Operand(wordoffset, LSL, 2)); + __ vcvt_f32_s32(s0, s0); + __ vstr(s0, scratch1, 0); } @@ -1225,7 +1254,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend( Handle<GlobalObject> global) { Label miss; - Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss); + HandlerFrontendHeader(object, receiver(), last, name, &miss); // If the last object in the prototype chain is a global object, // check that the global property cell is empty. @@ -1233,13 +1262,6 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend( GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss); } - if (!last->HasFastProperties()) { - __ ldr(scratch2(), FieldMemOperand(reg, HeapObject::kMapOffset)); - __ ldr(scratch2(), FieldMemOperand(scratch2(), Map::kPrototypeOffset)); - __ cmp(scratch2(), Operand(isolate()->factory()->null_value())); - __ b(ne, &miss); - } - HandlerFrontendFooter(success, &miss); } @@ -1599,7 +1621,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); __ StoreNumberToDoubleElements( - r4, r0, elements, r3, r5, r2, r9, + r4, r0, elements, r5, r2, r3, r9, &call_builtin, argc * kDoubleSize); // Save new length. @@ -2089,11 +2111,6 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( // -- sp[argc * 4] : receiver // ----------------------------------- - if (!CpuFeatures::IsSupported(VFP2)) { - return Handle<Code>::null(); - } - - CpuFeatureScope scope_vfp2(masm(), VFP2); const int argc = arguments().immediate(); // If the object is not a JSObject or we got an unexpected number of // arguments, bail out to the regular call.
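With VFP2 treated as a baseline feature, StoreIntAsFloat above reduces to three instructions: vmov the integer into s0, vcvt.f32.s32 to convert it in place, and vstr to the element address dst + (wordoffset << 2); the deleted branch assembled the IEEE-754 single by hand from sign, exponent and mantissa. A C-level equivalent of the retained path (names are illustrative only, not V8 API):

    #include <cstdint>
    #include <cstring>

    // Store ival as a binary32 float at dst + wordoffset * 4, matching the
    // vmov / vcvt.f32.s32 / vstr sequence in the generated code.
    void StoreIntAsFloat(char* dst, int32_t wordoffset, int32_t ival) {
      float fval = static_cast<float>(ival);  // what vcvt.f32.s32 computes
      std::memcpy(dst + (wordoffset << 2), &fval, sizeof(fval));
    }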
@@ -3133,36 +3150,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( } -static bool IsElementTypeSigned(ElementsKind elements_kind) { - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - case EXTERNAL_SHORT_ELEMENTS: - case EXTERNAL_INT_ELEMENTS: - return true; - - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - case EXTERNAL_PIXEL_ELEMENTS: - return false; - - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - return false; - } - return false; -} - - static void GenerateSmiKeyCheck(MacroAssembler* masm, Register key, Register scratch0, @@ -3170,29 +3157,23 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, DwVfpRegister double_scratch0, DwVfpRegister double_scratch1, Label* fail) { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - Label key_ok; - // Check for smi or a smi inside a heap number. We convert the heap - // number and check if the conversion is exact and fits into the smi - // range. - __ JumpIfSmi(key, &key_ok); - __ CheckMap(key, - scratch0, - Heap::kHeapNumberMapRootIndex, - fail, - DONT_DO_SMI_CHECK); - __ sub(ip, key, Operand(kHeapObjectTag)); - __ vldr(double_scratch0, ip, HeapNumber::kValueOffset); - __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1); - __ b(ne, fail); - __ TrySmiTag(scratch0, fail, scratch1); - __ mov(key, scratch0); - __ bind(&key_ok); - } else { - // Check that the key is a smi. - __ JumpIfNotSmi(key, fail); - } + Label key_ok; + // Check for smi or a smi inside a heap number. We convert the heap + // number and check if the conversion is exact and fits into the smi + // range. + __ JumpIfSmi(key, &key_ok); + __ CheckMap(key, + scratch0, + Heap::kHeapNumberMapRootIndex, + fail, + DONT_DO_SMI_CHECK); + __ sub(ip, key, Operand(kHeapObjectTag)); + __ vldr(double_scratch0, ip, HeapNumber::kValueOffset); + __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1); + __ b(ne, fail); + __ TrySmiTag(scratch0, fail, scratch1); + __ mov(key, scratch0); + __ bind(&key_ok); } @@ -3262,28 +3243,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: // Perform int-to-float conversion and store to memory. __ SmiUntag(r4, key); - StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9); + StoreIntAsFloat(masm, r3, r4, r5, r7); break; case EXTERNAL_DOUBLE_ELEMENTS: __ add(r3, r3, Operand(key, LSL, 2)); // r3: effective address of the double element FloatingPointHelper::Destination destination; - if (CpuFeatures::IsSupported(VFP2)) { - destination = FloatingPointHelper::kVFPRegisters; - } else { - destination = FloatingPointHelper::kCoreRegisters; - } + destination = FloatingPointHelper::kVFPRegisters; FloatingPointHelper::ConvertIntToDouble( masm, r5, destination, d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent. r4, s2); // These are: scratch2, single_scratch. 
- if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatureScope scope(masm, VFP2); - __ vstr(d0, r3, 0); - } else { - __ str(r6, MemOperand(r3, 0)); - __ str(r7, MemOperand(r3, Register::kSizeInBytes)); - } + __ vstr(d0, r3, 0); break; case FAST_ELEMENTS: case FAST_SMI_ELEMENTS: @@ -3313,201 +3284,59 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // The WebGL specification leaves the behavior of storing NaN and // +/-Infinity into integer arrays basically undefined. For more // reproducible behavior, convert these to zero. - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - // vldr requires offset to be a multiple of 4 so we can not - // include -kHeapObjectTag into it. - __ sub(r5, r0, Operand(kHeapObjectTag)); - __ vldr(d0, r5, HeapNumber::kValueOffset); - __ add(r5, r3, Operand(key, LSL, 1)); - __ vcvt_f32_f64(s0, d0); - __ vstr(s0, r5, 0); - } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - __ sub(r5, r0, Operand(kHeapObjectTag)); - __ vldr(d0, r5, HeapNumber::kValueOffset); - __ add(r5, r3, Operand(key, LSL, 2)); - __ vstr(d0, r5, 0); - } else { - // Hoisted load. vldr requires offset to be a multiple of 4 so we can - // not include -kHeapObjectTag into it. - __ sub(r5, value, Operand(kHeapObjectTag)); - __ vldr(d0, r5, HeapNumber::kValueOffset); - __ ECMAToInt32VFP(r5, d0, d1, r6, r7, r9); - - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ strb(r5, MemOperand(r3, key, LSR, 1)); - break; - case EXTERNAL_SHORT_ELEMENTS: - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ strh(r5, MemOperand(r3, key, LSL, 0)); - break; - case EXTERNAL_INT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ str(r5, MemOperand(r3, key, LSL, 1)); - break; - case EXTERNAL_PIXEL_ELEMENTS: - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - } - // Entry registers are intact, r0 holds the value which is the return - // value. - __ Ret(); + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { + // vldr requires offset to be a multiple of 4 so we can not + // include -kHeapObjectTag into it. + __ sub(r5, r0, Operand(kHeapObjectTag)); + __ vldr(d0, r5, HeapNumber::kValueOffset); + __ add(r5, r3, Operand(key, LSL, 1)); + __ vcvt_f32_f64(s0, d0); + __ vstr(s0, r5, 0); + } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { + __ sub(r5, r0, Operand(kHeapObjectTag)); + __ vldr(d0, r5, HeapNumber::kValueOffset); + __ add(r5, r3, Operand(key, LSL, 2)); + __ vstr(d0, r5, 0); } else { - // VFP3 is not available do manual conversions. - __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset)); - __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset)); - - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - Label done, nan_or_infinity_or_zero; - static const int kMantissaInHiWordShift = - kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; - - static const int kMantissaInLoWordShift = - kBitsPerInt - kMantissaInHiWordShift; - - // Test for all special exponent values: zeros, subnormal numbers, NaNs - // and infinities. All these should be converted to 0. 
- __ mov(r7, Operand(HeapNumber::kExponentMask)); - __ and_(r9, r5, Operand(r7), SetCC); - __ b(eq, &nan_or_infinity_or_zero); - - __ teq(r9, Operand(r7)); - __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq); - __ b(eq, &nan_or_infinity_or_zero); - - // Rebias exponent. - __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); - __ add(r9, - r9, - Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); - - __ cmp(r9, Operand(kBinary32MaxExponent)); - __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt); - __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt); - __ b(gt, &done); - - __ cmp(r9, Operand(kBinary32MinExponent)); - __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt); - __ b(lt, &done); - - __ and_(r7, r5, Operand(HeapNumber::kSignMask)); - __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); - __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift)); - __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift)); - __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift)); - - __ bind(&done); - __ str(r5, MemOperand(r3, key, LSL, 1)); - // Entry registers are intact, r0 holds the value which is the return - // value. - __ Ret(); - - __ bind(&nan_or_infinity_or_zero); - __ and_(r7, r5, Operand(HeapNumber::kSignMask)); - __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); - __ orr(r9, r9, r7); - __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift)); - __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift)); - __ b(&done); - } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - __ add(r7, r3, Operand(key, LSL, 2)); - // r7: effective address of destination element. - __ str(r6, MemOperand(r7, 0)); - __ str(r5, MemOperand(r7, Register::kSizeInBytes)); - __ Ret(); - } else { - bool is_signed_type = IsElementTypeSigned(elements_kind); - int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; - int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; - - Label done, sign; - - // Test for all special exponent values: zeros, subnormal numbers, NaNs - // and infinities. All these should be converted to 0. - __ mov(r7, Operand(HeapNumber::kExponentMask)); - __ and_(r9, r5, Operand(r7), SetCC); - __ mov(r5, Operand::Zero(), LeaveCC, eq); - __ b(eq, &done); - - __ teq(r9, Operand(r7)); - __ mov(r5, Operand::Zero(), LeaveCC, eq); - __ b(eq, &done); - - // Unbias exponent. - __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); - __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); - // If exponent is negative then result is 0. - __ mov(r5, Operand::Zero(), LeaveCC, mi); - __ b(mi, &done); - - // If exponent is too big then result is minimal value. 
- __ cmp(r9, Operand(meaningfull_bits - 1)); - __ mov(r5, Operand(min_value), LeaveCC, ge); - __ b(ge, &done); - - __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); - __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); - __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); - - __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); - __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); - __ b(pl, &sign); - - __ rsb(r9, r9, Operand::Zero()); - __ mov(r5, Operand(r5, LSL, r9)); - __ rsb(r9, r9, Operand(meaningfull_bits)); - __ orr(r5, r5, Operand(r6, LSR, r9)); - - __ bind(&sign); - __ teq(r7, Operand::Zero()); - __ rsb(r5, r5, Operand::Zero(), LeaveCC, ne); - - __ bind(&done); - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ strb(r5, MemOperand(r3, key, LSR, 1)); - break; - case EXTERNAL_SHORT_ELEMENTS: - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ strh(r5, MemOperand(r3, key, LSL, 0)); - break; - case EXTERNAL_INT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ str(r5, MemOperand(r3, key, LSL, 1)); - break; - case EXTERNAL_PIXEL_ELEMENTS: - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } + // Hoisted load. vldr requires offset to be a multiple of 4 so we can + // not include -kHeapObjectTag into it. + __ sub(r5, value, Operand(kHeapObjectTag)); + __ vldr(d0, r5, HeapNumber::kValueOffset); + __ ECMAToInt32(r5, d0, d1, r6, r7, r9); + + switch (elements_kind) { + case EXTERNAL_BYTE_ELEMENTS: + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + __ strb(r5, MemOperand(r3, key, LSR, 1)); + break; + case EXTERNAL_SHORT_ELEMENTS: + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + __ strh(r5, MemOperand(r3, key, LSL, 0)); + break; + case EXTERNAL_INT_ELEMENTS: + case EXTERNAL_UNSIGNED_INT_ELEMENTS: + __ str(r5, MemOperand(r3, key, LSL, 1)); + break; + case EXTERNAL_PIXEL_ELEMENTS: + case EXTERNAL_FLOAT_ELEMENTS: + case EXTERNAL_DOUBLE_ELEMENTS: + case FAST_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case DICTIONARY_ELEMENTS: + case NON_STRICT_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; } } + + // Entry registers are intact, r0 holds the value which is the return + // value. + __ Ret(); } // Slow case, key and receiver still in r0 and r1. @@ -3757,9 +3586,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // All registers after this are overwritten. elements_reg, scratch1, - scratch2, scratch3, scratch4, + scratch2, &transition_elements_kind); __ Ret();
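The external-array store path above replaces the hand-rolled exponent/mantissa conversion with the generated ECMAToInt32 helper, and, per the WebGL note in the hunk, NaN and +/-Infinity must come out as zero. A sketch of the ECMA-262 ToInt32 semantics that conversion implements (the final cast assumes two's-complement wrapping, which holds on the targets V8 supports):

    #include <cmath>
    #include <cstdint>

    int32_t ECMAToInt32(double value) {
      // NaN, +Infinity and -Infinity all convert to 0.
      if (std::isnan(value) || std::isinf(value)) return 0;
      double truncated = std::trunc(value);                  // round toward zero
      double wrapped = std::fmod(truncated, 4294967296.0);   // modulo 2^32
      if (wrapped < 0) wrapped += 4294967296.0;              // into [0, 2^32)
      // Reinterpret the low 32 bits as a signed two's-complement value.
      return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
    }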