Diffstat (limited to 'chromium/v8/src/codegen/x64/macro-assembler-x64.cc')
-rw-r--r--  chromium/v8/src/codegen/x64/macro-assembler-x64.cc  |  82
1 file changed, 63 insertions(+), 19 deletions(-)
diff --git a/chromium/v8/src/codegen/x64/macro-assembler-x64.cc b/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
index 493c7110098..f13811b1aec 100644
--- a/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -317,15 +317,14 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
 
 void TurboAssembler::DecompressRegisterAnyTagged(Register destination,
                                                  Register scratch) {
-  if (kUseBranchlessPtrDecompression) {
+  if (kUseBranchlessPtrDecompressionInGeneratedCode) {
     // Branchlessly compute |masked_root|:
     // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
     STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32));
     Register masked_root = scratch;
-    movl(masked_root, destination);
-    andl(masked_root, Immediate(kSmiTagMask));
-    negq(masked_root);
-    andq(masked_root, kRootRegister);
+    xorq(masked_root, masked_root);
+    Condition smi = CheckSmi(destination);
+    cmovq(NegateCondition(smi), masked_root, kRootRegister);
     // Now this add operation will either leave the value unchanged if it is
     // a smi or add the isolate root if it is a heap object.
     addq(destination, masked_root);
@@ -917,7 +916,7 @@ void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) {
   orq(kScratchRegister, Immediate(1));
   bind(&msb_not_set);
   Cvtqsi2ss(dst, kScratchRegister);
-  addss(dst, dst);
+  Addss(dst, dst);
   bind(&done);
 }
 
@@ -941,7 +940,7 @@ void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) {
   orq(kScratchRegister, Immediate(1));
   bind(&msb_not_set);
   Cvtqsi2sd(dst, kScratchRegister);
-  addsd(dst, dst);
+  Addsd(dst, dst);
   bind(&done);
 }
 
@@ -1042,11 +1041,11 @@ void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
   // and convert it again to see if it is within the uint64 range.
   if (is_double) {
     tasm->Move(kScratchDoubleReg, -9223372036854775808.0);
-    tasm->addsd(kScratchDoubleReg, src);
+    tasm->Addsd(kScratchDoubleReg, src);
     tasm->Cvttsd2siq(dst, kScratchDoubleReg);
   } else {
     tasm->Move(kScratchDoubleReg, -9223372036854775808.0f);
-    tasm->addss(kScratchDoubleReg, src);
+    tasm->Addss(kScratchDoubleReg, src);
     tasm->Cvttss2siq(dst, kScratchDoubleReg);
   }
   tasm->testq(dst, dst);
@@ -1468,8 +1467,9 @@ void TurboAssembler::Move(Register result, Handle<HeapObject> object,
     }
   }
   if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
-    int compressed_embedded_object_index = AddCompressedEmbeddedObject(object);
-    movl(result, Immediate(compressed_embedded_object_index, rmode));
+    EmbeddedObjectIndex index = AddEmbeddedObject(object);
+    DCHECK(is_uint32(index));
+    movl(result, Immediate(static_cast<int>(index), rmode));
   } else {
     DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
     movq(result, Immediate64(object.address(), rmode));
@@ -1607,29 +1607,33 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
   call(code_object, rmode);
 }
 
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
   STATIC_ASSERT(kSmiShiftSize == 0);
   STATIC_ASSERT(kSmiTagSize == 1);
   STATIC_ASSERT(kSmiTag == 0);
 
-  // The builtin_pointer register contains the builtin index as a Smi.
+  // The builtin_index register contains the builtin index as a Smi.
   // Untagging is folded into the indexing operand below (we use times_4 instead
   // of times_8 since smis are already shifted by one).
-  Call(Operand(kRootRegister, builtin_pointer, times_4,
-               IsolateData::builtin_entry_table_offset()));
+  return Operand(kRootRegister, builtin_index, times_4,
+                 IsolateData::builtin_entry_table_offset());
 #else  // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
   STATIC_ASSERT(kSmiShiftSize == 31);
   STATIC_ASSERT(kSmiTagSize == 1);
   STATIC_ASSERT(kSmiTag == 0);
 
-  // The builtin_pointer register contains the builtin index as a Smi.
-  SmiUntag(builtin_pointer, builtin_pointer);
-  Call(Operand(kRootRegister, builtin_pointer, times_8,
-               IsolateData::builtin_entry_table_offset()));
+  // The builtin_index register contains the builtin index as a Smi.
+  SmiUntag(builtin_index, builtin_index);
+  return Operand(kRootRegister, builtin_index, times_8,
+                 IsolateData::builtin_entry_table_offset());
 #endif  // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
 }
 
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+  Call(EntryFromBuiltinIndexAsOperand(builtin_index));
+}
+
 void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                          Register code_object) {
   // Code objects are called differently depending on whether we are generating
@@ -1767,6 +1771,46 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
   }
 }
 
+void TurboAssembler::Psllq(XMMRegister dst, byte imm8) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vpsllq(dst, dst, imm8);
+  } else {
+    DCHECK(!IsEnabled(AVX));
+    psllq(dst, imm8);
+  }
+}
+
+void TurboAssembler::Psrlq(XMMRegister dst, byte imm8) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vpsrlq(dst, dst, imm8);
+  } else {
+    DCHECK(!IsEnabled(AVX));
+    psrlq(dst, imm8);
+  }
+}
+
+void TurboAssembler::Pslld(XMMRegister dst, byte imm8) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vpslld(dst, dst, imm8);
+  } else {
+    DCHECK(!IsEnabled(AVX));
+    pslld(dst, imm8);
+  }
+}
+
+void TurboAssembler::Psrld(XMMRegister dst, byte imm8) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vpsrld(dst, dst, imm8);
+  } else {
+    DCHECK(!IsEnabled(AVX));
+    psrld(dst, imm8);
+  }
+}
+
 void TurboAssembler::Lzcntl(Register dst, Register src) {
   if (CpuFeatures::IsSupported(LZCNT)) {
     CpuFeatureScope scope(this, LZCNT);
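
Notes on the hunks above.

The first hunk replaces an arithmetic mask (movl/andl/negq/andq) with a conditional move when computing |masked_root|. As a rough illustration, here is a plain C++ sketch of the two equivalent strategies (names are ours, not the V8 assembler API), assuming the Smi tag is the low bit and is 0 for Smis:

    #include <cstdint>

    // Old sequence: -(value & kSmiTagMask) is 0 for a Smi and all-ones for
    // a heap object, so the AND passes the root through only for heap
    // objects (movl/andl/negq/andq).
    uint64_t MaskedRootArithmetic(uint64_t value, uint64_t root) {
      uint64_t tag = value & 1;  // kSmiTagMask == 1, kSmiTag == 0
      return (0 - tag) & root;   // negq + andq
    }

    // New sequence: zero the scratch register, then cmov the root in only
    // when the value is not a Smi (xorq + CheckSmi + cmovq).
    uint64_t MaskedRootCmov(uint64_t value, uint64_t root) {
      uint64_t masked_root = 0;                  // xorq(masked_root, masked_root)
      if ((value & 1) != 0) masked_root = root;  // cmovq(NegateCondition(smi), ...)
      return masked_root;
    }

Both compute masked_root = HAS_SMI_TAG(value) ? 0 : root without a branch; the cmov form is one instruction shorter and drops the dependency chain through negq.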
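The Cvtqui2ss/Cvtqui2sd hunks only swap addss/addsd for the capitalized Addss/Addsd wrappers (which pick the AVX encoding when available), but the surrounding context shows the unsigned-to-float trick: x86-64 only has a signed 64-bit convert, so a value with the top bit set is halved, converted, and doubled. A hedged C++ sketch of that logic (hypothetical helper, not V8 code):

    #include <cstdint>

    float Uint64ToFloat(uint64_t v) {
      // When the sign bit is clear the signed convert is already correct.
      if (static_cast<int64_t>(v) >= 0) {
        return static_cast<float>(static_cast<int64_t>(v));  // Cvtqsi2ss
      }
      // Otherwise halve the value, folding the shifted-out bit back in so
      // round-to-nearest still sees it (the shrq / orq(Immediate(1)) pair)...
      uint64_t halved = (v >> 1) | (v & 1);
      float f = static_cast<float>(static_cast<int64_t>(halved));
      // ...then double the result, which is the Addss(dst, dst) at the end.
      return f + f;
    }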
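ConvertFloatToUint64 works around the lack of an unsigned convert instruction before AVX-512. A sketch of the fallback path (hypothetical names; note that the out-of-range casts below are UB in portable C++, which is exactly where the hardware cvttsd2siq instead produces the negative 0x8000000000000000 sentinel that the testq checks):

    #include <cstdint>

    bool DoubleToUint64(double d, uint64_t* out) {
      // First try the signed convert (Cvttsd2siq); non-negative means done.
      int64_t direct = static_cast<int64_t>(d);
      if (direct >= 0) {
        *out = static_cast<uint64_t>(direct);
        return true;
      }
      // Bias the input down by 2^63 (the Addsd with -9223372036854775808.0)
      // and convert again.
      double biased = d + -9223372036854775808.0;
      int64_t second = static_cast<int64_t>(biased);
      if (second < 0) return false;  // input was outside the uint64 range
      // Undo the bias by setting the top bit back.
      *out = static_cast<uint64_t>(second) | (uint64_t{1} << 63);
      return true;
    }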
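The EntryFromBuiltinIndexAsOperand refactor keeps the trick its comment describes: with compressed pointers or 31-bit Smis on 64-bit, a Smi is the index shifted left by one, so untagging folds into the addressing mode by scaling the tagged value by 4 instead of the untagged index by 8. A small sketch of the arithmetic (names ours, not V8 API):

    #include <cassert>
    #include <cstdint>

    uintptr_t BuiltinEntrySlot(uintptr_t root, uintptr_t tagged_index,
                               uintptr_t table_offset) {
      // kSmiTagSize == 1, kSmiTag == 0: the tag bit is 0, so
      // tagged_index == untagged_index << 1.
      uintptr_t folded = root + tagged_index * 4 + table_offset;          // times_4
      uintptr_t plain = root + (tagged_index >> 1) * 8 + table_offset;    // times_8
      assert(folded == plain);  // equal whenever the tag bit is 0
      return folded;
    }

In the kSmiShiftSize == 31 configuration (the #else branch) the Smi payload sits 32 bits up, so no addressing-mode scale can fold the untagging and an explicit SmiUntag is emitted before the times_8 index.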