Diffstat (limited to 'deps/v8/src/arm64')
-rw-r--r--  deps/v8/src/arm64/assembler-arm64-inl.h | 460
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.cc | 1174
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.h | 384
-rw-r--r--  deps/v8/src/arm64/builtins-arm64.cc | 148
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.cc | 826
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.h | 77
-rw-r--r--  deps/v8/src/arm64/codegen-arm64.cc | 144
-rw-r--r--  deps/v8/src/arm64/codegen-arm64.h | 4
-rw-r--r--  deps/v8/src/arm64/constants-arm64.h | 17
-rw-r--r--  deps/v8/src/arm64/cpu-arm64.cc | 37
-rw-r--r--  deps/v8/src/arm64/cpu-arm64.h | 71
-rw-r--r--  deps/v8/src/arm64/debug-arm64.cc | 162
-rw-r--r--  deps/v8/src/arm64/decoder-arm64-inl.h | 30
-rw-r--r--  deps/v8/src/arm64/decoder-arm64.cc | 14
-rw-r--r--  deps/v8/src/arm64/decoder-arm64.h | 4
-rw-r--r--  deps/v8/src/arm64/delayed-masm-arm64-inl.h | 55
-rw-r--r--  deps/v8/src/arm64/delayed-masm-arm64.cc | 198
-rw-r--r--  deps/v8/src/arm64/delayed-masm-arm64.h | 164
-rw-r--r--  deps/v8/src/arm64/deoptimizer-arm64.cc | 90
-rw-r--r--  deps/v8/src/arm64/disasm-arm64.cc | 70
-rw-r--r--  deps/v8/src/arm64/disasm-arm64.h | 10
-rw-r--r--  deps/v8/src/arm64/frames-arm64.cc | 10
-rw-r--r--  deps/v8/src/arm64/frames-arm64.h | 5
-rw-r--r--  deps/v8/src/arm64/full-codegen-arm64.cc | 731
-rw-r--r--  deps/v8/src/arm64/ic-arm64.cc | 412
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.cc | 28
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.h | 58
-rw-r--r--  deps/v8/src/arm64/instrument-arm64.cc | 9
-rw-r--r--  deps/v8/src/arm64/instrument-arm64.h | 11
-rw-r--r--  deps/v8/src/arm64/lithium-arm64.cc | 533
-rw-r--r--  deps/v8/src/arm64/lithium-arm64.h | 484
-rw-r--r--  deps/v8/src/arm64/lithium-codegen-arm64.cc | 1089
-rw-r--r--  deps/v8/src/arm64/lithium-codegen-arm64.h | 67
-rw-r--r--  deps/v8/src/arm64/lithium-gap-resolver-arm64.cc | 162
-rw-r--r--  deps/v8/src/arm64/lithium-gap-resolver-arm64.h | 46
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64-inl.h | 693
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc | 1250
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.h | 226
-rw-r--r--  deps/v8/src/arm64/regexp-macro-assembler-arm64.cc | 153
-rw-r--r--  deps/v8/src/arm64/regexp-macro-assembler-arm64.h | 9
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.cc | 770
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.h | 214
-rw-r--r--  deps/v8/src/arm64/stub-cache-arm64.cc | 959
-rw-r--r--  deps/v8/src/arm64/utils-arm64.cc | 17
-rw-r--r--  deps/v8/src/arm64/utils-arm64.h | 10
45 files changed, 6400 insertions, 5685 deletions
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 3c17153f6..3b24197eb 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -5,24 +5,30 @@
#ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
#define V8_ARM64_ASSEMBLER_ARM64_INL_H_
-#include "arm64/assembler-arm64.h"
-#include "cpu.h"
-#include "debug.h"
+#include "src/arm64/assembler-arm64.h"
+#include "src/assembler.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
-void RelocInfo::apply(intptr_t delta) {
+bool CpuFeatures::SupportsCrankshaft() { return true; }
+
+
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
UNIMPLEMENTED();
}
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, host_, target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+ IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
@@ -31,54 +37,54 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
inline unsigned CPURegister::code() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return reg_code;
}
inline CPURegister::RegisterType CPURegister::type() const {
- ASSERT(IsValidOrNone());
+ DCHECK(IsValidOrNone());
return reg_type;
}
inline RegList CPURegister::Bit() const {
- ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
+ DCHECK(reg_code < (sizeof(RegList) * kBitsPerByte));
return IsValid() ? 1UL << reg_code : 0;
}
inline unsigned CPURegister::SizeInBits() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return reg_size;
}
inline int CPURegister::SizeInBytes() const {
- ASSERT(IsValid());
- ASSERT(SizeInBits() % 8 == 0);
+ DCHECK(IsValid());
+ DCHECK(SizeInBits() % 8 == 0);
return reg_size / 8;
}
inline bool CPURegister::Is32Bits() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return reg_size == 32;
}
inline bool CPURegister::Is64Bits() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return reg_size == 64;
}
inline bool CPURegister::IsValid() const {
if (IsValidRegister() || IsValidFPRegister()) {
- ASSERT(!IsNone());
+ DCHECK(!IsNone());
return true;
} else {
- ASSERT(IsNone());
+ DCHECK(IsNone());
return false;
}
}
@@ -100,17 +106,22 @@ inline bool CPURegister::IsValidFPRegister() const {
inline bool CPURegister::IsNone() const {
// kNoRegister types should always have size 0 and code 0.
- ASSERT((reg_type != kNoRegister) || (reg_code == 0));
- ASSERT((reg_type != kNoRegister) || (reg_size == 0));
+ DCHECK((reg_type != kNoRegister) || (reg_code == 0));
+ DCHECK((reg_type != kNoRegister) || (reg_size == 0));
return reg_type == kNoRegister;
}
inline bool CPURegister::Is(const CPURegister& other) const {
- ASSERT(IsValidOrNone() && other.IsValidOrNone());
- return (reg_code == other.reg_code) && (reg_size == other.reg_size) &&
- (reg_type == other.reg_type);
+ DCHECK(IsValidOrNone() && other.IsValidOrNone());
+ return Aliases(other) && (reg_size == other.reg_size);
+}
+
+
+inline bool CPURegister::Aliases(const CPURegister& other) const {
+ DCHECK(IsValidOrNone() && other.IsValidOrNone());
+ return (reg_code == other.reg_code) && (reg_type == other.reg_type);
}
@@ -135,27 +146,27 @@ inline bool CPURegister::IsValidOrNone() const {
inline bool CPURegister::IsZero() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return IsRegister() && (reg_code == kZeroRegCode);
}
inline bool CPURegister::IsSP() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return IsRegister() && (reg_code == kSPRegInternalCode);
}
inline void CPURegList::Combine(const CPURegList& other) {
- ASSERT(IsValid());
- ASSERT(other.type() == type_);
- ASSERT(other.RegisterSizeInBits() == size_);
+ DCHECK(IsValid());
+ DCHECK(other.type() == type_);
+ DCHECK(other.RegisterSizeInBits() == size_);
list_ |= other.list();
}
inline void CPURegList::Remove(const CPURegList& other) {
- ASSERT(IsValid());
+ DCHECK(IsValid());
if (other.type() == type_) {
list_ &= ~other.list();
}
@@ -163,8 +174,8 @@ inline void CPURegList::Remove(const CPURegList& other) {
inline void CPURegList::Combine(const CPURegister& other) {
- ASSERT(other.type() == type_);
- ASSERT(other.SizeInBits() == size_);
+ DCHECK(other.type() == type_);
+ DCHECK(other.SizeInBits() == size_);
Combine(other.code());
}
@@ -181,92 +192,92 @@ inline void CPURegList::Remove(const CPURegister& other1,
inline void CPURegList::Combine(int code) {
- ASSERT(IsValid());
- ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+ DCHECK(IsValid());
+ DCHECK(CPURegister::Create(code, size_, type_).IsValid());
list_ |= (1UL << code);
}
inline void CPURegList::Remove(int code) {
- ASSERT(IsValid());
- ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+ DCHECK(IsValid());
+ DCHECK(CPURegister::Create(code, size_, type_).IsValid());
list_ &= ~(1UL << code);
}
inline Register Register::XRegFromCode(unsigned code) {
- // This function returns the zero register when code = 31. The stack pointer
- // can not be returned.
- ASSERT(code < kNumberOfRegisters);
- return Register::Create(code, kXRegSizeInBits);
+ if (code == kSPRegInternalCode) {
+ return csp;
+ } else {
+ DCHECK(code < kNumberOfRegisters);
+ return Register::Create(code, kXRegSizeInBits);
+ }
}
inline Register Register::WRegFromCode(unsigned code) {
- ASSERT(code < kNumberOfRegisters);
- return Register::Create(code, kWRegSizeInBits);
+ if (code == kSPRegInternalCode) {
+ return wcsp;
+ } else {
+ DCHECK(code < kNumberOfRegisters);
+ return Register::Create(code, kWRegSizeInBits);
+ }
}
inline FPRegister FPRegister::SRegFromCode(unsigned code) {
- ASSERT(code < kNumberOfFPRegisters);
+ DCHECK(code < kNumberOfFPRegisters);
return FPRegister::Create(code, kSRegSizeInBits);
}
inline FPRegister FPRegister::DRegFromCode(unsigned code) {
- ASSERT(code < kNumberOfFPRegisters);
+ DCHECK(code < kNumberOfFPRegisters);
return FPRegister::Create(code, kDRegSizeInBits);
}
inline Register CPURegister::W() const {
- ASSERT(IsValidRegister());
+ DCHECK(IsValidRegister());
return Register::WRegFromCode(reg_code);
}
inline Register CPURegister::X() const {
- ASSERT(IsValidRegister());
+ DCHECK(IsValidRegister());
return Register::XRegFromCode(reg_code);
}
inline FPRegister CPURegister::S() const {
- ASSERT(IsValidFPRegister());
+ DCHECK(IsValidFPRegister());
return FPRegister::SRegFromCode(reg_code);
}
inline FPRegister CPURegister::D() const {
- ASSERT(IsValidFPRegister());
+ DCHECK(IsValidFPRegister());
return FPRegister::DRegFromCode(reg_code);
}
-// Operand.
-template<typename T>
-Operand::Operand(Handle<T> value) : reg_(NoReg) {
- initialize_handle(value);
-}
-
-
+// Immediate.
// Default initializer is for int types
-template<typename int_t>
-struct OperandInitializer {
+template<typename T>
+struct ImmediateInitializer {
static const bool kIsIntType = true;
- static inline RelocInfo::Mode rmode_for(int_t) {
- return sizeof(int_t) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
+ static inline RelocInfo::Mode rmode_for(T) {
+ return sizeof(T) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
}
- static inline int64_t immediate_for(int_t t) {
- STATIC_ASSERT(sizeof(int_t) <= 8);
+ static inline int64_t immediate_for(T t) {
+ STATIC_ASSERT(sizeof(T) <= 8);
return t;
}
};
template<>
-struct OperandInitializer<Smi*> {
+struct ImmediateInitializer<Smi*> {
static const bool kIsIntType = false;
static inline RelocInfo::Mode rmode_for(Smi* t) {
return RelocInfo::NONE64;
@@ -278,7 +289,7 @@ struct OperandInitializer<Smi*> {
template<>
-struct OperandInitializer<ExternalReference> {
+struct ImmediateInitializer<ExternalReference> {
static const bool kIsIntType = false;
static inline RelocInfo::Mode rmode_for(ExternalReference t) {
return RelocInfo::EXTERNAL_REFERENCE;
@@ -290,45 +301,64 @@ struct OperandInitializer<ExternalReference> {
template<typename T>
-Operand::Operand(T t)
- : immediate_(OperandInitializer<T>::immediate_for(t)),
- reg_(NoReg),
- rmode_(OperandInitializer<T>::rmode_for(t)) {}
+Immediate::Immediate(Handle<T> value) {
+ InitializeHandle(value);
+}
template<typename T>
-Operand::Operand(T t, RelocInfo::Mode rmode)
- : immediate_(OperandInitializer<T>::immediate_for(t)),
- reg_(NoReg),
+Immediate::Immediate(T t)
+ : value_(ImmediateInitializer<T>::immediate_for(t)),
+ rmode_(ImmediateInitializer<T>::rmode_for(t)) {}
+
+
+template<typename T>
+Immediate::Immediate(T t, RelocInfo::Mode rmode)
+ : value_(ImmediateInitializer<T>::immediate_for(t)),
rmode_(rmode) {
- STATIC_ASSERT(OperandInitializer<T>::kIsIntType);
+ STATIC_ASSERT(ImmediateInitializer<T>::kIsIntType);
}
+// Operand.
+template<typename T>
+Operand::Operand(Handle<T> value) : immediate_(value), reg_(NoReg) {}
+
+
+template<typename T>
+Operand::Operand(T t) : immediate_(t), reg_(NoReg) {}
+
+
+template<typename T>
+Operand::Operand(T t, RelocInfo::Mode rmode)
+ : immediate_(t, rmode),
+ reg_(NoReg) {}
+
+
Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
- : reg_(reg),
+ : immediate_(0),
+ reg_(reg),
shift_(shift),
extend_(NO_EXTEND),
- shift_amount_(shift_amount),
- rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
- ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
- ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
- ASSERT(!reg.IsSP());
+ shift_amount_(shift_amount) {
+ DCHECK(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
+ DCHECK(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
+ DCHECK(!reg.IsSP());
}
Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
- : reg_(reg),
+ : immediate_(0),
+ reg_(reg),
shift_(NO_SHIFT),
extend_(extend),
- shift_amount_(shift_amount),
- rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
- ASSERT(reg.IsValid());
- ASSERT(shift_amount <= 4);
- ASSERT(!reg.IsSP());
+ shift_amount_(shift_amount) {
+ DCHECK(reg.IsValid());
+ DCHECK(shift_amount <= 4);
+ DCHECK(!reg.IsSP());
// Extend modes SXTX and UXTX require a 64-bit register.
- ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
+ DCHECK(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}
@@ -349,7 +379,7 @@ bool Operand::IsExtendedRegister() const {
bool Operand::IsZero() const {
if (IsImmediate()) {
- return immediate() == 0;
+ return ImmediateValue() == 0;
} else {
return reg().IsZero();
}
@@ -357,51 +387,61 @@ bool Operand::IsZero() const {
Operand Operand::ToExtendedRegister() const {
- ASSERT(IsShiftedRegister());
- ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
+ DCHECK(IsShiftedRegister());
+ DCHECK((shift_ == LSL) && (shift_amount_ <= 4));
return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
-int64_t Operand::immediate() const {
- ASSERT(IsImmediate());
+Immediate Operand::immediate() const {
+ DCHECK(IsImmediate());
return immediate_;
}
+int64_t Operand::ImmediateValue() const {
+ DCHECK(IsImmediate());
+ return immediate_.value();
+}
+
+
Register Operand::reg() const {
- ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ DCHECK(IsShiftedRegister() || IsExtendedRegister());
return reg_;
}
Shift Operand::shift() const {
- ASSERT(IsShiftedRegister());
+ DCHECK(IsShiftedRegister());
return shift_;
}
Extend Operand::extend() const {
- ASSERT(IsExtendedRegister());
+ DCHECK(IsExtendedRegister());
return extend_;
}
unsigned Operand::shift_amount() const {
- ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ DCHECK(IsShiftedRegister() || IsExtendedRegister());
return shift_amount_;
}
Operand Operand::UntagSmi(Register smi) {
- ASSERT(smi.Is64Bits());
+ STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
+ kSmiValueSize));
+ DCHECK(smi.Is64Bits());
return Operand(smi, ASR, kSmiShift);
}
Operand Operand::UntagSmiAndScale(Register smi, int scale) {
- ASSERT(smi.Is64Bits());
- ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
+ STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
+ kSmiValueSize));
+ DCHECK(smi.Is64Bits());
+ DCHECK((scale >= 0) && (scale <= (64 - kSmiValueSize)));
if (scale > kSmiShift) {
return Operand(smi, LSL, scale - kSmiShift);
} else if (scale < kSmiShift) {
@@ -420,7 +460,7 @@ MemOperand::MemOperand()
MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
: base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
- ASSERT(base.Is64Bits() && !base.IsZero());
+ DCHECK(base.Is64Bits() && !base.IsZero());
}
@@ -430,12 +470,12 @@ MemOperand::MemOperand(Register base,
unsigned shift_amount)
: base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
- ASSERT(base.Is64Bits() && !base.IsZero());
- ASSERT(!regoffset.IsSP());
- ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
+ DCHECK(base.Is64Bits() && !base.IsZero());
+ DCHECK(!regoffset.IsSP());
+ DCHECK((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
// SXTX extend mode requires a 64-bit offset register.
- ASSERT(regoffset.Is64Bits() || (extend != SXTX));
+ DCHECK(regoffset.Is64Bits() || (extend != SXTX));
}
@@ -445,22 +485,22 @@ MemOperand::MemOperand(Register base,
unsigned shift_amount)
: base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
- ASSERT(base.Is64Bits() && !base.IsZero());
- ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
- ASSERT(shift == LSL);
+ DCHECK(base.Is64Bits() && !base.IsZero());
+ DCHECK(regoffset.Is64Bits() && !regoffset.IsSP());
+ DCHECK(shift == LSL);
}
MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
: base_(base), addrmode_(addrmode) {
- ASSERT(base.Is64Bits() && !base.IsZero());
+ DCHECK(base.Is64Bits() && !base.IsZero());
if (offset.IsImmediate()) {
- offset_ = offset.immediate();
+ offset_ = offset.ImmediateValue();
regoffset_ = NoReg;
} else if (offset.IsShiftedRegister()) {
- ASSERT(addrmode == Offset);
+ DCHECK(addrmode == Offset);
regoffset_ = offset.reg();
shift_= offset.shift();
@@ -470,11 +510,11 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
offset_ = 0;
// These assertions match those in the shifted-register constructor.
- ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
- ASSERT(shift_ == LSL);
+ DCHECK(regoffset_.Is64Bits() && !regoffset_.IsSP());
+ DCHECK(shift_ == LSL);
} else {
- ASSERT(offset.IsExtendedRegister());
- ASSERT(addrmode == Offset);
+ DCHECK(offset.IsExtendedRegister());
+ DCHECK(addrmode == Offset);
regoffset_ = offset.reg();
extend_ = offset.extend();
@@ -484,9 +524,9 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
offset_ = 0;
// These assertions match those in the extended-register constructor.
- ASSERT(!regoffset_.IsSP());
- ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
- ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
+ DCHECK(!regoffset_.IsSP());
+ DCHECK((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
+ DCHECK((regoffset_.Is64Bits() || (extend_ != SXTX)));
}
}
@@ -513,7 +553,7 @@ Operand MemOperand::OffsetAsOperand() const {
if (IsImmediateOffset()) {
return offset();
} else {
- ASSERT(IsRegisterOffset());
+ DCHECK(IsRegisterOffset());
if (extend() == NO_EXTEND) {
return Operand(regoffset(), shift(), shift_amount());
} else {
@@ -535,7 +575,7 @@ void Assembler::Unreachable() {
Address Assembler::target_pointer_address_at(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
- ASSERT(instr->IsLdrLiteralX());
+ DCHECK(instr->IsLdrLiteralX());
return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
}
@@ -562,11 +602,16 @@ Address Assembler::target_address_from_return_address(Address pc) {
Address candidate = pc - 2 * kInstructionSize;
Instruction* instr = reinterpret_cast<Instruction*>(candidate);
USE(instr);
- ASSERT(instr->IsLdrLiteralX());
+ DCHECK(instr->IsLdrLiteralX());
return candidate;
}
+Address Assembler::break_address_from_return_address(Address pc) {
+ return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
+}
+
+
Address Assembler::return_address_from_call_start(Address pc) {
// The call, generated by MacroAssembler::Call, is one of two possible
// sequences:
@@ -590,14 +635,14 @@ Address Assembler::return_address_from_call_start(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
if (instr->IsMovz()) {
// Verify the instruction sequence.
- ASSERT(instr->following(1)->IsMovk());
- ASSERT(instr->following(2)->IsMovk());
- ASSERT(instr->following(3)->IsBranchAndLinkToRegister());
+ DCHECK(instr->following(1)->IsMovk());
+ DCHECK(instr->following(2)->IsMovk());
+ DCHECK(instr->following(3)->IsBranchAndLinkToRegister());
return pc + Assembler::kCallSizeWithoutRelocation;
} else {
// Verify the instruction sequence.
- ASSERT(instr->IsLdrLiteralX());
- ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
+ DCHECK(instr->IsLdrLiteralX());
+ DCHECK(instr->following(1)->IsBranchAndLinkToRegister());
return pc + Assembler::kCallSizeWithRelocation;
}
}
@@ -611,11 +656,12 @@ void Assembler::deserialization_set_special_target_at(
void Assembler::set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
- Address target) {
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
Memory::Address_at(target_pointer_address_at(pc)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // CPU::FlushICache(pc, sizeof(target));
+ // CpuFeatures::FlushICache(pc, sizeof(target));
// However, on ARM, an instruction is actually patched in the case of
// embedded constants of the form:
// ldr ip, [pc, #...]
@@ -626,9 +672,10 @@ void Assembler::set_target_address_at(Address pc,
void Assembler::set_target_address_at(Address pc,
Code* code,
- Address target) {
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target);
+ set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}
@@ -638,13 +685,13 @@ int RelocInfo::target_address_size() {
Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_pointer_address_at(pc_);
@@ -652,30 +699,32 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() {
- ASSERT(IsInConstantPool());
+ DCHECK(IsInConstantPool());
return Assembler::target_pointer_address_at(pc_);
}
Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
Assembler::target_address_at(pc_, host_)));
}
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- ASSERT(!target->IsConsString());
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, host_,
- reinterpret_cast<Address>(target));
- if (mode == UPDATE_WRITE_BARRIER &&
+ reinterpret_cast<Address>(target),
+ icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -685,21 +734,24 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
Address RelocInfo::target_reference() {
- ASSERT(rmode_ == EXTERNAL_REFERENCE);
+ DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
- ASSERT(IsRuntimeEntry(rmode_));
+ DCHECK(IsRuntimeEntry(rmode_));
return target_address();
}
void RelocInfo::set_target_runtime_entry(Address target,
- WriteBarrierMode mode) {
- ASSERT(IsRuntimeEntry(rmode_));
- if (target_address() != target) set_target_address(target, mode);
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ if (target_address() != target) {
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+ }
}
@@ -711,12 +763,14 @@ Handle<Cell> RelocInfo::target_cell_handle() {
Cell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::CELL);
+ DCHECK(rmode_ == RelocInfo::CELL);
return Cell::FromValueAddress(Memory::Address_at(pc_));
}
-void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+void RelocInfo::set_target_cell(Cell* cell,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
UNIMPLEMENTED();
}
@@ -732,16 +786,17 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
// Read the stub entry point from the code age sequence.
Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
}
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- ASSERT(!Code::IsYoungSequence(stub->GetIsolate(), pc_));
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ DCHECK(!Code::IsYoungSequence(stub->GetIsolate(), pc_));
// Overwrite the stub entry point in the code age sequence. This is loaded as
// a literal so there is no need to call FlushICache here.
Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
@@ -750,7 +805,7 @@ void RelocInfo::set_code_age_stub(Code* stub) {
Address RelocInfo::call_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
// For the above sequences the Relocinfo points to the load literal loading
// the call address.
@@ -759,7 +814,7 @@ Address RelocInfo::call_address() {
void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Assembler::set_target_address_at(pc_, host_, target);
if (host() != NULL) {
@@ -771,7 +826,7 @@ void RelocInfo::set_call_address(Address target) {
void RelocInfo::WipeOut() {
- ASSERT(IsEmbeddedObject(rmode_) ||
+ DCHECK(IsEmbeddedObject(rmode_) ||
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
@@ -843,11 +898,11 @@ void RelocInfo::Visit(Heap* heap) {
LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
- ASSERT(rt.IsValid());
+ DCHECK(rt.IsValid());
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x : LDR_w;
} else {
- ASSERT(rt.IsFPRegister());
+ DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? LDR_d : LDR_s;
}
}
@@ -855,23 +910,23 @@ LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
const CPURegister& rt2) {
- ASSERT(AreSameSizeAndType(rt, rt2));
+ DCHECK(AreSameSizeAndType(rt, rt2));
USE(rt2);
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDP_x : LDP_w;
} else {
- ASSERT(rt.IsFPRegister());
+ DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? LDP_d : LDP_s;
}
}
LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
- ASSERT(rt.IsValid());
+ DCHECK(rt.IsValid());
if (rt.IsRegister()) {
return rt.Is64Bits() ? STR_x : STR_w;
} else {
- ASSERT(rt.IsFPRegister());
+ DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? STR_d : STR_s;
}
}
@@ -879,12 +934,12 @@ LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
const CPURegister& rt2) {
- ASSERT(AreSameSizeAndType(rt, rt2));
+ DCHECK(AreSameSizeAndType(rt, rt2));
USE(rt2);
if (rt.IsRegister()) {
return rt.Is64Bits() ? STP_x : STP_w;
} else {
- ASSERT(rt.IsFPRegister());
+ DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? STP_d : STP_s;
}
}
@@ -892,12 +947,12 @@ LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
const CPURegister& rt, const CPURegister& rt2) {
- ASSERT(AreSameSizeAndType(rt, rt2));
+ DCHECK(AreSameSizeAndType(rt, rt2));
USE(rt2);
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDNP_x : LDNP_w;
} else {
- ASSERT(rt.IsFPRegister());
+ DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? LDNP_d : LDNP_s;
}
}
@@ -905,21 +960,31 @@ LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
const CPURegister& rt, const CPURegister& rt2) {
- ASSERT(AreSameSizeAndType(rt, rt2));
+ DCHECK(AreSameSizeAndType(rt, rt2));
USE(rt2);
if (rt.IsRegister()) {
return rt.Is64Bits() ? STNP_x : STNP_w;
} else {
- ASSERT(rt.IsFPRegister());
+ DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? STNP_d : STNP_s;
}
}
+LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
+ } else {
+ DCHECK(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
+ }
+}
+
+
int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
- ASSERT(kStartOfLabelLinkChain == 0);
+ DCHECK(kStartOfLabelLinkChain == 0);
int offset = LinkAndGetByteOffsetTo(label);
- ASSERT(IsAligned(offset, kInstructionSize));
+ DCHECK(IsAligned(offset, kInstructionSize));
return offset >> kInstructionSizeLog2;
}
@@ -974,7 +1039,7 @@ Instr Assembler::ImmTestBranch(int imm14) {
Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
- ASSERT(is_uint6(bit_pos));
+ DCHECK(is_uint6(bit_pos));
// Subtract five from the shift offset, as we need bit 5 from bit_pos.
unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
@@ -990,7 +1055,7 @@ Instr Assembler::SF(Register rd) {
Instr Assembler::ImmAddSub(int64_t imm) {
- ASSERT(IsImmAddSub(imm));
+ DCHECK(IsImmAddSub(imm));
if (is_uint12(imm)) { // No shift required.
return imm << ImmAddSub_offset;
} else {
@@ -1000,7 +1065,7 @@ Instr Assembler::ImmAddSub(int64_t imm) {
Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
- ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
+ DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
((reg_size == kWRegSizeInBits) && is_uint5(imms)));
USE(reg_size);
return imms << ImmS_offset;
@@ -1008,26 +1073,26 @@ Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
- ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
+ DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
((reg_size == kWRegSizeInBits) && is_uint5(immr)));
USE(reg_size);
- ASSERT(is_uint6(immr));
+ DCHECK(is_uint6(immr));
return immr << ImmR_offset;
}
Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
- ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
- ASSERT(is_uint6(imms));
- ASSERT((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
+ DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ DCHECK(is_uint6(imms));
+ DCHECK((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
USE(reg_size);
return imms << ImmSetBits_offset;
}
Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
- ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
- ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
+ DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
((reg_size == kWRegSizeInBits) && is_uint5(immr)));
USE(reg_size);
return immr << ImmRotate_offset;
@@ -1041,21 +1106,21 @@ Instr Assembler::ImmLLiteral(int imm19) {
Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
- ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
- ASSERT((reg_size == kXRegSizeInBits) || (bitn == 0));
+ DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ DCHECK((reg_size == kXRegSizeInBits) || (bitn == 0));
USE(reg_size);
return bitn << BitN_offset;
}
Instr Assembler::ShiftDP(Shift shift) {
- ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
+ DCHECK(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
return shift << ShiftDP_offset;
}
Instr Assembler::ImmDPShift(unsigned amount) {
- ASSERT(is_uint6(amount));
+ DCHECK(is_uint6(amount));
return amount << ImmDPShift_offset;
}
@@ -1066,13 +1131,13 @@ Instr Assembler::ExtendMode(Extend extend) {
Instr Assembler::ImmExtendShift(unsigned left_shift) {
- ASSERT(left_shift <= 4);
+ DCHECK(left_shift <= 4);
return left_shift << ImmExtendShift_offset;
}
Instr Assembler::ImmCondCmp(unsigned imm) {
- ASSERT(is_uint5(imm));
+ DCHECK(is_uint5(imm));
return imm << ImmCondCmp_offset;
}
@@ -1083,75 +1148,75 @@ Instr Assembler::Nzcv(StatusFlags nzcv) {
Instr Assembler::ImmLSUnsigned(int imm12) {
- ASSERT(is_uint12(imm12));
+ DCHECK(is_uint12(imm12));
return imm12 << ImmLSUnsigned_offset;
}
Instr Assembler::ImmLS(int imm9) {
- ASSERT(is_int9(imm9));
+ DCHECK(is_int9(imm9));
return truncate_to_int9(imm9) << ImmLS_offset;
}
Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
- ASSERT(((imm7 >> size) << size) == imm7);
+ DCHECK(((imm7 >> size) << size) == imm7);
int scaled_imm7 = imm7 >> size;
- ASSERT(is_int7(scaled_imm7));
+ DCHECK(is_int7(scaled_imm7));
return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
}
Instr Assembler::ImmShiftLS(unsigned shift_amount) {
- ASSERT(is_uint1(shift_amount));
+ DCHECK(is_uint1(shift_amount));
return shift_amount << ImmShiftLS_offset;
}
Instr Assembler::ImmException(int imm16) {
- ASSERT(is_uint16(imm16));
+ DCHECK(is_uint16(imm16));
return imm16 << ImmException_offset;
}
Instr Assembler::ImmSystemRegister(int imm15) {
- ASSERT(is_uint15(imm15));
+ DCHECK(is_uint15(imm15));
return imm15 << ImmSystemRegister_offset;
}
Instr Assembler::ImmHint(int imm7) {
- ASSERT(is_uint7(imm7));
+ DCHECK(is_uint7(imm7));
return imm7 << ImmHint_offset;
}
Instr Assembler::ImmBarrierDomain(int imm2) {
- ASSERT(is_uint2(imm2));
+ DCHECK(is_uint2(imm2));
return imm2 << ImmBarrierDomain_offset;
}
Instr Assembler::ImmBarrierType(int imm2) {
- ASSERT(is_uint2(imm2));
+ DCHECK(is_uint2(imm2));
return imm2 << ImmBarrierType_offset;
}
LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
- ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
+ DCHECK((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
return static_cast<LSDataSize>(op >> SizeLS_offset);
}
Instr Assembler::ImmMoveWide(uint64_t imm) {
- ASSERT(is_uint16(imm));
+ DCHECK(is_uint16(imm));
return imm << ImmMoveWide_offset;
}
Instr Assembler::ShiftMoveWide(int64_t shift) {
- ASSERT(is_uint2(shift));
+ DCHECK(is_uint2(shift));
return shift << ShiftMoveWide_offset;
}
@@ -1162,7 +1227,7 @@ Instr Assembler::FPType(FPRegister fd) {
Instr Assembler::FPScale(unsigned scale) {
- ASSERT(is_uint6(scale));
+ DCHECK(is_uint6(scale));
return scale << FPScale_offset;
}
@@ -1172,13 +1237,8 @@ const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
}
-void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
- LoadRelocatedValue(rt, operand, LDR_x_lit);
-}
-
-
inline void Assembler::CheckBufferSpace() {
- ASSERT(pc_ < (buffer_ + buffer_size_));
+ DCHECK(pc_ < (buffer_ + buffer_size_));
if (buffer_space() < kGap) {
GrowBuffer();
}
@@ -1197,7 +1257,7 @@ inline void Assembler::CheckBuffer() {
TypeFeedbackId Assembler::RecordedAstId() {
- ASSERT(!recorded_ast_id_.IsNone());
+ DCHECK(!recorded_ast_id_.IsNone());
return recorded_ast_id_;
}
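
The Operand changes in this file split the immediate payload out into a separate Immediate type that carries both the raw 64-bit value and its RelocInfo::Mode, with Operand holding an Immediate member instead of its own immediate_/rmode_ pair. A minimal usage sketch, inferred only from the templates quoted above plus V8's standard Smi::FromInt helper (illustrative only, not part of the patch):

  // Hypothetical example; assumes the Operand/Immediate declarations from
  // src/arm64/assembler-arm64.h shown in this diff.
  Operand a(0x1234);                        // int (4 bytes)  -> RelocInfo::NONE32
  Operand b(static_cast<int64_t>(0x1234));  // int64_t        -> RelocInfo::NONE64
  Operand c(Smi::FromInt(7));               // Smi*           -> RelocInfo::NONE64

  int64_t raw = b.ImmediateValue();   // raw value, what Operand::immediate() used to return
  Immediate imm = b.immediate();      // immediate() now returns the wrapper (value + rmode)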
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 14f414557..7f86e14a7 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -26,49 +26,64 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#define ARM64_DEFINE_REG_STATICS
-#include "arm64/assembler-arm64-inl.h"
+#include "src/arm64/assembler-arm64-inl.h"
+#include "src/base/cpu.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
-// CpuFeatures utilities (for V8 compatibility).
+// CpuFeatures implementation.
-ExternalReference ExternalReference::cpu_features() {
- return ExternalReference(&CpuFeatures::supported_);
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ if (cross_compile) {
+ // Always align csp in cross compiled code - this is safe and ensures that
+ // csp will always be aligned if it is enabled by probing at runtime.
+ if (FLAG_enable_always_align_csp) supported_ |= 1u << ALWAYS_ALIGN_CSP;
+ } else {
+ base::CPU cpu;
+ if (FLAG_enable_always_align_csp &&
+ (cpu.implementer() == base::CPU::NVIDIA || FLAG_debug_code)) {
+ supported_ |= 1u << ALWAYS_ALIGN_CSP;
+ }
+ }
}
+void CpuFeatures::PrintTarget() { }
+void CpuFeatures::PrintFeatures() { }
+
+
// -----------------------------------------------------------------------------
// CPURegList utilities.
CPURegister CPURegList::PopLowestIndex() {
- ASSERT(IsValid());
+ DCHECK(IsValid());
if (IsEmpty()) {
return NoCPUReg;
}
int index = CountTrailingZeros(list_, kRegListSizeInBits);
- ASSERT((1 << index) & list_);
+ DCHECK((1 << index) & list_);
Remove(index);
return CPURegister::Create(index, size_, type_);
}
CPURegister CPURegList::PopHighestIndex() {
- ASSERT(IsValid());
+ DCHECK(IsValid());
if (IsEmpty()) {
return NoCPUReg;
}
int index = CountLeadingZeros(list_, kRegListSizeInBits);
index = kRegListSizeInBits - 1 - index;
- ASSERT((1 << index) & list_);
+ DCHECK((1 << index) & list_);
Remove(index);
return CPURegister::Create(index, size_, type_);
}
@@ -80,8 +95,8 @@ void CPURegList::RemoveCalleeSaved() {
} else if (type() == CPURegister::kFPRegister) {
Remove(GetCalleeSavedFP(RegisterSizeInBits()));
} else {
- ASSERT(type() == CPURegister::kNoRegister);
- ASSERT(IsEmpty());
+ DCHECK(type() == CPURegister::kNoRegister);
+ DCHECK(IsEmpty());
// The list must already be empty, so do nothing.
}
}
@@ -176,7 +191,7 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
}
// Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count * kInstructionSize);
+ CpuFeatures::FlushICache(pc_, instruction_count * kInstructionSize);
}
@@ -212,7 +227,7 @@ bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
- for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
+ for (unsigned i = 0; i < ARRAY_SIZE(regs); i++) {
if (regs[i].IsRegister()) {
number_of_valid_regs++;
unique_regs |= regs[i].Bit();
@@ -220,7 +235,7 @@ bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
number_of_valid_fpregs++;
unique_fpregs |= regs[i].Bit();
} else {
- ASSERT(!regs[i].IsValid());
+ DCHECK(!regs[i].IsValid());
}
}
@@ -229,8 +244,8 @@ bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
int number_of_unique_fpregs =
CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);
- ASSERT(number_of_valid_regs >= number_of_unique_regs);
- ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);
+ DCHECK(number_of_valid_regs >= number_of_unique_regs);
+ DCHECK(number_of_valid_fpregs >= number_of_unique_fpregs);
return (number_of_valid_regs != number_of_unique_regs) ||
(number_of_valid_fpregs != number_of_unique_fpregs);
@@ -241,7 +256,7 @@ bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
const CPURegister& reg3, const CPURegister& reg4,
const CPURegister& reg5, const CPURegister& reg6,
const CPURegister& reg7, const CPURegister& reg8) {
- ASSERT(reg1.IsValid());
+ DCHECK(reg1.IsValid());
bool match = true;
match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
@@ -254,36 +269,285 @@ bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
}
-void Operand::initialize_handle(Handle<Object> handle) {
+void Immediate::InitializeHandle(Handle<Object> handle) {
AllowDeferredHandleDereference using_raw_address;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
- ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
- immediate_ = reinterpret_cast<intptr_t>(handle.location());
+ DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+ value_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
- immediate_ = reinterpret_cast<intptr_t>(obj);
+ value_ = reinterpret_cast<intptr_t>(obj);
rmode_ = RelocInfo::NONE64;
}
}
-bool Operand::NeedsRelocation(Isolate* isolate) const {
- if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
- return Serializer::enabled(isolate);
+bool Operand::NeedsRelocation(const Assembler* assembler) const {
+ RelocInfo::Mode rmode = immediate_.rmode();
+
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+ return assembler->serializer_enabled();
}
- return !RelocInfo::IsNone(rmode_);
+ return !RelocInfo::IsNone(rmode);
}
-// Assembler
+// Constant Pool.
+void ConstPool::RecordEntry(intptr_t data,
+ RelocInfo::Mode mode) {
+ DCHECK(mode != RelocInfo::COMMENT &&
+ mode != RelocInfo::POSITION &&
+ mode != RelocInfo::STATEMENT_POSITION &&
+ mode != RelocInfo::CONST_POOL &&
+ mode != RelocInfo::VENEER_POOL &&
+ mode != RelocInfo::CODE_AGE_SEQUENCE);
+
+ uint64_t raw_data = static_cast<uint64_t>(data);
+ int offset = assm_->pc_offset();
+ if (IsEmpty()) {
+ first_use_ = offset;
+ }
+
+ std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset);
+ if (CanBeShared(mode)) {
+ shared_entries_.insert(entry);
+ if (shared_entries_.count(entry.first) == 1) {
+ shared_entries_count++;
+ }
+ } else {
+ unique_entries_.push_back(entry);
+ }
+
+ if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
+ // Request constant pool emission after the next instruction.
+ assm_->SetNextConstPoolCheckIn(1);
+ }
+}
+
+
+int ConstPool::DistanceToFirstUse() {
+ DCHECK(first_use_ >= 0);
+ return assm_->pc_offset() - first_use_;
+}
+
+
+int ConstPool::MaxPcOffset() {
+ // There are no pending entries in the pool so we can never get out of
+ // range.
+ if (IsEmpty()) return kMaxInt;
+
+ // Entries are not necessarily emitted in the order they are added so in the
+ // worst case the first constant pool use will be accessing the last entry.
+ return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
+}
+
+
+int ConstPool::WorstCaseSize() {
+ if (IsEmpty()) return 0;
+
+ // Max size prologue:
+ // b over
+ // ldr xzr, #pool_size
+ // blr xzr
+ // nop
+ // All entries are 64-bit for now.
+ return 4 * kInstructionSize + EntryCount() * kPointerSize;
+}
+
+
+int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
+ if (IsEmpty()) return 0;
+
+ // Prologue is:
+ // b over ;; if require_jump
+ // ldr xzr, #pool_size
+ // blr xzr
+ // nop ;; if not 64-bit aligned
+ int prologue_size = require_jump ? kInstructionSize : 0;
+ prologue_size += 2 * kInstructionSize;
+ prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ?
+ 0 : kInstructionSize;
+
+ // All entries are 64-bit for now.
+ return prologue_size + EntryCount() * kPointerSize;
+}
+
+
+void ConstPool::Emit(bool require_jump) {
+ DCHECK(!assm_->is_const_pool_blocked());
+ // Prevent recursive pool emission and protect from veneer pools.
+ Assembler::BlockPoolsScope block_pools(assm_);
+
+ int size = SizeIfEmittedAtCurrentPc(require_jump);
+ Label size_check;
+ assm_->bind(&size_check);
+
+ assm_->RecordConstPool(size);
+ // Emit the constant pool. It is preceded by an optional branch if
+ // require_jump and a header which will:
+ // 1) Encode the size of the constant pool, for use by the disassembler.
+ // 2) Terminate the program, to try to prevent execution from accidentally
+ // flowing into the constant pool.
+ // 3) align the pool entries to 64-bit.
+ // The header is therefore made of up to three arm64 instructions:
+ // ldr xzr, #<size of the constant pool in 32-bit words>
+ // blr xzr
+ // nop
+ //
+ // If executed, the header will likely segfault and lr will point to the
+ // instruction following the offending blr.
+ // TODO(all): Make the alignment part less fragile. Currently code is
+ // allocated as a byte array so there are no guarantees the alignment will
+ // be preserved on compaction. Currently it works as allocation seems to be
+ // 64-bit aligned.
+
+ // Emit branch if required
+ Label after_pool;
+ if (require_jump) {
+ assm_->b(&after_pool);
+ }
+
+ // Emit the header.
+ assm_->RecordComment("[ Constant Pool");
+ EmitMarker();
+ EmitGuard();
+ assm_->Align(8);
+
+ // Emit constant pool entries.
+ // TODO(all): currently each relocated constant is 64 bits, consider adding
+ // support for 32-bit entries.
+ EmitEntries();
+ assm_->RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ assm_->bind(&after_pool);
+ }
+
+ DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) ==
+ static_cast<unsigned>(size));
+}
+
+
+void ConstPool::Clear() {
+ shared_entries_.clear();
+ shared_entries_count = 0;
+ unique_entries_.clear();
+ first_use_ = -1;
+}
+
+
+bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
+ // Constant pool currently does not support 32-bit entries.
+ DCHECK(mode != RelocInfo::NONE32);
+
+ return RelocInfo::IsNone(mode) ||
+ (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL));
+}
+
+void ConstPool::EmitMarker() {
+ // A constant pool size is expressed in number of 32-bits words.
+ // Currently all entries are 64-bit.
+ // + 1 is for the crash guard.
+ // + 0/1 for alignment.
+ int word_count = EntryCount() * 2 + 1 +
+ (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
+ assm_->Emit(LDR_x_lit |
+ Assembler::ImmLLiteral(word_count) |
+ Assembler::Rt(xzr));
+}
+
+
+MemOperand::PairResult MemOperand::AreConsistentForPair(
+ const MemOperand& operandA,
+ const MemOperand& operandB,
+ int access_size_log2) {
+ DCHECK(access_size_log2 >= 0);
+ DCHECK(access_size_log2 <= 3);
+ // Step one: check that they share the same base, that the mode is Offset
+ // and that the offset is a multiple of access size.
+ if (!operandA.base().Is(operandB.base()) ||
+ (operandA.addrmode() != Offset) ||
+ (operandB.addrmode() != Offset) ||
+ ((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) {
+ return kNotPair;
+ }
+ // Step two: check that the offsets are contiguous and that the range
+ // is OK for ldp/stp.
+ if ((operandB.offset() == operandA.offset() + (1 << access_size_log2)) &&
+ is_int7(operandA.offset() >> access_size_log2)) {
+ return kPairAB;
+ }
+ if ((operandA.offset() == operandB.offset() + (1 << access_size_log2)) &&
+ is_int7(operandB.offset() >> access_size_log2)) {
+ return kPairBA;
+ }
+ return kNotPair;
+}
+
+
+void ConstPool::EmitGuard() {
+#ifdef DEBUG
+ Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
+ DCHECK(instr->preceding()->IsLdrLiteralX() &&
+ instr->preceding()->Rt() == xzr.code());
+#endif
+ assm_->EmitPoolGuard();
+}
+
+
+void ConstPool::EmitEntries() {
+ DCHECK(IsAligned(assm_->pc_offset(), 8));
+
+ typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator;
+ SharedEntriesIterator value_it;
+ // Iterate through the keys (constant pool values).
+ for (value_it = shared_entries_.begin();
+ value_it != shared_entries_.end();
+ value_it = shared_entries_.upper_bound(value_it->first)) {
+ std::pair<SharedEntriesIterator, SharedEntriesIterator> range;
+ uint64_t data = value_it->first;
+ range = shared_entries_.equal_range(data);
+ SharedEntriesIterator offset_it;
+ // Iterate through the offsets of a given key.
+ for (offset_it = range.first; offset_it != range.second; offset_it++) {
+ Instruction* instr = assm_->InstructionAt(offset_it->second);
+
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
+ instr->SetImmPCOffsetTarget(assm_->pc());
+ }
+ assm_->dc64(data);
+ }
+ shared_entries_.clear();
+ shared_entries_count = 0;
+
+ // Emit unique entries.
+ std::vector<std::pair<uint64_t, int> >::const_iterator unique_it;
+ for (unique_it = unique_entries_.begin();
+ unique_it != unique_entries_.end();
+ unique_it++) {
+ Instruction* instr = assm_->InstructionAt(unique_it->second);
+
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
+ instr->SetImmPCOffsetTarget(assm_->pc());
+ assm_->dc64(unique_it->first);
+ }
+ unique_entries_.clear();
+ first_use_ = -1;
+}
+
+
+// Assembler
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
+ constpool_(this),
recorded_ast_id_(TypeFeedbackId::None()),
unresolved_branches_(),
positions_recorder_(this) {
@@ -294,28 +558,27 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
Assembler::~Assembler() {
- ASSERT(num_pending_reloc_info_ == 0);
- ASSERT(const_pool_blocked_nesting_ == 0);
- ASSERT(veneer_pool_blocked_nesting_ == 0);
+ DCHECK(constpool_.IsEmpty());
+ DCHECK(const_pool_blocked_nesting_ == 0);
+ DCHECK(veneer_pool_blocked_nesting_ == 0);
}
void Assembler::Reset() {
#ifdef DEBUG
- ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
- ASSERT(const_pool_blocked_nesting_ == 0);
- ASSERT(veneer_pool_blocked_nesting_ == 0);
- ASSERT(unresolved_branches_.empty());
+ DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
+ DCHECK(const_pool_blocked_nesting_ == 0);
+ DCHECK(veneer_pool_blocked_nesting_ == 0);
+ DCHECK(unresolved_branches_.empty());
memset(buffer_, 0, pc_ - buffer_);
#endif
pc_ = buffer_;
reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
reinterpret_cast<byte*>(pc_));
- num_pending_reloc_info_ = 0;
+ constpool_.Clear();
next_constant_pool_check_ = 0;
next_veneer_pool_check_ = kMaxInt;
no_const_pool_before_ = 0;
- first_const_pool_use_ = -1;
ClearRecordedAstId();
}
@@ -323,7 +586,7 @@ void Assembler::Reset() {
void Assembler::GetCode(CodeDesc* desc) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
- ASSERT(num_pending_reloc_info_ == 0);
+ DCHECK(constpool_.IsEmpty());
// Set up code descriptor.
if (desc) {
@@ -338,7 +601,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- ASSERT(m >= 4 && IsPowerOf2(m));
+ DCHECK(m >= 4 && IsPowerOf2(m));
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -366,7 +629,7 @@ void Assembler::CheckLabelLinkChain(Label const * label) {
void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
Label* label,
Instruction* label_veneer) {
- ASSERT(label->is_linked());
+ DCHECK(label->is_linked());
CheckLabelLinkChain(label);
@@ -382,7 +645,7 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
link = next_link;
}
- ASSERT(branch == link);
+ DCHECK(branch == link);
next_link = branch->ImmPCOffsetTarget();
if (branch == prev_link) {
@@ -448,8 +711,8 @@ void Assembler::bind(Label* label) {
// that are linked to this label will be updated to point to the newly-bound
// label.
- ASSERT(!label->is_near_linked());
- ASSERT(!label->is_bound());
+ DCHECK(!label->is_near_linked());
+ DCHECK(!label->is_bound());
DeleteUnresolvedBranchInfoForLabel(label);
@@ -472,11 +735,11 @@ void Assembler::bind(Label* label) {
CheckLabelLinkChain(label);
- ASSERT(linkoffset >= 0);
- ASSERT(linkoffset < pc_offset());
- ASSERT((linkoffset > prevlinkoffset) ||
+ DCHECK(linkoffset >= 0);
+ DCHECK(linkoffset < pc_offset());
+ DCHECK((linkoffset > prevlinkoffset) ||
(linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
- ASSERT(prevlinkoffset >= 0);
+ DCHECK(prevlinkoffset >= 0);
// Update the link to point to the label.
link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
@@ -492,13 +755,13 @@ void Assembler::bind(Label* label) {
}
label->bind_to(pc_offset());
- ASSERT(label->is_bound());
- ASSERT(!label->is_linked());
+ DCHECK(label->is_bound());
+ DCHECK(!label->is_linked());
}
int Assembler::LinkAndGetByteOffsetTo(Label* label) {
- ASSERT(sizeof(*pc_) == 1);
+ DCHECK(sizeof(*pc_) == 1);
CheckLabelLinkChain(label);
int offset;
@@ -513,7 +776,7 @@ int Assembler::LinkAndGetByteOffsetTo(Label* label) {
// Note that offset can be zero for self-referential instructions. (This
// could be useful for ADR, for example.)
offset = label->pos() - pc_offset();
- ASSERT(offset <= 0);
+ DCHECK(offset <= 0);
} else {
if (label->is_linked()) {
// The label is linked, so the referring instruction should be added onto
@@ -522,7 +785,7 @@ int Assembler::LinkAndGetByteOffsetTo(Label* label) {
// In this case, label->pos() returns the offset of the last linked
// instruction from the start of the buffer.
offset = label->pos() - pc_offset();
- ASSERT(offset != kStartOfLabelLinkChain);
+ DCHECK(offset != kStartOfLabelLinkChain);
// Note that the offset here needs to be PC-relative only so that the
// first instruction in a buffer can link to an unbound label. Otherwise,
// the offset would be 0 for this case, and 0 is reserved for
@@ -541,7 +804,7 @@ int Assembler::LinkAndGetByteOffsetTo(Label* label) {
void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
- ASSERT(label->is_linked());
+ DCHECK(label->is_linked());
CheckLabelLinkChain(label);
int link_offset = label->pos();
@@ -576,7 +839,7 @@ void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
if (unresolved_branches_.empty()) {
- ASSERT(next_veneer_pool_check_ == kMaxInt);
+ DCHECK(next_veneer_pool_check_ == kMaxInt);
return;
}
@@ -606,8 +869,7 @@ void Assembler::StartBlockConstPool() {
void Assembler::EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
// Check the constant pool hasn't been blocked for too long.
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool)));
+ DCHECK(pc_offset() < constpool_.MaxPcOffset());
// Two cases:
// * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
// still blocked
@@ -632,7 +894,7 @@ bool Assembler::IsConstantPoolAt(Instruction* instr) {
// It is still worth asserting the marker is complete.
// 4: blr xzr
- ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() &&
+ DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
instr->following()->Rn() == xzr.code()));
return result;
@@ -666,13 +928,6 @@ int Assembler::ConstantPoolSizeAt(Instruction* instr) {
}
-void Assembler::ConstantPoolMarker(uint32_t size) {
- ASSERT(is_const_pool_blocked());
- // + 1 is for the crash guard.
- Emit(LDR_x_lit | ImmLLiteral(size + 1) | Rt(xzr));
-}
-
-
void Assembler::EmitPoolGuard() {
// We must generate only one instruction as this is used in scopes that
// control the size of the code generated.
@@ -680,18 +935,6 @@ void Assembler::EmitPoolGuard() {
}
-void Assembler::ConstantPoolGuard() {
-#ifdef DEBUG
- // Currently this is only used after a constant pool marker.
- ASSERT(is_const_pool_blocked());
- Instruction* instr = reinterpret_cast<Instruction*>(pc_);
- ASSERT(instr->preceding()->IsLdrLiteralX() &&
- instr->preceding()->Rt() == xzr.code());
-#endif
- EmitPoolGuard();
-}
-
-
void Assembler::StartBlockVeneerPool() {
++veneer_pool_blocked_nesting_;
}
@@ -700,7 +943,7 @@ void Assembler::StartBlockVeneerPool() {
void Assembler::EndBlockVeneerPool() {
if (--veneer_pool_blocked_nesting_ == 0) {
// Check the veneer pool hasn't been blocked for too long.
- ASSERT(unresolved_branches_.empty() ||
+ DCHECK(unresolved_branches_.empty() ||
(pc_offset() < unresolved_branches_first_limit()));
}
}
@@ -708,24 +951,24 @@ void Assembler::EndBlockVeneerPool() {
void Assembler::br(const Register& xn) {
positions_recorder()->WriteRecordedPositions();
- ASSERT(xn.Is64Bits());
+ DCHECK(xn.Is64Bits());
Emit(BR | Rn(xn));
}
void Assembler::blr(const Register& xn) {
positions_recorder()->WriteRecordedPositions();
- ASSERT(xn.Is64Bits());
+ DCHECK(xn.Is64Bits());
// The pattern 'blr xzr' is used as a guard to detect when execution falls
// through the constant pool. It should not be emitted.
- ASSERT(!xn.Is(xzr));
+ DCHECK(!xn.Is(xzr));
Emit(BLR | Rn(xn));
}
void Assembler::ret(const Register& xn) {
positions_recorder()->WriteRecordedPositions();
- ASSERT(xn.Is64Bits());
+ DCHECK(xn.Is64Bits());
Emit(RET | Rn(xn));
}
@@ -796,7 +1039,7 @@ void Assembler::tbz(const Register& rt,
unsigned bit_pos,
int imm14) {
positions_recorder()->WriteRecordedPositions();
- ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
+ DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}
@@ -813,7 +1056,7 @@ void Assembler::tbnz(const Register& rt,
unsigned bit_pos,
int imm14) {
positions_recorder()->WriteRecordedPositions();
- ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
+ DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}
@@ -827,7 +1070,7 @@ void Assembler::tbnz(const Register& rt,
void Assembler::adr(const Register& rd, int imm21) {
- ASSERT(rd.Is64Bits());
+ DCHECK(rd.Is64Bits());
Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}
@@ -996,8 +1239,8 @@ void Assembler::eon(const Register& rd,
void Assembler::lslv(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == rm.SizeInBits());
Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
}
@@ -1005,8 +1248,8 @@ void Assembler::lslv(const Register& rd,
void Assembler::lsrv(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == rm.SizeInBits());
Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
}
@@ -1014,8 +1257,8 @@ void Assembler::lsrv(const Register& rd,
void Assembler::asrv(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == rm.SizeInBits());
Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
}
@@ -1023,8 +1266,8 @@ void Assembler::asrv(const Register& rd,
void Assembler::rorv(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == rm.SizeInBits());
Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
}
@@ -1034,7 +1277,7 @@ void Assembler::bfm(const Register& rd,
const Register& rn,
unsigned immr,
unsigned imms) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
Emit(SF(rd) | BFM | N |
ImmR(immr, rd.SizeInBits()) |
@@ -1047,7 +1290,7 @@ void Assembler::sbfm(const Register& rd,
const Register& rn,
unsigned immr,
unsigned imms) {
- ASSERT(rd.Is64Bits() || rn.Is32Bits());
+ DCHECK(rd.Is64Bits() || rn.Is32Bits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
Emit(SF(rd) | SBFM | N |
ImmR(immr, rd.SizeInBits()) |
@@ -1060,7 +1303,7 @@ void Assembler::ubfm(const Register& rd,
const Register& rn,
unsigned immr,
unsigned imms) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
Emit(SF(rd) | UBFM | N |
ImmR(immr, rd.SizeInBits()) |
@@ -1073,8 +1316,8 @@ void Assembler::extr(const Register& rd,
const Register& rn,
const Register& rm,
unsigned lsb) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == rm.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
Emit(SF(rd) | EXTR | N | Rm(rm) |
ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
@@ -1114,34 +1357,34 @@ void Assembler::csneg(const Register& rd,
void Assembler::cset(const Register &rd, Condition cond) {
- ASSERT((cond != al) && (cond != nv));
+ DCHECK((cond != al) && (cond != nv));
Register zr = AppropriateZeroRegFor(rd);
- csinc(rd, zr, zr, InvertCondition(cond));
+ csinc(rd, zr, zr, NegateCondition(cond));
}
void Assembler::csetm(const Register &rd, Condition cond) {
- ASSERT((cond != al) && (cond != nv));
+ DCHECK((cond != al) && (cond != nv));
Register zr = AppropriateZeroRegFor(rd);
- csinv(rd, zr, zr, InvertCondition(cond));
+ csinv(rd, zr, zr, NegateCondition(cond));
}
void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
- ASSERT((cond != al) && (cond != nv));
- csinc(rd, rn, rn, InvertCondition(cond));
+ DCHECK((cond != al) && (cond != nv));
+ csinc(rd, rn, rn, NegateCondition(cond));
}
void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
- ASSERT((cond != al) && (cond != nv));
- csinv(rd, rn, rn, InvertCondition(cond));
+ DCHECK((cond != al) && (cond != nv));
+ csinv(rd, rn, rn, NegateCondition(cond));
}
void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
- ASSERT((cond != al) && (cond != nv));
- csneg(rd, rn, rn, InvertCondition(cond));
+ DCHECK((cond != al) && (cond != nv));
+ csneg(rd, rn, rn, NegateCondition(cond));
}
@@ -1150,8 +1393,8 @@ void Assembler::ConditionalSelect(const Register& rd,
const Register& rm,
Condition cond,
ConditionalSelectOp op) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == rm.SizeInBits());
Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
}
@@ -1184,7 +1427,7 @@ void Assembler::DataProcessing3Source(const Register& rd,
void Assembler::mul(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(AreSameSizeAndType(rd, rn, rm));
+ DCHECK(AreSameSizeAndType(rd, rn, rm));
Register zr = AppropriateZeroRegFor(rn);
DataProcessing3Source(rd, rn, rm, zr, MADD);
}
@@ -1194,7 +1437,7 @@ void Assembler::madd(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
+ DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
DataProcessing3Source(rd, rn, rm, ra, MADD);
}
@@ -1202,7 +1445,7 @@ void Assembler::madd(const Register& rd,
void Assembler::mneg(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(AreSameSizeAndType(rd, rn, rm));
+ DCHECK(AreSameSizeAndType(rd, rn, rm));
Register zr = AppropriateZeroRegFor(rn);
DataProcessing3Source(rd, rn, rm, zr, MSUB);
}
@@ -1212,7 +1455,7 @@ void Assembler::msub(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
+ DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
DataProcessing3Source(rd, rn, rm, ra, MSUB);
}
@@ -1221,8 +1464,8 @@ void Assembler::smaddl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- ASSERT(rd.Is64Bits() && ra.Is64Bits());
- ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DCHECK(rd.Is64Bits() && ra.Is64Bits());
+ DCHECK(rn.Is32Bits() && rm.Is32Bits());
DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
}
@@ -1231,8 +1474,8 @@ void Assembler::smsubl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- ASSERT(rd.Is64Bits() && ra.Is64Bits());
- ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DCHECK(rd.Is64Bits() && ra.Is64Bits());
+ DCHECK(rn.Is32Bits() && rm.Is32Bits());
DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
}
@@ -1241,8 +1484,8 @@ void Assembler::umaddl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- ASSERT(rd.Is64Bits() && ra.Is64Bits());
- ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DCHECK(rd.Is64Bits() && ra.Is64Bits());
+ DCHECK(rn.Is32Bits() && rm.Is32Bits());
DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
}
@@ -1251,8 +1494,8 @@ void Assembler::umsubl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- ASSERT(rd.Is64Bits() && ra.Is64Bits());
- ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DCHECK(rd.Is64Bits() && ra.Is64Bits());
+ DCHECK(rn.Is32Bits() && rm.Is32Bits());
DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
}
@@ -1260,8 +1503,8 @@ void Assembler::umsubl(const Register& rd,
void Assembler::smull(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(rd.Is64Bits());
- ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DCHECK(rd.Is64Bits());
+ DCHECK(rn.Is32Bits() && rm.Is32Bits());
DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
}
@@ -1269,7 +1512,7 @@ void Assembler::smull(const Register& rd,
void Assembler::smulh(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(AreSameSizeAndType(rd, rn, rm));
+ DCHECK(AreSameSizeAndType(rd, rn, rm));
DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
}
@@ -1277,8 +1520,8 @@ void Assembler::smulh(const Register& rd,
void Assembler::sdiv(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == rm.SizeInBits());
Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
}
@@ -1286,8 +1529,8 @@ void Assembler::sdiv(const Register& rd,
void Assembler::udiv(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == rm.SizeInBits());
Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
}
@@ -1306,7 +1549,7 @@ void Assembler::rev16(const Register& rd,
void Assembler::rev32(const Register& rd,
const Register& rn) {
- ASSERT(rd.Is64Bits());
+ DCHECK(rd.Is64Bits());
DataProcessing1Source(rd, rn, REV);
}
@@ -1346,7 +1589,7 @@ void Assembler::stp(const CPURegister& rt,
void Assembler::ldpsw(const Register& rt,
const Register& rt2,
const MemOperand& src) {
- ASSERT(rt.Is64Bits());
+ DCHECK(rt.Is64Bits());
LoadStorePair(rt, rt2, src, LDPSW_x);
}
@@ -1356,8 +1599,8 @@ void Assembler::LoadStorePair(const CPURegister& rt,
const MemOperand& addr,
LoadStorePairOp op) {
// 'rt' and 'rt2' can only be aliased for stores.
- ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
- ASSERT(AreSameSizeAndType(rt, rt2));
+ DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
+ DCHECK(AreSameSizeAndType(rt, rt2));
Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
@@ -1367,13 +1610,13 @@ void Assembler::LoadStorePair(const CPURegister& rt,
addrmodeop = LoadStorePairOffsetFixed;
} else {
// Pre-index and post-index modes.
- ASSERT(!rt.Is(addr.base()));
- ASSERT(!rt2.Is(addr.base()));
- ASSERT(addr.offset() != 0);
+ DCHECK(!rt.Is(addr.base()));
+ DCHECK(!rt2.Is(addr.base()));
+ DCHECK(addr.offset() != 0);
if (addr.IsPreIndex()) {
addrmodeop = LoadStorePairPreIndexFixed;
} else {
- ASSERT(addr.IsPostIndex());
+ DCHECK(addr.IsPostIndex());
addrmodeop = LoadStorePairPostIndexFixed;
}
}
@@ -1401,9 +1644,9 @@ void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& addr,
LoadStorePairNonTemporalOp op) {
- ASSERT(!rt.Is(rt2));
- ASSERT(AreSameSizeAndType(rt, rt2));
- ASSERT(addr.IsImmediateOffset());
+ DCHECK(!rt.Is(rt2));
+ DCHECK(AreSameSizeAndType(rt, rt2));
+ DCHECK(addr.IsImmediateOffset());
LSDataSize size = CalcLSPairDataSize(
static_cast<LoadStorePairOp>(op & LoadStorePairMask));
@@ -1454,32 +1697,28 @@ void Assembler::str(const CPURegister& rt, const MemOperand& src) {
void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
- ASSERT(rt.Is64Bits());
+ DCHECK(rt.Is64Bits());
LoadStore(rt, src, LDRSW_x);
}
-void Assembler::ldr(const Register& rt, uint64_t imm) {
- // TODO(all): Constant pool may be garbage collected. Hence we cannot store
- // arbitrary values in them. Manually move it for now. Fix
- // MacroAssembler::Fmov when this is implemented.
- UNIMPLEMENTED();
+void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
+ // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
+ // constant pool. It should not be emitted.
+ DCHECK(!rt.IsZero());
+ Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
}
-void Assembler::ldr(const FPRegister& ft, double imm) {
- // TODO(all): Constant pool may be garbage collected. Hence we cannot store
- // arbitrary values in them. Manually move it for now. Fix
- // MacroAssembler::Fmov when this is implemented.
- UNIMPLEMENTED();
-}
-
+void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
+ // Currently we only support 64-bit literals.
+ DCHECK(rt.Is64Bits());
-void Assembler::ldr(const FPRegister& ft, float imm) {
- // TODO(all): Constant pool may be garbage collected. Hence we cannot store
- // arbitrary values in them. Manually move it for now. Fix
- // MacroAssembler::Fmov when this is implemented.
- UNIMPLEMENTED();
+ RecordRelocInfo(imm.rmode(), imm.value());
+ BlockConstPoolFor(1);
+ // The load will be patched when the constant pool is emitted; the patching
+ // code expects a load literal with offset 0.
+ ldr_pcrel(rt, 0);
}
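
[Editorial note] The new ldr(rt, Immediate) path records a relocation entry and emits ldr_pcrel(rt, 0) as a placeholder to be fixed up once the pool is laid out. The standalone sketch below shows what that patching amounts to, assuming the standard A64 LDR (literal) layout (signed 19-bit word offset in bits 5-23, Rt in bits 0-4); the instruction word and field positions are architectural assumptions, not taken from the patch itself.

#include <cassert>
#include <cstdint>

// Rewrite the imm19 field of an LDR (literal) instruction so the placeholder
// 'ldr rt, pc+0' points at a constant pool entry byte_offset bytes away.
static uint32_t PatchLdrLiteralOffset(uint32_t instr, int64_t byte_offset) {
  assert((byte_offset & 3) == 0);                    // word aligned
  int64_t imm19 = byte_offset >> 2;
  assert(imm19 >= -(1 << 18) && imm19 < (1 << 18));  // fits in 19 signed bits
  uint32_t mask = ((1u << 19) - 1) << 5;             // bits 5..23
  return (instr & ~mask) |
         ((static_cast<uint32_t>(imm19) & 0x7ffffu) << 5);
}

int main() {
  uint32_t ldr_x0_pc0 = 0x58000000;                  // ldr x0, pc+0
  uint32_t patched = PatchLdrLiteralOffset(ldr_x0_pc0, 64);
  assert(((patched >> 5) & 0x7ffff) == 16);          // 64 bytes == 16 words
  return 0;
}
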
@@ -1501,13 +1740,13 @@ void Assembler::mvn(const Register& rd, const Operand& operand) {
void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
- ASSERT(rt.Is64Bits());
+ DCHECK(rt.Is64Bits());
Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
}
void Assembler::msr(SystemRegister sysreg, const Register& rt) {
- ASSERT(rt.Is64Bits());
+ DCHECK(rt.Is64Bits());
Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
}
@@ -1533,35 +1772,35 @@ void Assembler::isb() {
void Assembler::fmov(FPRegister fd, double imm) {
- ASSERT(fd.Is64Bits());
- ASSERT(IsImmFP64(imm));
+ DCHECK(fd.Is64Bits());
+ DCHECK(IsImmFP64(imm));
Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
}
void Assembler::fmov(FPRegister fd, float imm) {
- ASSERT(fd.Is32Bits());
- ASSERT(IsImmFP32(imm));
+ DCHECK(fd.Is32Bits());
+ DCHECK(IsImmFP32(imm));
Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
}
void Assembler::fmov(Register rd, FPRegister fn) {
- ASSERT(rd.SizeInBits() == fn.SizeInBits());
+ DCHECK(rd.SizeInBits() == fn.SizeInBits());
FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
Emit(op | Rd(rd) | Rn(fn));
}
void Assembler::fmov(FPRegister fd, Register rn) {
- ASSERT(fd.SizeInBits() == rn.SizeInBits());
+ DCHECK(fd.SizeInBits() == rn.SizeInBits());
FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
Emit(op | Rd(fd) | Rn(rn));
}
void Assembler::fmov(FPRegister fd, FPRegister fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ DCHECK(fd.SizeInBits() == fn.SizeInBits());
Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
}
@@ -1656,56 +1895,56 @@ void Assembler::fminnm(const FPRegister& fd,
void Assembler::fabs(const FPRegister& fd,
const FPRegister& fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ DCHECK(fd.SizeInBits() == fn.SizeInBits());
FPDataProcessing1Source(fd, fn, FABS);
}
void Assembler::fneg(const FPRegister& fd,
const FPRegister& fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ DCHECK(fd.SizeInBits() == fn.SizeInBits());
FPDataProcessing1Source(fd, fn, FNEG);
}
void Assembler::fsqrt(const FPRegister& fd,
const FPRegister& fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ DCHECK(fd.SizeInBits() == fn.SizeInBits());
FPDataProcessing1Source(fd, fn, FSQRT);
}
void Assembler::frinta(const FPRegister& fd,
const FPRegister& fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ DCHECK(fd.SizeInBits() == fn.SizeInBits());
FPDataProcessing1Source(fd, fn, FRINTA);
}
void Assembler::frintm(const FPRegister& fd,
const FPRegister& fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ DCHECK(fd.SizeInBits() == fn.SizeInBits());
FPDataProcessing1Source(fd, fn, FRINTM);
}
void Assembler::frintn(const FPRegister& fd,
const FPRegister& fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ DCHECK(fd.SizeInBits() == fn.SizeInBits());
FPDataProcessing1Source(fd, fn, FRINTN);
}
void Assembler::frintz(const FPRegister& fd,
const FPRegister& fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ DCHECK(fd.SizeInBits() == fn.SizeInBits());
FPDataProcessing1Source(fd, fn, FRINTZ);
}
void Assembler::fcmp(const FPRegister& fn,
const FPRegister& fm) {
- ASSERT(fn.SizeInBits() == fm.SizeInBits());
+ DCHECK(fn.SizeInBits() == fm.SizeInBits());
Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
}
@@ -1716,7 +1955,7 @@ void Assembler::fcmp(const FPRegister& fn,
// Although the fcmp instruction can strictly only take an immediate value of
// +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
// affect the result of the comparison.
- ASSERT(value == 0.0);
+ DCHECK(value == 0.0);
Emit(FPType(fn) | FCMP_zero | Rn(fn));
}
@@ -1725,7 +1964,7 @@ void Assembler::fccmp(const FPRegister& fn,
const FPRegister& fm,
StatusFlags nzcv,
Condition cond) {
- ASSERT(fn.SizeInBits() == fm.SizeInBits());
+ DCHECK(fn.SizeInBits() == fm.SizeInBits());
Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
}
@@ -1734,8 +1973,8 @@ void Assembler::fcsel(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
Condition cond) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
- ASSERT(fd.SizeInBits() == fm.SizeInBits());
+ DCHECK(fd.SizeInBits() == fn.SizeInBits());
+ DCHECK(fd.SizeInBits() == fm.SizeInBits());
Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
}
@@ -1751,11 +1990,11 @@ void Assembler::fcvt(const FPRegister& fd,
const FPRegister& fn) {
if (fd.Is64Bits()) {
// Convert float to double.
- ASSERT(fn.Is32Bits());
+ DCHECK(fn.Is32Bits());
FPDataProcessing1Source(fd, fn, FCVT_ds);
} else {
// Convert double to float.
- ASSERT(fn.Is64Bits());
+ DCHECK(fn.Is64Bits());
FPDataProcessing1Source(fd, fn, FCVT_sd);
}
}
@@ -1830,7 +2069,7 @@ void Assembler::ucvtf(const FPRegister& fd,
// negated bit.
// If b is 1, then B is 0.
Instr Assembler::ImmFP32(float imm) {
- ASSERT(IsImmFP32(imm));
+ DCHECK(IsImmFP32(imm));
// bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
uint32_t bits = float_to_rawbits(imm);
// bit7: a000.0000
@@ -1845,7 +2084,7 @@ Instr Assembler::ImmFP32(float imm) {
Instr Assembler::ImmFP64(double imm) {
- ASSERT(IsImmFP64(imm));
+ DCHECK(IsImmFP64(imm));
// bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000
uint64_t bits = double_to_rawbits(imm);
@@ -1865,10 +2104,19 @@ void Assembler::MoveWide(const Register& rd,
uint64_t imm,
int shift,
MoveWideImmediateOp mov_op) {
+ // Ignore the top 32 bits of an immediate if we're moving to a W register.
+ if (rd.Is32Bits()) {
+ // Check that the top 32 bits are zero (a positive 32-bit number) or top
+ // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
+ DCHECK(((imm >> kWRegSizeInBits) == 0) ||
+ ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff));
+ imm &= kWRegMask;
+ }
+
if (shift >= 0) {
// Explicit shift specified.
- ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
- ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
+ DCHECK((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
+ DCHECK(rd.Is64Bits() || (shift == 0) || (shift == 16));
shift /= 16;
} else {
// Calculate a new immediate and shift combination to encode the immediate
@@ -1880,17 +2128,17 @@ void Assembler::MoveWide(const Register& rd,
imm >>= 16;
shift = 1;
} else if ((imm & ~(0xffffUL << 32)) == 0) {
- ASSERT(rd.Is64Bits());
+ DCHECK(rd.Is64Bits());
imm >>= 32;
shift = 2;
} else if ((imm & ~(0xffffUL << 48)) == 0) {
- ASSERT(rd.Is64Bits());
+ DCHECK(rd.Is64Bits());
imm >>= 48;
shift = 3;
}
}
- ASSERT(is_uint16(imm));
+ DCHECK(is_uint16(imm));
Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
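
[Editorial note] A standalone sketch (plain C++, not part of the patch) of the W-register acceptance check added at the top of MoveWide: a 64-bit immediate is accepted for a 32-bit destination only if it is the zero- or sign-extension of a 32-bit value, after which only the low 32 bits are encoded. The 0x1ffffffff constant mirrors the DCHECK above (top 33 bits all set).

#include <cassert>
#include <cstdint>

static bool FitsInWReg(uint64_t imm) {
  return (imm >> 32) == 0 ||            // positive 32-bit value
         (imm >> 31) == 0x1ffffffffUL;  // negative value, sign-extended to 64 bits
}

int main() {
  assert(FitsInWReg(0x00000000deadbeefUL));   // plain 32-bit value
  assert(FitsInWReg(0xffffffff80000000UL));   // INT32_MIN, sign-extended
  assert(!FitsInWReg(0x0000000180000000UL));  // needs more than 32 bits
  return 0;
}
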
@@ -1902,17 +2150,17 @@ void Assembler::AddSub(const Register& rd,
const Operand& operand,
FlagsUpdate S,
AddSubOp op) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(!operand.NeedsRelocation(isolate()));
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(!operand.NeedsRelocation(this));
if (operand.IsImmediate()) {
- int64_t immediate = operand.immediate();
- ASSERT(IsImmAddSub(immediate));
+ int64_t immediate = operand.ImmediateValue();
+ DCHECK(IsImmAddSub(immediate));
Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
ImmAddSub(immediate) | dest_reg | RnSP(rn));
} else if (operand.IsShiftedRegister()) {
- ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
- ASSERT(operand.shift() != ROR);
+ DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
+ DCHECK(operand.shift() != ROR);
// For instructions of the form:
// add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
@@ -1922,14 +2170,14 @@ void Assembler::AddSub(const Register& rd,
// or their 64-bit register equivalents, convert the operand from shifted to
// extended register mode, and emit an add/sub extended instruction.
if (rn.IsSP() || rd.IsSP()) {
- ASSERT(!(rd.IsSP() && (S == SetFlags)));
+ DCHECK(!(rd.IsSP() && (S == SetFlags)));
DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
AddSubExtendedFixed | op);
} else {
DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
}
} else {
- ASSERT(operand.IsExtendedRegister());
+ DCHECK(operand.IsExtendedRegister());
DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
}
}
@@ -1940,22 +2188,22 @@ void Assembler::AddSubWithCarry(const Register& rd,
const Operand& operand,
FlagsUpdate S,
AddSubWithCarryOp op) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
- ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
- ASSERT(!operand.NeedsRelocation(isolate()));
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == operand.reg().SizeInBits());
+ DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ DCHECK(!operand.NeedsRelocation(this));
Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
}
void Assembler::hlt(int code) {
- ASSERT(is_uint16(code));
+ DCHECK(is_uint16(code));
Emit(HLT | ImmException(code));
}
void Assembler::brk(int code) {
- ASSERT(is_uint16(code));
+ DCHECK(is_uint16(code));
Emit(BRK | ImmException(code));
}
@@ -1964,7 +2212,7 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) {
#ifdef USE_SIMULATOR
// Don't generate simulator specific code if we are building a snapshot, which
// might be run on real hardware.
- if (!Serializer::enabled(isolate())) {
+ if (!serializer_enabled()) {
// The arguments to the debug marker need to be contiguous in memory, so
// make sure we don't try to emit pools.
BlockPoolsScope scope(this);
@@ -1975,11 +2223,11 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) {
// Refer to instructions-arm64.h for a description of the marker and its
// arguments.
hlt(kImmExceptionIsDebug);
- ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
+ DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
dc32(code);
- ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
+ DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
dc32(params);
- ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
+ DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
EmitStringData(message);
hlt(kImmExceptionIsUnreachable);
@@ -1998,15 +2246,15 @@ void Assembler::Logical(const Register& rd,
const Register& rn,
const Operand& operand,
LogicalOp op) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(!operand.NeedsRelocation(isolate()));
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(!operand.NeedsRelocation(this));
if (operand.IsImmediate()) {
- int64_t immediate = operand.immediate();
+ int64_t immediate = operand.ImmediateValue();
unsigned reg_size = rd.SizeInBits();
- ASSERT(immediate != 0);
- ASSERT(immediate != -1);
- ASSERT(rd.Is64Bits() || is_uint32(immediate));
+ DCHECK(immediate != 0);
+ DCHECK(immediate != -1);
+ DCHECK(rd.Is64Bits() || is_uint32(immediate));
// If the operation is NOT, invert the operation and immediate.
if ((op & NOT) == NOT) {
@@ -2023,8 +2271,8 @@ void Assembler::Logical(const Register& rd,
UNREACHABLE();
}
} else {
- ASSERT(operand.IsShiftedRegister());
- ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ DCHECK(operand.IsShiftedRegister());
+ DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
}
@@ -2051,13 +2299,13 @@ void Assembler::ConditionalCompare(const Register& rn,
Condition cond,
ConditionalCompareOp op) {
Instr ccmpop;
- ASSERT(!operand.NeedsRelocation(isolate()));
+ DCHECK(!operand.NeedsRelocation(this));
if (operand.IsImmediate()) {
- int64_t immediate = operand.immediate();
- ASSERT(IsImmConditionalCompare(immediate));
+ int64_t immediate = operand.ImmediateValue();
+ DCHECK(IsImmConditionalCompare(immediate));
ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
} else {
- ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
}
Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
@@ -2067,7 +2315,7 @@ void Assembler::ConditionalCompare(const Register& rn,
void Assembler::DataProcessing1Source(const Register& rd,
const Register& rn,
DataProcessing1SourceOp op) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
Emit(SF(rn) | op | Rn(rn) | Rd(rd));
}
@@ -2083,8 +2331,8 @@ void Assembler::FPDataProcessing2Source(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
FPDataProcessing2SourceOp op) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
- ASSERT(fd.SizeInBits() == fm.SizeInBits());
+ DCHECK(fd.SizeInBits() == fn.SizeInBits());
+ DCHECK(fd.SizeInBits() == fm.SizeInBits());
Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
}
@@ -2094,7 +2342,7 @@ void Assembler::FPDataProcessing3Source(const FPRegister& fd,
const FPRegister& fm,
const FPRegister& fa,
FPDataProcessing3SourceOp op) {
- ASSERT(AreSameSizeAndType(fd, fn, fm, fa));
+ DCHECK(AreSameSizeAndType(fd, fn, fm, fa));
Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
}
@@ -2126,7 +2374,7 @@ void Assembler::EmitExtendShift(const Register& rd,
const Register& rn,
Extend extend,
unsigned left_shift) {
- ASSERT(rd.SizeInBits() >= rn.SizeInBits());
+ DCHECK(rd.SizeInBits() >= rn.SizeInBits());
unsigned reg_size = rd.SizeInBits();
// Use the correct size of register.
Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
@@ -2145,7 +2393,7 @@ void Assembler::EmitExtendShift(const Register& rd,
case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
case UXTX:
case SXTX: {
- ASSERT(rn.SizeInBits() == kXRegSizeInBits);
+ DCHECK(rn.SizeInBits() == kXRegSizeInBits);
// Nothing to extend. Just shift.
lsl(rd, rn_, left_shift);
break;
@@ -2164,9 +2412,9 @@ void Assembler::DataProcShiftedRegister(const Register& rd,
const Operand& operand,
FlagsUpdate S,
Instr op) {
- ASSERT(operand.IsShiftedRegister());
- ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
- ASSERT(!operand.NeedsRelocation(isolate()));
+ DCHECK(operand.IsShiftedRegister());
+ DCHECK(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
+ DCHECK(!operand.NeedsRelocation(this));
Emit(SF(rd) | op | Flags(S) |
ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
Rm(operand.reg()) | Rn(rn) | Rd(rd));
@@ -2178,7 +2426,7 @@ void Assembler::DataProcExtendedRegister(const Register& rd,
const Operand& operand,
FlagsUpdate S,
Instr op) {
- ASSERT(!operand.NeedsRelocation(isolate()));
+ DCHECK(!operand.NeedsRelocation(this));
Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
@@ -2222,18 +2470,18 @@ void Assembler::LoadStore(const CPURegister& rt,
// Shifts are encoded in one bit, indicating a left shift by the memory
// access size.
- ASSERT((shift_amount == 0) ||
+ DCHECK((shift_amount == 0) ||
(shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
} else {
// Pre-index and post-index modes.
- ASSERT(!rt.Is(addr.base()));
+ DCHECK(!rt.Is(addr.base()));
if (IsImmLSUnscaled(offset)) {
if (addr.IsPreIndex()) {
Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
} else {
- ASSERT(addr.IsPostIndex());
+ DCHECK(addr.IsPostIndex());
Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
}
} else {
@@ -2255,25 +2503,9 @@ bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
}
-void Assembler::LoadLiteral(const CPURegister& rt, int offset_from_pc) {
- ASSERT((offset_from_pc & ((1 << kLiteralEntrySizeLog2) - 1)) == 0);
- // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
- // constant pool. It should not be emitted.
- ASSERT(!rt.Is(xzr));
- Emit(LDR_x_lit |
- ImmLLiteral(offset_from_pc >> kLiteralEntrySizeLog2) |
- Rt(rt));
-}
-
-
-void Assembler::LoadRelocatedValue(const CPURegister& rt,
- const Operand& operand,
- LoadLiteralOp op) {
- int64_t imm = operand.immediate();
- ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits()));
- RecordRelocInfo(operand.rmode(), imm);
- BlockConstPoolFor(1);
- Emit(op | ImmLLiteral(0) | Rt(rt));
+bool Assembler::IsImmLSPair(ptrdiff_t offset, LSDataSize size) {
+ bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+ return offset_is_size_multiple && is_int7(offset >> size);
}
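
[Editorial note] A standalone sketch (not part of the patch) of the IsImmLSPair predicate added above. size is the log2 of the access size in bytes, so for an X-register ldp/stp the encodable offsets are the multiples of 8 in [-512, 504].

#include <cassert>
#include <cstdint>

static bool IsImmLSPair(int64_t offset, unsigned size_log2) {
  bool size_multiple = ((offset >> size_log2) << size_log2) == offset;
  int64_t scaled = offset >> size_log2;
  return size_multiple && scaled >= -64 && scaled < 64;  // is_int7
}

int main() {
  // ldp/stp of X registers: size_log2 == 3, offsets are multiples of 8
  // in the range [-512, 504].
  assert(IsImmLSPair(504, 3));
  assert(IsImmLSPair(-512, 3));
  assert(!IsImmLSPair(512, 3));   // one step out of range
  assert(!IsImmLSPair(12, 3));    // not a multiple of 8
  return 0;
}
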
@@ -2289,94 +2521,200 @@ bool Assembler::IsImmLogical(uint64_t value,
unsigned* n,
unsigned* imm_s,
unsigned* imm_r) {
- ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
- ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
+ DCHECK((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
+ DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
+
+ bool negate = false;
// Logical immediates are encoded using parameters n, imm_s and imm_r using
// the following table:
//
- // N imms immr size S R
- // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
- // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
- // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
- // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
- // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
- // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
// (s bits must not be all set)
//
- // A pattern is constructed of size bits, where the least significant S+1
- // bits are set. The pattern is rotated right by R, and repeated across a
- // 32 or 64-bit value, depending on destination register width.
+ // A pattern is constructed of size bits, where the least significant S+1 bits
+ // are set. The pattern is rotated right by R, and repeated across a 32 or
+ // 64-bit value, depending on destination register width.
//
- // To test if an arbitary immediate can be encoded using this scheme, an
- // iterative algorithm is used.
+ // Put another way: the basic format of a logical immediate is a single
+ // contiguous stretch of 1 bits, repeated across the whole word at intervals
+ // given by a power of 2. To identify them quickly, we first locate the
+ // lowest stretch of 1 bits, then the next 1 bit above that; that combination
+ // is different for every logical immediate, so it gives us all the
+ // information we need to identify the only logical immediate that our input
+ // could be, and then we simply check if that's the value we actually have.
//
- // TODO(mcapewel) This code does not consider using X/W register overlap to
- // support 64-bit immediates where the top 32-bits are zero, and the bottom
- // 32-bits are an encodable logical immediate.
+ // (The rotation parameter does give the possibility of the stretch of 1 bits
+ // going 'round the end' of the word. To deal with that, we observe that in
+ // any situation where that happens the bitwise NOT of the value is also a
+ // valid logical immediate. So we simply invert the input whenever its low bit
+ // is set, and then we know that the rotated case can't arise.)
- // 1. If the value has all set or all clear bits, it can't be encoded.
- if ((value == 0) || (value == 0xffffffffffffffffUL) ||
- ((width == kWRegSizeInBits) && (value == 0xffffffff))) {
- return false;
+ if (value & 1) {
+ // If the low bit is 1, negate the value, and set a flag to remember that we
+ // did (so that we can adjust the return values appropriately).
+ negate = true;
+ value = ~value;
}
- unsigned lead_zero = CountLeadingZeros(value, width);
- unsigned lead_one = CountLeadingZeros(~value, width);
- unsigned trail_zero = CountTrailingZeros(value, width);
- unsigned trail_one = CountTrailingZeros(~value, width);
- unsigned set_bits = CountSetBits(value, width);
-
- // The fixed bits in the immediate s field.
- // If width == 64 (X reg), start at 0xFFFFFF80.
- // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
- // widths won't be executed.
- int imm_s_fixed = (width == kXRegSizeInBits) ? -128 : -64;
- int imm_s_mask = 0x3F;
-
- for (;;) {
- // 2. If the value is two bits wide, it can be encoded.
- if (width == 2) {
- *n = 0;
- *imm_s = 0x3C;
- *imm_r = (value & 3) - 1;
- return true;
- }
+ if (width == kWRegSizeInBits) {
+ // To handle 32-bit logical immediates, the very easiest thing is to repeat
+ // the input value twice to make a 64-bit word. The correct encoding of that
+ // as a logical immediate will also be the correct encoding of the 32-bit
+ // value.
- *n = (width == 64) ? 1 : 0;
- *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
- if ((lead_zero + set_bits) == width) {
- *imm_r = 0;
- } else {
- *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
- }
+ // The most-significant 32 bits may not be zero (i.e. negate is true), so
+ // shift the value left before duplicating it.
+ value <<= kWRegSizeInBits;
+ value |= value >> kWRegSizeInBits;
+ }
- // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
- // the bit width of the value, it can be encoded.
- if (lead_zero + trail_zero + set_bits == width) {
- return true;
+ // The basic analysis idea: imagine our input word looks like this.
+ //
+ // 0011111000111110001111100011111000111110001111100011111000111110
+ // c b a
+ // |<--d-->|
+ //
+ // We find the lowest set bit (as an actual power-of-2 value, not its index)
+ // and call it a. Then we add a to our original number, which wipes out the
+ // bottommost stretch of set bits and replaces it with a 1 carried into the
+ // next zero bit. Then we look for the new lowest set bit, which is in
+ // position b, and subtract it, so now our number is just like the original
+ // but with the lowest stretch of set bits completely gone. Now we find the
+ // lowest set bit again, which is position c in the diagram above. Then we'll
+ // measure the distance d between bit positions a and c (using CLZ), and that
+ // tells us that the only valid logical immediate that could possibly be equal
+ // to this number is the one in which a stretch of bits running from a to just
+ // below b is replicated every d bits.
+ uint64_t a = LargestPowerOf2Divisor(value);
+ uint64_t value_plus_a = value + a;
+ uint64_t b = LargestPowerOf2Divisor(value_plus_a);
+ uint64_t value_plus_a_minus_b = value_plus_a - b;
+ uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b);
+
+ int d, clz_a, out_n;
+ uint64_t mask;
+
+ if (c != 0) {
+ // The general case, in which there is more than one stretch of set bits.
+ // Compute the repeat distance d, and set up a bitmask covering the basic
+ // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
+ // of these cases the N bit of the output will be zero.
+ clz_a = CountLeadingZeros(a, kXRegSizeInBits);
+ int clz_c = CountLeadingZeros(c, kXRegSizeInBits);
+ d = clz_a - clz_c;
+ mask = ((V8_UINT64_C(1) << d) - 1);
+ out_n = 0;
+ } else {
+ // Handle degenerate cases.
+ //
+ // If any of those 'find lowest set bit' operations didn't find a set bit at
+ // all, then the word will have been zero thereafter, so in particular the
+ // last lowest_set_bit operation will have returned zero. So we can test for
+ // all the special case conditions in one go by seeing if c is zero.
+ if (a == 0) {
+ // The input was zero (or all 1 bits, which also ends up here after the
+ // inversion at the start of the function), so we just return false.
+ return false;
+ } else {
+ // Otherwise, if c was zero but a was not, then there's just one stretch
+ // of set bits in our word, meaning that we have the trivial case of
+ // d == 64 and only one 'repetition'. Set up all the same variables as in
+ // the general case above, and set the N bit in the output.
+ clz_a = CountLeadingZeros(a, kXRegSizeInBits);
+ d = 64;
+ mask = ~V8_UINT64_C(0);
+ out_n = 1;
}
+ }
- // 4. If the sum of leading ones, trailing ones and unset bits in the
- // value is equal to the bit width of the value, it can be encoded.
- if (lead_one + trail_one + (width - set_bits) == width) {
- return true;
- }
+ // If the repeat period d is not a power of two, it can't be encoded.
+ if (!IS_POWER_OF_TWO(d)) {
+ return false;
+ }
- // 5. If the most-significant half of the bitwise value is equal to the
- // least-significant half, return to step 2 using the least-significant
- // half of the value.
- uint64_t mask = (1UL << (width >> 1)) - 1;
- if ((value & mask) == ((value >> (width >> 1)) & mask)) {
- width >>= 1;
- set_bits >>= 1;
- imm_s_fixed >>= 1;
- continue;
- }
+ if (((b - a) & ~mask) != 0) {
+ // If the bit stretch (b - a) does not fit within the mask derived from the
+ // repeat period, then fail.
+ return false;
+ }
- // 6. Otherwise, the value can't be encoded.
+ // The only possible option is b - a repeated every d bits. Now we're going to
+ // actually construct the valid logical immediate derived from that
+ // specification, and see if it equals our original input.
+ //
+ // To repeat a value every d bits, we multiply it by a number of the form
+ // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
+ // be derived using a table lookup on CLZ(d).
+ static const uint64_t multipliers[] = {
+ 0x0000000000000001UL,
+ 0x0000000100000001UL,
+ 0x0001000100010001UL,
+ 0x0101010101010101UL,
+ 0x1111111111111111UL,
+ 0x5555555555555555UL,
+ };
+ int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57;
+ // Ensure that the index to the multipliers array is within bounds.
+ DCHECK((multiplier_idx >= 0) &&
+ (static_cast<size_t>(multiplier_idx) < ARRAY_SIZE(multipliers)));
+ uint64_t multiplier = multipliers[multiplier_idx];
+ uint64_t candidate = (b - a) * multiplier;
+
+ if (value != candidate) {
+ // The candidate pattern doesn't match our input value, so fail.
return false;
}
+
+ // We have a match! This is a valid logical immediate, so now we have to
+ // construct the bits and pieces of the instruction encoding that generates
+ // it.
+
+ // Count the set bits in our basic stretch. The special case of clz(0) == -1
+ // makes the answer come out right for stretches that reach the very top of
+ // the word (e.g. numbers like 0xffffc00000000000).
+ int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits);
+ int s = clz_a - clz_b;
+
+ // Decide how many bits to rotate right by, to put the low bit of that basic
+ // stretch in position a.
+ int r;
+ if (negate) {
+ // If we inverted the input right at the start of this function, here's
+ // where we compensate: the number of set bits becomes the number of clear
+ // bits, and the rotation count is based on position b rather than position
+ // a (since b is the location of the 'lowest' 1 bit after inversion).
+ s = d - s;
+ r = (clz_b + 1) & (d - 1);
+ } else {
+ r = (clz_a + 1) & (d - 1);
+ }
+
+ // Now we're done, except for having to encode the S output in such a way that
+ // it gives both the number of set bits and the length of the repeated
+ // segment. The s field is encoded like this:
+ //
+ // imms size S
+ // ssssss 64 UInt(ssssss)
+ // 0sssss 32 UInt(sssss)
+ // 10ssss 16 UInt(ssss)
+ // 110sss 8 UInt(sss)
+ // 1110ss 4 UInt(ss)
+ // 11110s 2 UInt(s)
+ //
+ // So we 'or' (-d << 1) with our computed s to form imms.
+ *n = out_n;
+ *imm_s = ((-d << 1) | (s - 1)) & 0x3f;
+ *imm_r = r;
+
+ return true;
}
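
[Editorial note] A worked example of the analysis above, as a standalone C++ sketch (not part of the patch). For the input 0x0f0f0f0f0f0f0f0f the low bit is set, so the value is inverted; the lowest set bit a is 0x10, the next one b is 0x100, the repeat distance d is 8, and (b - a) = 0xf0 replicated into every byte reproduces the inverted input, so the immediate is encodable (the function then produces N=0, imm_s=0b110011, imm_r=0). The sketch only checks the "repeat every d bits" step via the multiplier trick; the rest follows the derivation in the function.

#include <cassert>
#include <cstdint>

int main() {
  uint64_t value = 0x0f0f0f0f0f0f0f0fUL;
  value = ~value;                            // low bit was set, so invert
  uint64_t a = value & (~value + 1);         // lowest set bit: 0x10
  uint64_t vpa = value + a;                  // wipes the bottom stretch of ones
  uint64_t b = vpa & (~vpa + 1);             // next set bit above it: 0x100
  // Repeat distance d = 8 bits, so the basic unit is (b - a) = 0xf0 and the
  // candidate is that unit replicated into every byte.
  uint64_t candidate = (b - a) * 0x0101010101010101UL;
  assert(candidate == value);                // 0xf0f0f0f0f0f0f0f0: encodable
  return 0;
}
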
@@ -2439,9 +2777,7 @@ void Assembler::GrowBuffer() {
// Compute new buffer size.
CodeDesc desc; // the new buffer
- if (buffer_size_ < 4 * KB) {
- desc.buffer_size = 4 * KB;
- } else if (buffer_size_ < 1 * MB) {
+ if (buffer_size_ < 1 * MB) {
desc.buffer_size = 2 * buffer_size_;
} else {
desc.buffer_size = buffer_size_ + 1 * MB;
@@ -2476,15 +2812,7 @@ void Assembler::GrowBuffer() {
// buffer nor pc absolute pointing inside the code buffer, so there is no need
// to relocate any emitted relocation entries.
- // Relocate pending relocation entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION);
- if (rinfo.rmode() != RelocInfo::JS_RETURN) {
- rinfo.set_pc(rinfo.pc() + pc_delta);
- }
- }
+ // Pending relocation entries are also relative, no need to relocate.
}
@@ -2496,7 +2824,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
(rmode == RelocInfo::CONST_POOL) ||
(rmode == RelocInfo::VENEER_POOL)) {
// Adjust code for new modes.
- ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+ DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
|| RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsPosition(rmode)
@@ -2504,11 +2832,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
|| RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.
} else {
- ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
- if (num_pending_reloc_info_ == 0) {
- first_const_pool_use_ = pc_offset();
- }
- pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
+ constpool_.RecordEntry(data, rmode);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
@@ -2516,12 +2840,11 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!RelocInfo::IsNone(rmode)) {
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
- return;
- }
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !serializer_enabled() && !emit_debug_code()) {
+ return;
}
- ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
RelocInfo reloc_info_with_ast_id(
reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
@@ -2537,11 +2860,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstructionSize;
if (no_const_pool_before_ < pc_limit) {
- // If there are some pending entries, the constant pool cannot be blocked
- // further than first_const_pool_use_ + kMaxDistToConstPool
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool)));
no_const_pool_before_ = pc_limit;
+ // Make sure the pool won't be blocked for too long.
+ DCHECK(pc_limit < constpool_.MaxPcOffset());
}
if (next_constant_pool_check_ < no_const_pool_before_) {
@@ -2556,111 +2877,53 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// BlockConstPoolScope.
if (is_const_pool_blocked()) {
// Something is wrong if emission is forced and blocked at the same time.
- ASSERT(!force_emit);
+ DCHECK(!force_emit);
return;
}
// There is nothing to do if there are no pending constant pool entries.
- if (num_pending_reloc_info_ == 0) {
+ if (constpool_.IsEmpty()) {
// Calculate the offset of the next check.
- next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
+ SetNextConstPoolCheckIn(kCheckConstPoolInterval);
return;
}
// We emit a constant pool when:
// * requested to do so by parameter force_emit (e.g. after each function).
// * the distance to the first instruction accessing the constant pool is
- // kAvgDistToConstPool or more.
- // * no jump is required and the distance to the first instruction accessing
- // the constant pool is at least kMaxDistToPConstool / 2.
- ASSERT(first_const_pool_use_ >= 0);
- int dist = pc_offset() - first_const_pool_use_;
- if (!force_emit && dist < kAvgDistToConstPool &&
- (require_jump || (dist < (kMaxDistToConstPool / 2)))) {
+ // kApproxMaxDistToConstPool or more.
+ // * the number of entries in the pool is kApproxMaxPoolEntryCount or more.
+ int dist = constpool_.DistanceToFirstUse();
+ int count = constpool_.EntryCount();
+ if (!force_emit &&
+ (dist < kApproxMaxDistToConstPool) &&
+ (count < kApproxMaxPoolEntryCount)) {
return;
}
- int jump_instr = require_jump ? kInstructionSize : 0;
- int size_pool_marker = kInstructionSize;
- int size_pool_guard = kInstructionSize;
- int pool_size = jump_instr + size_pool_marker + size_pool_guard +
- num_pending_reloc_info_ * kPointerSize;
- int needed_space = pool_size + kGap;
// Emit veneers for branches that would go out of range during emission of the
// constant pool.
- CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + pool_size);
-
- Label size_check;
- bind(&size_check);
+ int worst_case_size = constpool_.WorstCaseSize();
+ CheckVeneerPool(false, require_jump,
+ kVeneerDistanceMargin + worst_case_size);
// Check that the code buffer is large enough before emitting the constant
- // pool (include the jump over the pool, the constant pool marker, the
- // constant pool guard, and the gap to the relocation information).
+ // pool (this includes the gap to the relocation information).
+ int needed_space = worst_case_size + kGap + 1 * kInstructionSize;
while (buffer_space() <= needed_space) {
GrowBuffer();
}
- {
- // Block recursive calls to CheckConstPool and protect from veneer pools.
- BlockPoolsScope block_pools(this);
- RecordConstPool(pool_size);
-
- // Emit jump over constant pool if necessary.
- Label after_pool;
- if (require_jump) {
- b(&after_pool);
- }
-
- // Emit a constant pool header. The header has two goals:
- // 1) Encode the size of the constant pool, for use by the disassembler.
- // 2) Terminate the program, to try to prevent execution from accidentally
- // flowing into the constant pool.
- // The header is therefore made of two arm64 instructions:
- // ldr xzr, #<size of the constant pool in 32-bit words>
- // blr xzr
- // If executed the code will likely segfault and lr will point to the
- // beginning of the constant pool.
- // TODO(all): currently each relocated constant is 64 bits, consider adding
- // support for 32-bit entries.
- RecordComment("[ Constant Pool");
- ConstantPoolMarker(2 * num_pending_reloc_info_);
- ConstantPoolGuard();
-
- // Emit constant pool entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
- rinfo.rmode() != RelocInfo::CONST_POOL &&
- rinfo.rmode() != RelocInfo::VENEER_POOL);
-
- Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc());
- // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
- ASSERT(instr->IsLdrLiteral() &&
- instr->ImmLLiteral() == 0);
-
- instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
- dc64(rinfo.data());
- }
-
- num_pending_reloc_info_ = 0;
- first_const_pool_use_ = -1;
-
- RecordComment("]");
-
- if (after_pool.is_linked()) {
- bind(&after_pool);
- }
- }
+ Label size_check;
+ bind(&size_check);
+ constpool_.Emit(require_jump);
+ DCHECK(SizeOfCodeGeneratedSince(&size_check) <=
+ static_cast<unsigned>(worst_case_size));
// Since a constant pool was just emitted, move the check offset forward by
// the standard interval.
- next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
-
- ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
- static_cast<unsigned>(pool_size));
+ SetNextConstPoolCheckIn(kCheckConstPoolInterval);
}
@@ -2720,7 +2983,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
branch->SetImmPCOffsetTarget(veneer);
b(label);
#ifdef DEBUG
- ASSERT(SizeOfCodeGeneratedSince(&veneer_size_check) <=
+ DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=
static_cast<uint64_t>(kMaxVeneerCodeSize));
veneer_size_check.Unuse();
#endif
@@ -2753,17 +3016,17 @@ void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
int margin) {
// There is nothing to do if there are no pending veneer pool entries.
if (unresolved_branches_.empty()) {
- ASSERT(next_veneer_pool_check_ == kMaxInt);
+ DCHECK(next_veneer_pool_check_ == kMaxInt);
return;
}
- ASSERT(pc_offset() < unresolved_branches_first_limit());
+ DCHECK(pc_offset() < unresolved_branches_first_limit());
// Some short sequence of instruction mustn't be broken up by veneer pool
// emission, such sequences are protected by calls to BlockVeneerPoolFor and
// BlockVeneerPoolScope.
if (is_veneer_pool_blocked()) {
- ASSERT(!force_emit);
+ DCHECK(!force_emit);
return;
}
@@ -2816,43 +3079,24 @@ void Assembler::RecordConstPool(int size) {
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
// No out-of-line constant pool support.
- ASSERT(!FLAG_enable_ool_constant_pool);
+ DCHECK(!FLAG_enable_ool_constant_pool);
return isolate->factory()->empty_constant_pool_array();
}
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
// No out-of-line constant pool support.
- ASSERT(!FLAG_enable_ool_constant_pool);
+ DCHECK(!FLAG_enable_ool_constant_pool);
return;
}
-void PatchingAssembler::MovInt64(const Register& rd, int64_t imm) {
- Label start;
- bind(&start);
-
- ASSERT(rd.Is64Bits());
- ASSERT(!rd.IsSP());
-
- for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
- uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
- movk(rd, imm16, 16 * i);
- }
-
- ASSERT(SizeOfCodeGeneratedSince(&start) ==
- kMovInt64NInstrs * kInstructionSize);
-}
-
-
-void PatchingAssembler::PatchAdrFar(Instruction* target) {
+void PatchingAssembler::PatchAdrFar(ptrdiff_t target_offset) {
// The code at the current instruction should be:
// adr rd, 0
// nop (adr_far)
// nop (adr_far)
- // nop (adr_far)
// movz scratch, 0
- // add rd, rd, scratch
// Verify the expected code.
Instruction* expected_adr = InstructionAt(0);
@@ -2862,39 +3106,21 @@ void PatchingAssembler::PatchAdrFar(Instruction* target) {
CHECK(InstructionAt((i + 1) * kInstructionSize)->IsNop(ADR_FAR_NOP));
}
Instruction* expected_movz =
- InstructionAt((kAdrFarPatchableNInstrs - 2) * kInstructionSize);
+ InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize);
CHECK(expected_movz->IsMovz() &&
(expected_movz->ImmMoveWide() == 0) &&
(expected_movz->ShiftMoveWide() == 0));
int scratch_code = expected_movz->Rd();
- Instruction* expected_add =
- InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize);
- CHECK(expected_add->IsAddSubShifted() &&
- (expected_add->Mask(AddSubOpMask) == ADD) &&
- expected_add->SixtyFourBits() &&
- (expected_add->Rd() == rd_code) && (expected_add->Rn() == rd_code) &&
- (expected_add->Rm() == scratch_code) &&
- (static_cast<Shift>(expected_add->ShiftDP()) == LSL) &&
- (expected_add->ImmDPShift() == 0));
// Patch to load the correct address.
- Label start;
- bind(&start);
Register rd = Register::XRegFromCode(rd_code);
- // If the target is in range, we only patch the adr. Otherwise we patch the
- // nops with fixup instructions.
- int target_offset = expected_adr->DistanceTo(target);
- if (Instruction::IsValidPCRelOffset(target_offset)) {
- adr(rd, target_offset);
- for (int i = 0; i < kAdrFarPatchableNInstrs - 2; ++i) {
- nop(ADR_FAR_NOP);
- }
- } else {
- Register scratch = Register::XRegFromCode(scratch_code);
- adr(rd, 0);
- MovInt64(scratch, target_offset);
- add(rd, rd, scratch);
- }
+ Register scratch = Register::XRegFromCode(scratch_code);
+ // Addresses are only 48 bits.
+ adr(rd, target_offset & 0xFFFF);
+ movz(scratch, (target_offset >> 16) & 0xFFFF, 16);
+ movk(scratch, (target_offset >> 32) & 0xFFFF, 32);
+ DCHECK((target_offset >> 48) == 0);
+ add(rd, rd, scratch);
}
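
[Editorial note] The rewritten PatchAdrFar relies on the target offset fitting in 48 bits and splits it into an adr piece plus two move-wide pieces in the scratch register. The standalone sketch below only checks that the three pieces recombine to the original offset (it does not emit code); note the low 16 bits are always non-negative, so they fit comfortably in adr's signed 21-bit range.

#include <cassert>
#include <cstdint>

int main() {
  int64_t target_offset = 0x123456789abcLL;        // 48-bit offset
  assert((target_offset >> 48) == 0);
  int64_t adr_part = target_offset & 0xffff;       // adr rd, #0x9abc
  int64_t scratch =
      (((target_offset >> 16) & 0xffff) << 16) |   // movz scratch, #0x5678, lsl 16
      (((target_offset >> 32) & 0xffff) << 32);    // movk scratch, #0x1234, lsl 32
  assert(adr_part + scratch == target_offset);     // add rd, rd, scratch
  return 0;
}
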
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index a3fbc98d9..1bafce845 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -7,13 +7,13 @@
#include <list>
#include <map>
+#include <vector>
-#include "globals.h"
-#include "utils.h"
-#include "assembler.h"
-#include "serialize.h"
-#include "arm64/instructions-arm64.h"
-#include "arm64/cpu-arm64.h"
+#include "src/arm64/instructions-arm64.h"
+#include "src/assembler.h"
+#include "src/globals.h"
+#include "src/serialize.h"
+#include "src/utils.h"
namespace v8 {
@@ -66,6 +66,7 @@ struct CPURegister {
bool IsValidFPRegister() const;
bool IsNone() const;
bool Is(const CPURegister& other) const;
+ bool Aliases(const CPURegister& other) const;
bool IsZero() const;
bool IsSP() const;
@@ -105,18 +106,18 @@ struct Register : public CPURegister {
reg_code = r.reg_code;
reg_size = r.reg_size;
reg_type = r.reg_type;
- ASSERT(IsValidOrNone());
+ DCHECK(IsValidOrNone());
}
Register(const Register& r) { // NOLINT(runtime/explicit)
reg_code = r.reg_code;
reg_size = r.reg_size;
reg_type = r.reg_type;
- ASSERT(IsValidOrNone());
+ DCHECK(IsValidOrNone());
}
bool IsValid() const {
- ASSERT(IsRegister() || IsNone());
+ DCHECK(IsRegister() || IsNone());
return IsValidRegister();
}
@@ -168,7 +169,7 @@ struct Register : public CPURegister {
}
static Register FromAllocationIndex(unsigned index) {
- ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
+ DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
// cp is the last allocatable register.
if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
return from_code(kAllocatableContext);
@@ -181,8 +182,8 @@ struct Register : public CPURegister {
}
static const char* AllocationIndexToString(int index) {
- ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
- ASSERT((kAllocatableLowRangeBegin == 0) &&
+ DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
+ DCHECK((kAllocatableLowRangeBegin == 0) &&
(kAllocatableLowRangeEnd == 15) &&
(kAllocatableHighRangeBegin == 18) &&
(kAllocatableHighRangeEnd == 24) &&
@@ -198,7 +199,7 @@ struct Register : public CPURegister {
}
static int ToAllocationIndex(Register reg) {
- ASSERT(reg.IsAllocatable());
+ DCHECK(reg.IsAllocatable());
unsigned code = reg.code();
if (code == kAllocatableContext) {
return NumAllocatableRegisters() - 1;
@@ -234,18 +235,18 @@ struct FPRegister : public CPURegister {
reg_code = r.reg_code;
reg_size = r.reg_size;
reg_type = r.reg_type;
- ASSERT(IsValidOrNone());
+ DCHECK(IsValidOrNone());
}
FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit)
reg_code = r.reg_code;
reg_size = r.reg_size;
reg_type = r.reg_type;
- ASSERT(IsValidOrNone());
+ DCHECK(IsValidOrNone());
}
bool IsValid() const {
- ASSERT(IsFPRegister() || IsNone());
+ DCHECK(IsFPRegister() || IsNone());
return IsValidFPRegister();
}
@@ -281,7 +282,7 @@ struct FPRegister : public CPURegister {
}
static FPRegister FromAllocationIndex(unsigned int index) {
- ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
+ DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
return (index <= kAllocatableLowRangeEnd)
? from_code(index)
@@ -289,8 +290,8 @@ struct FPRegister : public CPURegister {
}
static const char* AllocationIndexToString(int index) {
- ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
- ASSERT((kAllocatableLowRangeBegin == 0) &&
+ DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
+ DCHECK((kAllocatableLowRangeBegin == 0) &&
(kAllocatableLowRangeEnd == 14) &&
(kAllocatableHighRangeBegin == 16) &&
(kAllocatableHighRangeEnd == 28));
@@ -304,7 +305,7 @@ struct FPRegister : public CPURegister {
}
static int ToAllocationIndex(FPRegister reg) {
- ASSERT(reg.IsAllocatable());
+ DCHECK(reg.IsAllocatable());
unsigned code = reg.code();
return (code <= kAllocatableLowRangeEnd)
@@ -450,40 +451,40 @@ class CPURegList {
CPURegister reg4 = NoCPUReg)
: list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
size_(reg1.SizeInBits()), type_(reg1.type()) {
- ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
- ASSERT(IsValid());
+ DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
+ DCHECK(IsValid());
}
CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
: list_(list), size_(size), type_(type) {
- ASSERT(IsValid());
+ DCHECK(IsValid());
}
CPURegList(CPURegister::RegisterType type, unsigned size,
unsigned first_reg, unsigned last_reg)
: size_(size), type_(type) {
- ASSERT(((type == CPURegister::kRegister) &&
+ DCHECK(((type == CPURegister::kRegister) &&
(last_reg < kNumberOfRegisters)) ||
((type == CPURegister::kFPRegister) &&
(last_reg < kNumberOfFPRegisters)));
- ASSERT(last_reg >= first_reg);
+ DCHECK(last_reg >= first_reg);
list_ = (1UL << (last_reg + 1)) - 1;
list_ &= ~((1UL << first_reg) - 1);
- ASSERT(IsValid());
+ DCHECK(IsValid());
}
CPURegister::RegisterType type() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return type_;
}
RegList list() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return list_;
}
inline void set_list(RegList new_list) {
- ASSERT(IsValid());
+ DCHECK(IsValid());
list_ = new_list;
}
@@ -528,7 +529,7 @@ class CPURegList {
static CPURegList GetSafepointSavedRegisters();
bool IsEmpty() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return list_ == 0;
}
@@ -536,7 +537,7 @@ class CPURegList {
const CPURegister& other2 = NoCPUReg,
const CPURegister& other3 = NoCPUReg,
const CPURegister& other4 = NoCPUReg) const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
RegList list = 0;
if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
@@ -546,21 +547,26 @@ class CPURegList {
}
int Count() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return CountSetBits(list_, kRegListSizeInBits);
}
unsigned RegisterSizeInBits() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return size_;
}
unsigned RegisterSizeInBytes() const {
int size_in_bits = RegisterSizeInBits();
- ASSERT((size_in_bits % kBitsPerByte) == 0);
+ DCHECK((size_in_bits % kBitsPerByte) == 0);
return size_in_bits / kBitsPerByte;
}
+ unsigned TotalSizeInBytes() const {
+ DCHECK(IsValid());
+ return RegisterSizeInBytes() * Count();
+ }
+
private:
RegList list_;
unsigned size_;
@@ -593,6 +599,31 @@ class CPURegList {
#define kCallerSaved CPURegList::GetCallerSaved()
#define kCallerSavedFP CPURegList::GetCallerSavedFP()
+// -----------------------------------------------------------------------------
+// Immediates.
+class Immediate {
+ public:
+ template<typename T>
+ inline explicit Immediate(Handle<T> handle);
+
+ // This is allowed to be an implicit constructor because Immediate is
+ // a wrapper class that doesn't normally perform any type conversion.
+ template<typename T>
+ inline Immediate(T value); // NOLINT(runtime/explicit)
+
+ template<typename T>
+ inline Immediate(T value, RelocInfo::Mode rmode);
+
+ int64_t value() const { return value_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+
+ private:
+ void InitializeHandle(Handle<Object> value);
+
+ int64_t value_;
+ RelocInfo::Mode rmode_;
+};
+
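A hedged usage sketch of the wrapper above; the values and relocation mode are invented for illustration, and the behaviour of the inline constructors is assumed from their declarations:

  // Illustrative only; not code from the tree.
  Immediate plain(42);                                       // plain integer, no reloc info needed
  Immediate ref(0x12345678, RelocInfo::EXTERNAL_REFERENCE);  // value tagged with a reloc mode
  int64_t v = plain.value();          // 42
  RelocInfo::Mode m = ref.rmode();    // RelocInfo::EXTERNAL_REFERENCE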
// -----------------------------------------------------------------------------
// Operands.
@@ -628,8 +659,8 @@ class Operand {
inline Operand(T t); // NOLINT(runtime/explicit)
// Implicit constructor for int types.
- template<typename int_t>
- inline Operand(int_t t, RelocInfo::Mode rmode);
+ template<typename T>
+ inline Operand(T t, RelocInfo::Mode rmode);
inline bool IsImmediate() const;
inline bool IsShiftedRegister() const;
@@ -640,36 +671,33 @@ class Operand {
// which helps in the encoding of instructions that use the stack pointer.
inline Operand ToExtendedRegister() const;
- inline int64_t immediate() const;
+ inline Immediate immediate() const;
+ inline int64_t ImmediateValue() const;
inline Register reg() const;
inline Shift shift() const;
inline Extend extend() const;
inline unsigned shift_amount() const;
// Relocation information.
- RelocInfo::Mode rmode() const { return rmode_; }
- void set_rmode(RelocInfo::Mode rmode) { rmode_ = rmode; }
- bool NeedsRelocation(Isolate* isolate) const;
+ bool NeedsRelocation(const Assembler* assembler) const;
// Helpers
inline static Operand UntagSmi(Register smi);
inline static Operand UntagSmiAndScale(Register smi, int scale);
private:
- void initialize_handle(Handle<Object> value);
- int64_t immediate_;
+ Immediate immediate_;
Register reg_;
Shift shift_;
Extend extend_;
unsigned shift_amount_;
- RelocInfo::Mode rmode_;
};
// MemOperand represents a memory operand in a load or store instruction.
class MemOperand {
public:
- inline explicit MemOperand();
+ inline MemOperand();
inline explicit MemOperand(Register base,
ptrdiff_t offset = 0,
AddrMode addrmode = Offset);
@@ -701,6 +729,16 @@ class MemOperand {
// handle indexed modes.
inline Operand OffsetAsOperand() const;
+ enum PairResult {
+ kNotPair, // Can't use a pair instruction.
+ kPairAB, // Can use a pair instruction (operandA has lower address).
+ kPairBA // Can use a pair instruction (operandB has lower address).
+ };
+ // Check if two MemOperands are consistent for stp/ldp use.
+ static PairResult AreConsistentForPair(const MemOperand& operandA,
+ const MemOperand& operandB,
+ int access_size_log2 = kXRegSizeLog2);
+
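A hedged sketch of how a caller might use AreConsistentForPair to fuse two queued stores into one stp; the helper name and its surroundings are assumptions for illustration, not V8 code:

  // Sketch: merge two pending X-register stores into a single stp when possible.
  void MaybeEmitStorePair(Assembler* assm,
                          const CPURegister& rt_a, const MemOperand& addr_a,
                          const CPURegister& rt_b, const MemOperand& addr_b) {
    switch (MemOperand::AreConsistentForPair(addr_a, addr_b)) {
      case MemOperand::kPairAB:    // addr_a has the lower address.
        assm->stp(rt_a, rt_b, addr_a);
        break;
      case MemOperand::kPairBA:    // addr_b has the lower address.
        assm->stp(rt_b, rt_a, addr_b);
        break;
      case MemOperand::kNotPair:   // Fall back to two plain stores.
        assm->str(rt_a, addr_a);
        assm->str(rt_b, addr_b);
        break;
    }
  }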
private:
Register base_;
Register regoffset_;
@@ -712,6 +750,55 @@ class MemOperand {
};
+class ConstPool {
+ public:
+ explicit ConstPool(Assembler* assm)
+ : assm_(assm),
+ first_use_(-1),
+ shared_entries_count(0) {}
+ void RecordEntry(intptr_t data, RelocInfo::Mode mode);
+ int EntryCount() const {
+ return shared_entries_count + unique_entries_.size();
+ }
+ bool IsEmpty() const {
+ return shared_entries_.empty() && unique_entries_.empty();
+ }
+ // Distance in bytes between the current pc and the first instruction
+ // using the pool. If there are no pending entries return kMaxInt.
+ int DistanceToFirstUse();
+ // Offset after which instructions using the pool will be out of range.
+ int MaxPcOffset();
+ // Maximum size the constant pool can be with current entries. It always
+ // includes alignment padding and branch over.
+ int WorstCaseSize();
+ // Size in bytes of the literal pool *if* it is emitted at the current
+ // pc. The size will include the branch over the pool if it was requested.
+ int SizeIfEmittedAtCurrentPc(bool require_jump);
+ // Emit the literal pool at the current pc with a branch over the pool if
+ // requested.
+ void Emit(bool require_jump);
+ // Discard any pending pool entries.
+ void Clear();
+
+ private:
+ bool CanBeShared(RelocInfo::Mode mode);
+ void EmitMarker();
+ void EmitGuard();
+ void EmitEntries();
+
+ Assembler* assm_;
+ // Keep track of the first instruction requiring a constant pool entry
+ // since the previous constant pool was emitted.
+ int first_use_;
+ // values, pc offset(s) of entries which can be shared.
+ std::multimap<uint64_t, int> shared_entries_;
+ // Number of distinct literals in shared entries.
+ int shared_entries_count;
+ // values, pc offset of entries which cannot be shared.
+ std::vector<std::pair<uint64_t, int> > unique_entries_;
+};
+
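Given the bookkeeping above (shared entries in a multimap keyed by value, unique entries in a vector, and first_use_ tracking the oldest user), RecordEntry could plausibly take the shape sketched below. This illustrates the data-structure usage only; it is not the implementation from assembler-arm64.cc:

  // Sketch only: record a pending 64-bit constant and the pc offset that uses it.
  void ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
    uint64_t raw_data = static_cast<uint64_t>(data);
    int offset = assm_->pc_offset();
    if (first_use_ == -1) first_use_ = offset;   // Oldest pending user.

    if (CanBeShared(mode)) {
      // Count each distinct value once, however many loads reference it.
      if (shared_entries_.count(raw_data) == 0) shared_entries_count++;
      shared_entries_.insert(std::make_pair(raw_data, offset));
    } else {
      unique_entries_.push_back(std::make_pair(raw_data, offset));
    }
  }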
+
// -----------------------------------------------------------------------------
// Assembler.
@@ -735,14 +822,14 @@ class Assembler : public AssemblerBase {
virtual ~Assembler();
virtual void AbortedCodeGeneration() {
- num_pending_reloc_info_ = 0;
+ constpool_.Clear();
}
// System functions ---------------------------------------------------------
// Start generating code from the beginning of the buffer, discarding any code
// and data that has already been emitted into the buffer.
//
- // In order to avoid any accidental transfer of state, Reset ASSERTs that the
+ // In order to avoid any accidental transfer of state, Reset DCHECKs that the
// constant pool is not blocked.
void Reset();
@@ -782,11 +869,15 @@ class Assembler : public AssemblerBase {
ConstantPoolArray* constant_pool);
inline static void set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
- Address target);
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code);
static inline void set_target_address_at(Address pc,
Code* code,
- Address target);
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address of
// that call in the instruction stream.
@@ -796,6 +887,9 @@ class Assembler : public AssemblerBase {
// instruction stream that call will return from.
inline static Address return_address_from_call_start(Address pc);
+ // Return the code target address of the patch debug break slot
+ inline static Address break_address_from_return_address(Address pc);
+
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -822,15 +916,15 @@ class Assembler : public AssemblerBase {
// Size of the generated code in bytes
uint64_t SizeOfGeneratedCode() const {
- ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
+ DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
return pc_ - buffer_;
}
// Return the code size generated from label to the current position.
uint64_t SizeOfCodeGeneratedSince(const Label* label) {
- ASSERT(label->is_bound());
- ASSERT(pc_offset() >= label->pos());
- ASSERT(pc_offset() < buffer_size_);
+ DCHECK(label->is_bound());
+ DCHECK(pc_offset() >= label->pos());
+ DCHECK(pc_offset() < buffer_size_);
return pc_offset() - label->pos();
}
@@ -840,8 +934,8 @@ class Assembler : public AssemblerBase {
// TODO(jbramley): Work out what sign to use for these things and if possible,
// change things to be consistent.
void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
- ASSERT(size >= 0);
- ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
+ DCHECK(size >= 0);
+ DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
}
// Return the number of instructions generated from label to the
@@ -859,7 +953,8 @@ class Assembler : public AssemblerBase {
static const int kPatchDebugBreakSlotAddressOffset = 0;
// Number of instructions necessary to be able to later patch it to a call.
- // See Debug::GenerateSlot() and BreakLocationIterator::SetDebugBreakAtSlot().
+ // See DebugCodegen::GenerateSlot() and
+ // BreakLocationIterator::SetDebugBreakAtSlot().
static const int kDebugBreakSlotInstructions = 4;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstructionSize;
@@ -879,9 +974,7 @@ class Assembler : public AssemblerBase {
static bool IsConstantPoolAt(Instruction* instr);
static int ConstantPoolSizeAt(Instruction* instr);
// See Assembler::CheckConstPool for more info.
- void ConstantPoolMarker(uint32_t size);
void EmitPoolGuard();
- void ConstantPoolGuard();
// Prevent veneer pool emission until EndBlockVeneerPool is called.
// Call to this function can be nested but must be followed by an equal
@@ -925,9 +1018,9 @@ class Assembler : public AssemblerBase {
// function, compiled with and without debugger support (see for example
// Debug::PrepareForBreakPoints()).
// Compiling functions with debugger support generates additional code
- // (Debug::GenerateSlot()). This may affect the emission of the pools and
- // cause the version of the code with debugger support to have pools generated
- // in different places.
+ // (DebugCodegen::GenerateSlot()). This may affect the emission of the pools
+ // and cause the version of the code with debugger support to have pools
+ // generated in different places.
 // Recording the position and size of emitted pools makes it possible to
 // correctly compute the offset mappings between the different versions of a
 // function in all situations.
@@ -1124,8 +1217,8 @@ class Assembler : public AssemblerBase {
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
+ DCHECK(width >= 1);
+ DCHECK(lsb + width <= rn.SizeInBits());
bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
@@ -1134,15 +1227,15 @@ class Assembler : public AssemblerBase {
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
+ DCHECK(width >= 1);
+ DCHECK(lsb + width <= rn.SizeInBits());
bfm(rd, rn, lsb, lsb + width - 1);
}
// Sbfm aliases.
// Arithmetic shift right.
void asr(const Register& rd, const Register& rn, unsigned shift) {
- ASSERT(shift < rd.SizeInBits());
+ DCHECK(shift < rd.SizeInBits());
sbfm(rd, rn, shift, rd.SizeInBits() - 1);
}
@@ -1151,8 +1244,8 @@ class Assembler : public AssemblerBase {
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
+ DCHECK(width >= 1);
+ DCHECK(lsb + width <= rn.SizeInBits());
sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
@@ -1161,8 +1254,8 @@ class Assembler : public AssemblerBase {
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
+ DCHECK(width >= 1);
+ DCHECK(lsb + width <= rn.SizeInBits());
sbfm(rd, rn, lsb, lsb + width - 1);
}
@@ -1185,13 +1278,13 @@ class Assembler : public AssemblerBase {
// Logical shift left.
void lsl(const Register& rd, const Register& rn, unsigned shift) {
unsigned reg_size = rd.SizeInBits();
- ASSERT(shift < reg_size);
+ DCHECK(shift < reg_size);
ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
}
// Logical shift right.
void lsr(const Register& rd, const Register& rn, unsigned shift) {
- ASSERT(shift < rd.SizeInBits());
+ DCHECK(shift < rd.SizeInBits());
ubfm(rd, rn, shift, rd.SizeInBits() - 1);
}
@@ -1200,8 +1293,8 @@ class Assembler : public AssemblerBase {
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
+ DCHECK(width >= 1);
+ DCHECK(lsb + width <= rn.SizeInBits());
ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
@@ -1210,8 +1303,8 @@ class Assembler : public AssemblerBase {
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
+ DCHECK(width >= 1);
+ DCHECK(lsb + width <= rn.SizeInBits());
ubfm(rd, rn, lsb, lsb + width - 1);
}
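As a worked instance of the alias mapping these helpers (and their DCHECKs) encode, ubfx lowers directly to ubfm with immr = lsb and imms = lsb + width - 1:

  // Worked example (illustrative call, not code from the tree):
  //   ubfx(x0, x1, /* lsb = */ 8, /* width = */ 16)
  // emits
  //   ubfm(x0, x1, /* immr = */ 8, /* imms = */ 8 + 16 - 1)   // imms = 23
  // i.e. x0 = (x1 >> 8) & 0xffff.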
@@ -1358,9 +1451,6 @@ class Assembler : public AssemblerBase {
// Memory instructions.
- // Load literal from pc + offset_from_pc.
- void LoadLiteral(const CPURegister& rt, int offset_from_pc);
-
// Load integer or FP register.
void ldr(const CPURegister& rt, const MemOperand& src);
@@ -1407,12 +1497,11 @@ class Assembler : public AssemblerBase {
void stnp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& dst);
- // Load literal to register.
- void ldr(const Register& rt, uint64_t imm);
+ // Load literal to register from a pc relative address.
+ void ldr_pcrel(const CPURegister& rt, int imm19);
- // Load literal to FP register.
- void ldr(const FPRegister& ft, double imm);
- void ldr(const FPRegister& ft, float imm);
+ // Load literal to register.
+ void ldr(const CPURegister& rt, const Immediate& imm);
// Move instructions. The default shift of -1 indicates that the move
// instruction will calculate an appropriate 16-bit immediate and left shift
@@ -1485,7 +1574,7 @@ class Assembler : public AssemblerBase {
};
void nop(NopMarkerTypes n) {
- ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
+ DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
}
@@ -1646,7 +1735,7 @@ class Assembler : public AssemblerBase {
// subsequent instructions.
void EmitStringData(const char * string) {
size_t len = strlen(string) + 1;
- ASSERT(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
+ DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
EmitData(string, len);
// Pad with NULL characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
@@ -1666,7 +1755,9 @@ class Assembler : public AssemblerBase {
// Code generation helpers --------------------------------------------------
- unsigned num_pending_reloc_info() const { return num_pending_reloc_info_; }
+ bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }
+
+ Instruction* pc() const { return Instruction::Cast(pc_); }
Instruction* InstructionAt(int offset) const {
return reinterpret_cast<Instruction*>(buffer_ + offset);
@@ -1678,44 +1769,44 @@ class Assembler : public AssemblerBase {
// Register encoding.
static Instr Rd(CPURegister rd) {
- ASSERT(rd.code() != kSPRegInternalCode);
+ DCHECK(rd.code() != kSPRegInternalCode);
return rd.code() << Rd_offset;
}
static Instr Rn(CPURegister rn) {
- ASSERT(rn.code() != kSPRegInternalCode);
+ DCHECK(rn.code() != kSPRegInternalCode);
return rn.code() << Rn_offset;
}
static Instr Rm(CPURegister rm) {
- ASSERT(rm.code() != kSPRegInternalCode);
+ DCHECK(rm.code() != kSPRegInternalCode);
return rm.code() << Rm_offset;
}
static Instr Ra(CPURegister ra) {
- ASSERT(ra.code() != kSPRegInternalCode);
+ DCHECK(ra.code() != kSPRegInternalCode);
return ra.code() << Ra_offset;
}
static Instr Rt(CPURegister rt) {
- ASSERT(rt.code() != kSPRegInternalCode);
+ DCHECK(rt.code() != kSPRegInternalCode);
return rt.code() << Rt_offset;
}
static Instr Rt2(CPURegister rt2) {
- ASSERT(rt2.code() != kSPRegInternalCode);
+ DCHECK(rt2.code() != kSPRegInternalCode);
return rt2.code() << Rt2_offset;
}
// These encoding functions allow the stack pointer to be encoded, and
// disallow the zero register.
static Instr RdSP(Register rd) {
- ASSERT(!rd.IsZero());
+ DCHECK(!rd.IsZero());
return (rd.code() & kRegCodeMask) << Rd_offset;
}
static Instr RnSP(Register rn) {
- ASSERT(!rn.IsZero());
+ DCHECK(!rn.IsZero());
return (rn.code() & kRegCodeMask) << Rn_offset;
}
@@ -1830,7 +1921,6 @@ class Assembler : public AssemblerBase {
void CheckVeneerPool(bool force_emit, bool require_jump,
int margin = kVeneerDistanceMargin);
-
class BlockPoolsScope {
public:
explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
@@ -1846,10 +1936,6 @@ class Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
};
- // Available for constrained code generation scopes. Prefer
- // MacroAssembler::Mov() when possible.
- inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
-
protected:
inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
@@ -1859,6 +1945,10 @@ class Assembler : public AssemblerBase {
static bool IsImmLSUnscaled(ptrdiff_t offset);
static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
+ void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& addr, LoadStorePairOp op);
+ static bool IsImmLSPair(ptrdiff_t offset, LSDataSize size);
+
void Logical(const Register& rd,
const Register& rn,
const Operand& operand,
@@ -1916,6 +2006,7 @@ class Assembler : public AssemblerBase {
const CPURegister& rt, const CPURegister& rt2);
static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
const CPURegister& rt, const CPURegister& rt2);
+ static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
// Remove the specified branch from the unbound label link chain.
// If available, a veneer for this label can be used for other branches in the
@@ -1940,19 +2031,10 @@ class Assembler : public AssemblerBase {
const Operand& operand,
FlagsUpdate S,
Instr op);
- void LoadStorePair(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& addr,
- LoadStorePairOp op);
void LoadStorePairNonTemporal(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& addr,
LoadStorePairNonTemporalOp op);
- // Register the relocation information for the operand and load its value
- // into rt.
- void LoadRelocatedValue(const CPURegister& rt,
- const Operand& operand,
- LoadLiteralOp op);
void ConditionalSelect(const Register& rd,
const Register& rn,
const Register& rm,
@@ -1999,11 +2081,16 @@ class Assembler : public AssemblerBase {
// instructions.
void BlockConstPoolFor(int instructions);
+ // Set how far from current pc the next constant pool check will be.
+ void SetNextConstPoolCheckIn(int instructions) {
+ next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
+ }
+
// Emit the instruction at pc_.
void Emit(Instr instruction) {
STATIC_ASSERT(sizeof(*pc_) == 1);
STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
- ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
+ DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
memcpy(pc_, &instruction, sizeof(instruction));
pc_ += sizeof(instruction);
@@ -2012,8 +2099,8 @@ class Assembler : public AssemblerBase {
// Emit data inline in the instruction stream.
void EmitData(void const * data, unsigned size) {
- ASSERT(sizeof(*pc_) == 1);
- ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
+ DCHECK(sizeof(*pc_) == 1);
+ DCHECK((pc_ + size) <= (buffer_ + buffer_size_));
// TODO(all): Somehow register we have some data here. Then we can
// disassemble it correctly.
@@ -2030,12 +2117,13 @@ class Assembler : public AssemblerBase {
int next_constant_pool_check_;
// Constant pool generation
- // Pools are emitted in the instruction stream, preferably after unconditional
- // jumps or after returns from functions (in dead code locations).
- // If a long code sequence does not contain unconditional jumps, it is
- // necessary to emit the constant pool before the pool gets too far from the
- // location it is accessed from. In this case, we emit a jump over the emitted
- // constant pool.
+ // Pools are emitted in the instruction stream. They are emitted when:
+ // * the distance to the first use is above a pre-defined distance or
+ // * the number of entries in the pool is above a pre-defined size or
+ // * code generation is finished
+ // If a pool needs to be emitted before code generation is finished a branch
+ // over the emitted pool will be inserted.
+
 // Constants in the pool may be addresses of functions that get relocated;
 // if so, a relocation info entry is associated with the constant pool entry.
@@ -2043,34 +2131,22 @@ class Assembler : public AssemblerBase {
// expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers.
- static const int kCheckConstPoolIntervalInst = 128;
- static const int kCheckConstPoolInterval =
- kCheckConstPoolIntervalInst * kInstructionSize;
-
- // Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB thereby defining a maximum distance between the instruction
- // and the accessed constant.
- static const int kMaxDistToConstPool = 4 * KB;
- static const int kMaxNumPendingRelocInfo =
- kMaxDistToConstPool / kInstructionSize;
-
-
- // Average distance beetween a constant pool and the first instruction
- // accessing the constant pool. Longer distance should result in less I-cache
- // pollution.
- // In practice the distance will be smaller since constant pool emission is
- // forced after function return and sometimes after unconditional branches.
- static const int kAvgDistToConstPool =
- kMaxDistToConstPool - kCheckConstPoolInterval;
+ static const int kCheckConstPoolInterval = 128;
+
+ // Distance to first use after which a pool will be emitted. Pool entries
+ // are accessed with a pc-relative load, therefore this cannot be more than
+ // 1 * MB. Since constant pool emission checks are interval based, this value
+ // is an approximation.
+ static const int kApproxMaxDistToConstPool = 64 * KB;
+
+ // Number of pool entries after which a pool will be emitted. Since constant
+ // pool emission checks are interval based, this value is an approximation.
+ static const int kApproxMaxPoolEntryCount = 512;
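With the two approximate limits above, the emission decision boils down to a couple of comparisons. A minimal sketch, assuming a free function with its own copies of the thresholds (the real check lives in Assembler::CheckConstPool):

  // Sketch: should the pending pool be emitted at the next check point?
  bool ShouldEmitConstPool(ConstPool* pool) {
    const int kMaxDistance = 64 * KB;  // Mirrors kApproxMaxDistToConstPool.
    const int kMaxEntries = 512;       // Mirrors kApproxMaxPoolEntryCount.
    if (pool->IsEmpty()) return false;
    if (pool->DistanceToFirstUse() > kMaxDistance) return true;  // Too far from first use.
    return pool->EntryCount() > kMaxEntries;                     // Too many entries.
  }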
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
- // Keep track of the first instruction requiring a constant pool entry
- // since the previous constant pool was emitted.
- int first_const_pool_use_;
-
// Emission of the veneer pools may be blocked in some code sequences.
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
@@ -2086,10 +2162,8 @@ class Assembler : public AssemblerBase {
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
- // the buffer of pending relocation info
- RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
- // number of pending reloc info entries in the buffer
- int num_pending_reloc_info_;
+ // The pending constant pool.
+ ConstPool constpool_;
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@@ -2103,7 +2177,7 @@ class Assembler : public AssemblerBase {
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
void SetRecordedAstId(TypeFeedbackId ast_id) {
- ASSERT(recorded_ast_id_.IsNone());
+ DCHECK(recorded_ast_id_.IsNone());
recorded_ast_id_ = ast_id;
}
@@ -2151,7 +2225,7 @@ class Assembler : public AssemblerBase {
static const int kVeneerDistanceCheckMargin =
kVeneerNoProtectionFactor * kVeneerDistanceMargin;
int unresolved_branches_first_limit() const {
- ASSERT(!unresolved_branches_.empty());
+ DCHECK(!unresolved_branches_.empty());
return unresolved_branches_.begin()->first;
}
// This is similar to next_constant_pool_check_ and helps reduce the overhead
@@ -2176,6 +2250,7 @@ class Assembler : public AssemblerBase {
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
friend class EnsureSpace;
+ friend class ConstPool;
};
class PatchingAssembler : public Assembler {
@@ -2203,24 +2278,21 @@ class PatchingAssembler : public Assembler {
~PatchingAssembler() {
// Const pool should still be blocked.
- ASSERT(is_const_pool_blocked());
+ DCHECK(is_const_pool_blocked());
EndBlockPools();
// Verify we have generated the number of instruction we expected.
- ASSERT((pc_offset() + kGap) == buffer_size_);
+ DCHECK((pc_offset() + kGap) == buffer_size_);
// Verify no relocation information has been emitted.
- ASSERT(num_pending_reloc_info() == 0);
+ DCHECK(IsConstPoolEmpty());
// Flush the Instruction cache.
size_t length = buffer_size_ - kGap;
- CPU::FlushICache(buffer_, length);
+ CpuFeatures::FlushICache(buffer_, length);
}
- static const int kMovInt64NInstrs = 4;
- void MovInt64(const Register& rd, int64_t imm);
-
// See definition of PatchAdrFar() for details.
- static const int kAdrFarPatchableNNops = kMovInt64NInstrs - 1;
- static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 3;
- void PatchAdrFar(Instruction* target);
+ static const int kAdrFarPatchableNNops = 2;
+ static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
+ void PatchAdrFar(ptrdiff_t target_offset);
};
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
index fec5fef99..2e0aed77a 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "codegen.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "runtime.h"
-#include "stub-cache.h"
+#include "src/codegen.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -66,7 +66,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
num_extra_args = 1;
__ Push(x1);
} else {
- ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
}
// JumpToExternalReference expects x0 to contain the number of arguments
@@ -294,7 +294,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
__ B(hs, &ok);
- CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+ CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ Bind(&ok);
@@ -304,7 +304,6 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions,
bool create_memento) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@@ -315,12 +314,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -----------------------------------
ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
// Should never create mementos for api functions.
- ASSERT(!is_api_function || !create_memento);
- // Should never create mementos before slack tracking is finished.
- ASSERT(!count_constructions || !create_memento);
+ DCHECK(!is_api_function || !create_memento);
Isolate* isolate = masm->isolate();
@@ -366,24 +361,28 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
__ B(eq, &rt_call);
- if (count_constructions) {
+ Register constructon_count = x14;
+ if (!is_api_function) {
Label allocate;
+ MemOperand bit_field3 =
+ FieldMemOperand(init_map, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ Ldr(x4, bit_field3);
+ __ DecodeField<Map::ConstructionCount>(constructon_count, x4);
+ __ Cmp(constructon_count, Operand(JSFunction::kNoSlackTracking));
+ __ B(eq, &allocate);
// Decrease generous allocation count.
- __ Ldr(x3, FieldMemOperand(constructor,
- JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(x3, SharedFunctionInfo::kConstructionCountOffset);
- __ Ldrb(x4, constructor_count);
- __ Subs(x4, x4, 1);
- __ Strb(x4, constructor_count);
+ __ Subs(x4, x4, Operand(1 << Map::ConstructionCount::kShift));
+ __ Str(x4, bit_field3);
+ __ Cmp(constructon_count, Operand(JSFunction::kFinishSlackTracking));
__ B(ne, &allocate);
// Push the constructor and map to the stack, and the constructor again
// as argument to the runtime call.
__ Push(constructor, init_map, constructor);
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ Pop(init_map, constructor);
+ __ Mov(constructon_count, Operand(JSFunction::kNoSlackTracking));
__ Bind(&allocate);
}
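In rough C++ terms, the slack-tracking sequence generated above does the following. This is a hedged restatement of the emitted assembly (the runtime call is shown symbolically), not code that exists in the tree:

  // Restatement only.
  int count = Map::ConstructionCount::decode(map->bit_field3());
  if (count != JSFunction::kNoSlackTracking) {
    // Decrease the generous allocation countdown kept in the map's bit field 3.
    map->set_bit_field3(map->bit_field3() -
                        (1 << Map::ConstructionCount::kShift));
    if (count == JSFunction::kFinishSlackTracking) {
      // Countdown finished: let the runtime shrink the instance size.
      CallRuntime(Runtime::kFinalizeInstanceSize, constructor);  // symbolic
    }
  }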
@@ -413,8 +412,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Add(first_prop, new_obj, JSObject::kHeaderSize);
// Fill all of the in-object properties with the appropriate filler.
- Register undef = x7;
- __ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
+ Register filler = x7;
+ __ LoadRoot(filler, Heap::kUndefinedValueRootIndex);
// Obtain number of pre-allocated property fields and in-object
// properties.
@@ -432,48 +431,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Register prop_fields = x6;
__ Sub(prop_fields, obj_size, JSObject::kHeaderSize / kPointerSize);
- if (count_constructions) {
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ __ Cmp(constructon_count, Operand(JSFunction::kNoSlackTracking));
+ __ B(eq, &no_inobject_slack_tracking);
+ constructon_count = NoReg;
+
// Fill the pre-allocated fields with undef.
- __ FillFields(first_prop, prealloc_fields, undef);
+ __ FillFields(first_prop, prealloc_fields, filler);
- // Register first_non_prealloc is the offset of the first field after
+ // Update first_prop register to be the offset of the first field after
// pre-allocated fields.
- Register first_non_prealloc = x12;
- __ Add(first_non_prealloc, first_prop,
+ __ Add(first_prop, first_prop,
Operand(prealloc_fields, LSL, kPointerSizeLog2));
- first_prop = NoReg;
-
if (FLAG_debug_code) {
- Register obj_end = x5;
+ Register obj_end = x14;
__ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
- __ Cmp(first_non_prealloc, obj_end);
+ __ Cmp(first_prop, obj_end);
__ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
}
// Fill the remaining fields with one pointer filler map.
- Register one_pointer_filler = x5;
- Register non_prealloc_fields = x6;
- __ LoadRoot(one_pointer_filler, Heap::kOnePointerFillerMapRootIndex);
- __ Sub(non_prealloc_fields, prop_fields, prealloc_fields);
- __ FillFields(first_non_prealloc, non_prealloc_fields,
- one_pointer_filler);
- prop_fields = NoReg;
- } else if (create_memento) {
+ __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
+ __ Sub(prop_fields, prop_fields, prealloc_fields);
+
+ __ bind(&no_inobject_slack_tracking);
+ }
+ if (create_memento) {
// Fill the pre-allocated fields with undef.
- __ FillFields(first_prop, prop_fields, undef);
+ __ FillFields(first_prop, prop_fields, filler);
__ Add(first_prop, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
__ LoadRoot(x14, Heap::kAllocationMementoMapRootIndex);
- ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+ DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
__ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
// Load the AllocationSite
__ Peek(x14, 2 * kXRegSize);
- ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+ DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
__ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
first_prop = NoReg;
} else {
// Fill all of the property fields with undef.
- __ FillFields(first_prop, prop_fields, undef);
+ __ FillFields(first_prop, prop_fields, filler);
first_prop = NoReg;
prop_fields = NoReg;
}
@@ -516,7 +517,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Initialize the fields to undefined.
Register elements = x10;
__ Add(elements, new_array, FixedArray::kHeaderSize);
- __ FillFields(elements, element_count, undef);
+ __ FillFields(elements, element_count, filler);
// Store the initialized FixedArray into the properties field of the
// JSObject.
@@ -541,7 +542,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Peek(x4, 2 * kXRegSize);
__ Push(x4);
__ Push(constructor); // Argument for Runtime_NewObject.
- __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
__ Mov(x4, x0);
// If we ended up using the runtime, and we want a memento, then the
// runtime call made it for us, and we shouldn't do create count
@@ -549,7 +550,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ jmp(&count_incremented);
} else {
__ Push(constructor); // Argument for Runtime_NewObject.
- __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ __ CallRuntime(Runtime::kNewObject, 1);
__ Mov(x4, x0);
}
@@ -624,7 +625,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -675,18 +676,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, false);
-}
-
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
+ Generate_JSConstructStubHelper(masm, true, false);
}
@@ -786,7 +782,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+ CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
@@ -796,11 +792,11 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
Register function = x1;
// Preserve function. At the same time, push arguments for
- // kHiddenCompileOptimized.
+ // kCompileOptimized.
__ LoadObject(x10, masm->isolate()->factory()->ToBoolean(concurrent));
__ Push(function, function, x10);
- __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+ __ CallRuntime(Runtime::kCompileOptimized, 2);
// Restore receiver.
__ Pop(function);
@@ -910,7 +906,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// preserve the registers with parameters.
__ PushXRegList(kSafepointSavedRegisters);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ PopXRegList(kSafepointSavedRegisters);
}
@@ -940,7 +936,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the deoptimization type to the runtime system.
__ Mov(x0, Smi::FromInt(static_cast<int>(type)));
__ Push(x0);
- __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
}
// Get the full codegen state from the stack and untag it.
@@ -1025,7 +1021,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ B(hs, &ok);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kHiddenStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1069,7 +1065,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 3a. Patch the first argument if necessary when calling a function.
Label shift_arguments;
__ Mov(call_type, static_cast<int>(call_type_JS_func));
- { Label convert_to_object, use_global_receiver, patch_receiver;
+ { Label convert_to_object, use_global_proxy, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
@@ -1093,8 +1089,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ JumpIfSmi(receiver, &convert_to_object);
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
- &use_global_receiver);
- __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
+ &use_global_proxy);
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_proxy);
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ JumpIfObjectType(receiver, scratch1, scratch2,
@@ -1122,10 +1118,10 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Mov(call_type, static_cast<int>(call_type_JS_func));
__ B(&patch_receiver);
- __ Bind(&use_global_receiver);
+ __ Bind(&use_global_proxy);
__ Ldr(receiver, GlobalObjectMemOperand());
__ Ldr(receiver,
- FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset));
+ FieldMemOperand(receiver, GlobalObject::kGlobalProxyOffset));
__ Bind(&patch_receiver);
@@ -1250,7 +1246,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// TODO(jbramley): Check that the stack usage here is safe.
__ Sub(x10, jssp, x10);
// Check if the arguments will overflow the stack.
- __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2));
+ __ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
__ B(gt, &enough_stack_space);
// There is not enough stack space, so use a builtin to throw an appropriate
// error.
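The rewritten comparison is the same arithmetic as before, just expressed through the Operand helper: untag the smi argument count, scale it to bytes, and compare it against the space left on the stack. A hedged scalar restatement, with variable names invented for the example:

  // Illustrative only: the integer check behind the Cmp/B(gt) above.
  bool EnoughStackSpace(int64_t jssp, int64_t real_stack_limit, int64_t argc_smi) {
    int64_t available = jssp - real_stack_limit;                   // x10 at this point.
    int64_t needed = (argc_smi >> kSmiShift) << kPointerSizeLog2;  // Untag, then scale.
    return available > needed;
  }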
@@ -1282,7 +1278,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Compute and push the receiver.
// Do not transform the receiver for strict mode functions.
- Label convert_receiver_to_object, use_global_receiver;
+ Label convert_receiver_to_object, use_global_proxy;
__ Ldr(w10, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
__ Tbnz(x10, SharedFunctionInfo::kStrictModeFunction, &push_receiver);
// Do not transform the receiver for native functions.
@@ -1290,9 +1286,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Compute the receiver in sloppy mode.
__ JumpIfSmi(receiver, &convert_receiver_to_object);
- __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_proxy);
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
- &use_global_receiver);
+ &use_global_proxy);
// Check if the receiver is already a JavaScript object.
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
@@ -1306,9 +1302,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ Mov(receiver, x0);
__ B(&push_receiver);
- __ Bind(&use_global_receiver);
+ __ Bind(&use_global_proxy);
__ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+ __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
// Push the receiver
__ Bind(&push_receiver);
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index a2dd22058..3ef118aae 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -2,354 +2,279 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "regexp-macro-assembler.h"
-#include "stub-cache.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
-
void FastNewClosureStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
+ // cp: context
// x2: function info
- static Register registers[] = { x2 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
+ Register registers[] = { cp, x2 };
+ descriptor->Initialize(
+ MajorKey(), ARRAY_SIZE(registers), registers,
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
}
void FastNewContextStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
+ // cp: context
// x1: function
- static Register registers[] = { x1 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
+ Register registers[] = { cp, x1 };
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
}
void ToNumberStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
+ // cp: context
// x0: value
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
+ Register registers[] = { cp, x0 };
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
}
void NumberToStringStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
+ // cp: context
// x0: value
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
+ Register registers[] = { cp, x0 };
+ descriptor->Initialize(
+ MajorKey(), ARRAY_SIZE(registers), registers,
+ Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
}
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
+ // cp: context
// x3: array literals array
// x2: array literal index
// x1: constant elements
- static Register registers[] = { x3, x2, x1 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(
- Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
+ Register registers[] = { cp, x3, x2, x1 };
+ Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi(),
+ Representation::Tagged() };
+ descriptor->Initialize(
+ MajorKey(), ARRAY_SIZE(registers), registers,
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
+ representations);
}
void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
+ // cp: context
// x3: object literals array
// x2: object literal index
// x1: constant properties
// x0: object literal flags
- static Register registers[] = { x3, x2, x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
+ Register registers[] = { cp, x3, x2, x1, x0 };
+ descriptor->Initialize(
+ MajorKey(), ARRAY_SIZE(registers), registers,
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
+ // cp: context
// x2: feedback vector
// x3: call feedback slot
- static Register registers[] = { x2, x3 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
+ Register registers[] = { cp, x2, x3 };
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
}
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+void CallFunctionStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
- // x1: receiver
- // x0: key
- static Register registers[] = { x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+ // x1 function the function to call
+ Register registers[] = {cp, x1};
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
}
-void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+void CallConstructStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
- // x1: receiver
- // x0: key
- static Register registers[] = { x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+ // x0 : number of arguments
+ // x1 : the function to call
+ // x2 : feedback vector
+ // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
+ // TODO(turbofan): So far we don't gather type feedback and hence skip the
+ // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+ Register registers[] = {cp, x0, x1, x2};
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
}
void RegExpConstructResultStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
+ // cp: context
// x2: length
// x1: index (of last match)
// x0: string
- static Register registers[] = { x2, x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
-}
-
-
-void LoadFieldStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: receiver
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: receiver
- static Register registers[] = { x1 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void StringLengthStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { x0, x2 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStringLengthStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { x1, x0 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // x2: receiver
- // x1: key
- // x0: value
- static Register registers[] = { x2, x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+ Register registers[] = { cp, x2, x1, x0 };
+ descriptor->Initialize(
+ MajorKey(), ARRAY_SIZE(registers), registers,
+ Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
}
void TransitionElementsKindStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
+ // cp: context
// x0: value (js_array)
// x1: to_map
- static Register registers[] = { x0, x1 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
+ Register registers[] = { cp, x0, x1 };
Address entry =
Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+ FUNCTION_ADDR(entry));
}
void CompareNilICStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
+ // cp: context
// x0: value to compare
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(CompareNilIC_Miss);
+ Register registers[] = { cp, x0 };
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+ FUNCTION_ADDR(CompareNilIC_Miss));
descriptor->SetMissHandler(
ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
}
+const Register InterfaceDescriptor::ContextRegister() { return cp; }
+
+
static void InitializeArrayConstructorDescriptor(
- CodeStubInterfaceDescriptor* descriptor,
+ CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
+ // cp: context
// x1: function
// x2: allocation site with elements kind
// x0: number of arguments to the constructor function
- static Register registers_variable_args[] = { x1, x2, x0 };
- static Register registers_no_args[] = { x1, x2 };
+ Address deopt_handler = Runtime::FunctionForId(
+ Runtime::kArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- descriptor->register_param_count_ =
- sizeof(registers_no_args) / sizeof(registers_no_args[0]);
- descriptor->register_params_ = registers_no_args;
+ Register registers[] = { cp, x1, x2 };
+ descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+ deopt_handler, NULL, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
} else {
// stack param count needs (constructor pointer, and single argument)
- descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
- descriptor->stack_parameter_count_ = x0;
- descriptor->register_param_count_ =
- sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
- descriptor->register_params_ = registers_variable_args;
+ Register registers[] = { cp, x1, x2, x0 };
+ Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->Initialize(major, ARRAY_SIZE(registers), registers, x0,
+ deopt_handler, representations,
+ constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
-
- descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(descriptor, 0);
+ InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
}
void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(descriptor, 1);
+ InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
}
void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(descriptor, -1);
+ InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
}
static void InitializeInternalArrayConstructorDescriptor(
- CodeStubInterfaceDescriptor* descriptor,
+ CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
+ // cp: context
// x1: constructor function
// x0: number of arguments to the constructor function
- static Register registers_variable_args[] = { x1, x0 };
- static Register registers_no_args[] = { x1 };
+ Address deopt_handler = Runtime::FunctionForId(
+ Runtime::kInternalArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- descriptor->register_param_count_ =
- sizeof(registers_no_args) / sizeof(registers_no_args[0]);
- descriptor->register_params_ = registers_no_args;
+ Register registers[] = { cp, x1 };
+ descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
+ deopt_handler, NULL, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
} else {
// stack param count needs (constructor pointer, and single argument)
- descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
- descriptor->stack_parameter_count_ = x0;
- descriptor->register_param_count_ =
- sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
- descriptor->register_params_ = registers_variable_args;
+ Register registers[] = { cp, x1, x0 };
+ Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->Initialize(major, ARRAY_SIZE(registers), registers, x0,
+ deopt_handler, representations,
+ constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
-
- descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+ InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
}
void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+ InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
}
void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(descriptor, -1);
+ InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
}
void ToBooleanStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
+ // cp: context
// x0: value
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
+ Register registers[] = { cp, x0 };
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+ FUNCTION_ADDR(ToBooleanIC_Miss));
descriptor->SetMissHandler(
ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
}
-void StoreGlobalStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: receiver
- // x2: key (unused)
- // x0: value
- static Register registers[] = { x1, x2, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(StoreIC_MissFromStubFailure);
-}
-
-
-void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: value
- // x3: target map
- // x1: key
- // x2: receiver
- static Register registers[] = { x0, x3, x1, x2 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
-}
-
-
void BinaryOpICStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
+ // cp: context
// x1: left operand
// x0: right operand
- static Register registers[] = { x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ Register registers[] = { cp, x1, x0 };
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+ FUNCTION_ADDR(BinaryOpIC_Miss));
descriptor->SetMissHandler(
ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
}
@@ -357,120 +282,108 @@ void BinaryOpICStub::InitializeInterfaceDescriptor(
void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
+ // cp: context
// x2: allocation site
// x1: left operand
// x0: right operand
- static Register registers[] = { x2, x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+ Register registers[] = { cp, x2, x1, x0 };
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
}
void StringAddStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
+ // cp: context
// x1: left operand
// x0: right operand
- static Register registers[] = { x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
+ Register registers[] = { cp, x1, x0 };
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
+ Runtime::FunctionForId(Runtime::kStringAdd)->entry);
}
void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
- static PlatformCallInterfaceDescriptor default_descriptor =
- PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
- static PlatformCallInterfaceDescriptor noInlineDescriptor =
- PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+ static PlatformInterfaceDescriptor noInlineDescriptor =
+ PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
{
CallInterfaceDescriptor* descriptor =
isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
- static Register registers[] = { x1, // JSFunction
- cp, // context
- x0, // actual number of arguments
- x2, // expected number of arguments
+ Register registers[] = { cp, // context
+ x1, // JSFunction
+ x0, // actual number of arguments
+ x2, // expected number of arguments
};
- static Representation representations[] = {
- Representation::Tagged(), // JSFunction
+ Representation representations[] = {
Representation::Tagged(), // context
+ Representation::Tagged(), // JSFunction
Representation::Integer32(), // actual number of arguments
Representation::Integer32(), // expected number of arguments
};
- descriptor->register_param_count_ = 4;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &default_descriptor;
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ representations, &default_descriptor);
}
{
CallInterfaceDescriptor* descriptor =
isolate->call_descriptor(Isolate::KeyedCall);
- static Register registers[] = { cp, // context
- x2, // key
+ Register registers[] = { cp, // context
+ x2, // key
};
- static Representation representations[] = {
+ Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // key
};
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ representations, &noInlineDescriptor);
}
{
CallInterfaceDescriptor* descriptor =
isolate->call_descriptor(Isolate::NamedCall);
- static Register registers[] = { cp, // context
- x2, // name
+ Register registers[] = { cp, // context
+ x2, // name
};
- static Representation representations[] = {
+ Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // name
};
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ representations, &noInlineDescriptor);
}
{
CallInterfaceDescriptor* descriptor =
isolate->call_descriptor(Isolate::CallHandler);
- static Register registers[] = { cp, // context
- x0, // receiver
+ Register registers[] = { cp, // context
+ x0, // receiver
};
- static Representation representations[] = {
+ Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // receiver
};
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &default_descriptor;
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ representations, &default_descriptor);
}
{
CallInterfaceDescriptor* descriptor =
isolate->call_descriptor(Isolate::ApiFunctionCall);
- static Register registers[] = { x0, // callee
- x4, // call_data
- x2, // holder
- x1, // api_function_address
- cp, // context
+ Register registers[] = { cp, // context
+ x0, // callee
+ x4, // call_data
+ x2, // holder
+ x1, // api_function_address
};
- static Representation representations[] = {
+ Representation representations[] = {
+ Representation::Tagged(), // context
Representation::Tagged(), // callee
Representation::Tagged(), // call_data
Representation::Tagged(), // holder
Representation::External(), // api_function_address
- Representation::Tagged(), // context
};
- descriptor->register_param_count_ = 5;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &default_descriptor;
+ descriptor->Initialize(ARRAY_SIZE(registers), registers,
+ representations, &default_descriptor);
}
}
@@ -483,22 +396,22 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
isolate()->counters()->code_stubs()->Increment();
CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
- int param_count = descriptor->register_param_count_;
+ int param_count = descriptor->GetEnvironmentParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
- ASSERT((descriptor->register_param_count_ == 0) ||
- x0.Is(descriptor->register_params_[param_count - 1]));
+ DCHECK((param_count == 0) ||
+ x0.Is(descriptor->GetEnvironmentParameterRegister(param_count - 1)));
// Push arguments
MacroAssembler::PushPopQueue queue(masm);
for (int i = 0; i < param_count; ++i) {
- queue.Queue(descriptor->register_params_[i]);
+ queue.Queue(descriptor->GetEnvironmentParameterRegister(i));
}
queue.PushQueued();
ExternalReference miss = descriptor->miss_handler();
- __ CallExternalReference(miss, descriptor->register_param_count_);
+ __ CallExternalReference(miss, param_count);
}
__ Ret();
@@ -509,10 +422,10 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Label done;
Register input = source();
Register result = destination();
- ASSERT(is_truncating());
+ DCHECK(is_truncating());
- ASSERT(result.Is64Bits());
- ASSERT(jssp.Is(masm->StackPointer()));
+ DCHECK(result.Is64Bits());
+ DCHECK(jssp.Is(masm->StackPointer()));
int double_offset = offset();
@@ -592,7 +505,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
FPRegister double_scratch,
Label* slow,
Condition cond) {
- ASSERT(!AreAliased(left, right, scratch));
+ DCHECK(!AreAliased(left, right, scratch));
Label not_identical, return_equal, heap_number;
Register result = x0;
@@ -647,7 +560,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// it is handled in the parser (see Parser::ParseBinaryExpression). We are
// only concerned with cases ge, le and eq here.
if ((cond != lt) && (cond != gt)) {
- ASSERT((cond == ge) || (cond == le) || (cond == eq));
+ DCHECK((cond == ge) || (cond == le) || (cond == eq));
__ Bind(&heap_number);
// Left and right are identical pointers to a heap number object. Return
// non-equal if the heap number is a NaN, and equal otherwise. Comparing
@@ -680,7 +593,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register left_type,
Register right_type,
Register scratch) {
- ASSERT(!AreAliased(left, right, left_type, right_type, scratch));
+ DCHECK(!AreAliased(left, right, left_type, right_type, scratch));
if (masm->emit_debug_code()) {
// We assume that the arguments are not identical.
@@ -698,7 +611,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
__ B(lt, &right_non_object);
// Return non-zero - x0 already contains a non-zero pointer.
- ASSERT(left.is(x0) || right.is(x0));
+ DCHECK(left.is(x0) || right.is(x0));
Label return_not_equal;
__ Bind(&return_not_equal);
__ Ret();
@@ -736,9 +649,9 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register scratch,
Label* slow,
bool strict) {
- ASSERT(!AreAliased(left, right, scratch));
- ASSERT(!AreAliased(left_d, right_d));
- ASSERT((left.is(x0) && right.is(x1)) ||
+ DCHECK(!AreAliased(left, right, scratch));
+ DCHECK(!AreAliased(left_d, right_d));
+ DCHECK((left.is(x0) && right.is(x1)) ||
(right.is(x0) && left.is(x1)));
Register result = x0;
@@ -811,7 +724,7 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
Register right_type,
Label* possible_strings,
Label* not_both_strings) {
- ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
+ DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type));
Register result = x0;
Label object_test;
@@ -931,7 +844,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Left and/or right is a NaN. Load the result register with whatever makes
// the comparison fail, since comparisons with NaN always fail (except ne,
// which is filtered out at a higher level.)
- ASSERT(cond != ne);
+ DCHECK(cond != ne);
if ((cond == lt) || (cond == le)) {
__ Mov(result, GREATER);
} else {
@@ -1022,7 +935,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
if ((cond == lt) || (cond == le)) {
ncr = GREATER;
} else {
- ASSERT((cond == gt) || (cond == ge)); // remaining cases
+ DCHECK((cond == gt) || (cond == ge)); // remaining cases
ncr = LESS;
}
__ Mov(x10, Smi::FromInt(ncr));
@@ -1086,11 +999,7 @@ void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
// Restore lr with the value it had before the call to this stub (the value
// which must be pushed).
__ Mov(lr, saved_lr);
- if (save_doubles_ == kSaveFPRegs) {
- __ PushSafepointRegistersAndDoubles();
- } else {
- __ PushSafepointRegisters();
- }
+ __ PushSafepointRegisters();
__ Ret(return_address);
}
@@ -1101,11 +1010,7 @@ void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
Register return_address = temps.AcquireX();
// Preserve the return address (lr will be clobbered by the pop).
__ Mov(return_address, lr);
- if (save_doubles_ == kSaveFPRegs) {
- __ PopSafepointRegistersAndDoubles();
- } else {
- __ PopSafepointRegisters();
- }
+ __ PopSafepointRegisters();
__ Ret(return_address);
}
@@ -1332,13 +1237,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Bind(&call_runtime);
// Put the arguments back on the stack.
__ Push(base_tagged, exponent_tagged);
- __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
// Return.
__ Bind(&done);
__ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
result_double);
- ASSERT(result_tagged.is(x0));
+ DCHECK(result_tagged.is(x0));
__ IncrementCounter(
isolate()->counters()->math_pow(), 1, scratch0, scratch1);
__ Ret();
@@ -1377,18 +1282,14 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
- StoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
- stub1.GetCode();
- StoreRegistersStateStub stub2(isolate, kSaveFPRegs);
- stub2.GetCode();
+ StoreRegistersStateStub stub(isolate);
+ stub.GetCode();
}
void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
- RestoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
- stub1.GetCode();
- RestoreRegistersStateStub stub2(isolate, kSaveFPRegs);
- stub2.GetCode();
+ RestoreRegistersStateStub stub(isolate);
+ stub.GetCode();
}
@@ -1446,7 +1347,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
//
// The arguments are in reverse order, so that arg[argc-2] is actually the
// first argument to the target function and arg[0] is the last.
- ASSERT(jssp.Is(__ StackPointer()));
+ DCHECK(jssp.Is(__ StackPointer()));
const Register& argc_input = x0;
const Register& target_input = x1;
@@ -1473,7 +1374,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// registers.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_, x10, 3);
- ASSERT(csp.Is(__ StackPointer()));
+ DCHECK(csp.Is(__ StackPointer()));
// Poke callee-saved registers into reserved space.
__ Poke(argv, 1 * kPointerSize);
@@ -1523,7 +1424,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// untouched, and the stub either throws an exception by jumping to one of
// the exception_returned label.
- ASSERT(csp.Is(__ StackPointer()));
+ DCHECK(csp.Is(__ StackPointer()));
// Prepare AAPCS64 arguments to pass to the builtin.
__ Mov(x0, argc);
@@ -1570,7 +1471,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Peek(target, 3 * kPointerSize);
__ LeaveExitFrame(save_doubles_, x10, true);
- ASSERT(jssp.Is(__ StackPointer()));
+ DCHECK(jssp.Is(__ StackPointer()));
// Pop or drop the remaining stack slots and return from the stub.
// jssp[24]: Arguments array (of size argc), including receiver.
// jssp[16]: Preserved x23 (used for target).
@@ -1642,7 +1543,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Output:
// x0: result.
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- ASSERT(jssp.Is(__ StackPointer()));
+ DCHECK(jssp.Is(__ StackPointer()));
Register code_entry = x0;
// Enable instruction instrumentation. This only works on the simulator, and
@@ -1696,7 +1597,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ B(&done);
__ Bind(&non_outermost_js);
// We spare one instruction by pushing xzr since the marker is 0.
- ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
+ DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
__ Push(xzr);
__ Bind(&done);
@@ -1798,7 +1699,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Reset the stack to the callee saved registers.
__ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
// Restore the callee-saved registers and return.
- ASSERT(jssp.Is(__ StackPointer()));
+ DCHECK(jssp.Is(__ StackPointer()));
__ Mov(csp, jssp);
__ SetStackPointer(csp);
__ PopCalleeSavedRegisters();
@@ -1810,33 +1711,14 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- x1 : receiver
- // -- x0 : key
- // -----------------------------------
- Register key = x0;
- receiver = x1;
- __ Cmp(key, Operand(isolate()->factory()->prototype_string()));
- __ B(ne, &miss);
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- x2 : name
- // -- x0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = x0;
- }
+ Register receiver = LoadIC::ReceiverRegister();
- StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss);
+ NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
+ x11, &miss);
__ Bind(&miss);
- StubCompiler::TailCallBuiltin(masm,
- BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}
@@ -1884,7 +1766,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// If there is a call site cache, don't look in the global cache, but do the
// real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
+ if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
Label miss;
__ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
__ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
@@ -1916,6 +1798,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
Label return_true, return_result;
+ Register smi_value = scratch1;
{
// Loop through the prototype chain looking for the function prototype.
Register chain_map = x1;
@@ -1926,6 +1809,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
// Speculatively set a result.
__ Mov(result, res_false);
+ if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) {
+ // Value to store in the cache cannot be an object.
+ __ Mov(smi_value, Smi::FromInt(1));
+ }
__ Bind(&loop);
@@ -1948,14 +1835,19 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// We cannot fall through to here.
__ Bind(&return_true);
__ Mov(result, res_true);
+ if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) {
+ // Value to store in the cache cannot be an object.
+ __ Mov(smi_value, Smi::FromInt(0));
+ }
__ Bind(&return_result);
if (HasCallSiteInlineCheck()) {
- ASSERT(ReturnTrueFalseObject());
+ DCHECK(ReturnTrueFalseObject());
__ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
__ GetRelocatedValueLocation(map_check_site, scratch2);
__ Str(result, MemOperand(scratch2));
} else {
- __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+ Register cached_value = ReturnTrueFalseObject() ? smi_value : result;
+ __ StoreRoot(cached_value, Heap::kInstanceofCacheAnswerRootIndex);
}
__ Ret();
@@ -2082,9 +1974,8 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
Register caller_fp = x10;
__ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Load and untag the context.
- STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
- __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
- (kSmiShift / kBitsPerByte)));
+ __ Ldr(w11, UntagSmiMemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
__ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
__ B(ne, &runtime);
@@ -2097,7 +1988,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ Poke(x10, 1 * kXRegSize);
__ Bind(&runtime);
- __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
@@ -2197,41 +2088,42 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Get the arguments boilerplate from the current (global) context.
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
- // x7 param_count number of function parameters
- // x11 args_offset offset to args (or aliased args) boilerplate (uninit)
- // x14 recv_arg pointer to receiver arguments
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x11 sloppy_args_map offset to args (or aliased args) map (uninit)
+ // x14 recv_arg pointer to receiver arguments
Register global_object = x10;
Register global_ctx = x10;
- Register args_offset = x11;
- Register aliased_args_offset = x10;
+ Register sloppy_args_map = x11;
+ Register aliased_args_map = x10;
__ Ldr(global_object, GlobalObjectMemOperand());
__ Ldr(global_ctx, FieldMemOperand(global_object,
GlobalObject::kNativeContextOffset));
- __ Ldr(args_offset,
- ContextMemOperand(global_ctx,
- Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX));
- __ Ldr(aliased_args_offset,
- ContextMemOperand(global_ctx,
- Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX));
+ __ Ldr(sloppy_args_map,
+ ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ Ldr(aliased_args_map,
+ ContextMemOperand(global_ctx, Context::ALIASED_ARGUMENTS_MAP_INDEX));
__ Cmp(mapped_params, 0);
- __ CmovX(args_offset, aliased_args_offset, ne);
+ __ CmovX(sloppy_args_map, aliased_args_map, ne);
// Copy the JS object part.
- __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13),
- JSObject::kHeaderSize / kPointerSize);
+ __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
+ __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
+ __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
const int kCalleeOffset = JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ AssertNotSmi(function);
__ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
// Use the length and set that as an in-object property.
@@ -2369,7 +2261,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ Bind(&runtime);
__ Push(function, recv_arg, arg_count_smi);
- __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
@@ -2432,25 +2324,24 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the arguments boilerplate from the current (native) context.
Register global_object = x10;
Register global_ctx = x10;
- Register args_offset = x4;
+ Register strict_args_map = x4;
__ Ldr(global_object, GlobalObjectMemOperand());
__ Ldr(global_ctx, FieldMemOperand(global_object,
GlobalObject::kNativeContextOffset));
- __ Ldr(args_offset,
- ContextMemOperand(global_ctx,
- Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX));
+ __ Ldr(strict_args_map,
+ ContextMemOperand(global_ctx, Context::STRICT_ARGUMENTS_MAP_INDEX));
// x0 alloc_obj pointer to allocated objects: parameter array and
// arguments object
// x1 param_count_smi number of parameters passed to function (smi)
// x2 params pointer to parameters
// x3 function function pointer
- // x4 args_offset offset to arguments boilerplate
+ // x4 strict_args_map offset to arguments map
// x13 param_count number of parameters passed to function
-
- // Copy the JS object part.
- __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7),
- JSObject::kHeaderSize / kPointerSize);
+ __ Str(strict_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
+ __ LoadRoot(x5, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
+ __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
// Set the smi-tagged length as an in-object property.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
@@ -2502,13 +2393,13 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ Bind(&runtime);
__ Push(function, params, param_count_smi);
- __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2587,7 +2478,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Cbz(x10, &runtime);
// Check that the first argument is a JSRegExp object.
- ASSERT(jssp.Is(__ StackPointer()));
+ DCHECK(jssp.Is(__ StackPointer()));
__ Peek(jsregexp_object, kJSRegExpOffset);
__ JumpIfSmi(jsregexp_object, &runtime);
__ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
@@ -2624,7 +2515,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Initialize offset for possibly sliced string.
__ Mov(sliced_string_offset, 0);
- ASSERT(jssp.Is(__ StackPointer()));
+ DCHECK(jssp.Is(__ StackPointer()));
__ Peek(subject, kSubjectOffset);
__ JumpIfSmi(subject, &runtime);
@@ -2696,8 +2587,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
// The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
__ TestAndBranchIfAnySet(string_type.X(),
kStringRepresentationMask,
&external_string); // Go to (7).
@@ -2707,7 +2598,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
- ASSERT(jssp.Is(__ StackPointer()));
+ DCHECK(jssp.Is(__ StackPointer()));
__ Peek(x10, kPreviousIndexOffset);
__ JumpIfNotSmi(x10, &runtime);
__ Cmp(jsstring_length, x10);
@@ -2725,7 +2616,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Find the code object based on the assumptions above.
// kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent; adding an offset
// of kPointerSize to the former reaches the latter.
- ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
+ DCHECK_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
JSRegExp::kDataUC16CodeOffset);
__ Mov(x10, kPointerSize);
// We will need the encoding later: ASCII = 0x04
@@ -2749,7 +2640,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Isolates: note we add an additional parameter here (isolate pointer).
__ EnterExitFrame(false, x10, 1);
- ASSERT(csp.Is(__ StackPointer()));
+ DCHECK(csp.Is(__ StackPointer()));
// We have 9 arguments to pass to the regexp code, therefore we have to pass
// one on the stack and the rest as registers.
@@ -2853,7 +2744,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Add(number_of_capture_registers, x10, 2);
// Check that the fourth object is a JSArray object.
- ASSERT(jssp.Is(__ StackPointer()));
+ DCHECK(jssp.Is(__ StackPointer()));
__ Peek(x10, kLastMatchInfoOffset);
__ JumpIfSmi(x10, &runtime);
__ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
@@ -2932,8 +2823,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Store the smi values in the last match info.
__ SmiTag(x10, current_offset);
// Clearing the 32 bottom bits gives us a Smi.
- STATIC_ASSERT(kSmiShift == 32);
- __ And(x11, current_offset, ~kWRegMask);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Bic(x11, current_offset, kSmiShiftMask);
__ Stp(x10,
x11,
MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
@@ -2982,7 +2873,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Bind(&runtime);
__ PopCPURegList(used_callee_saved_registers);
- __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -3035,7 +2926,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm,
Register scratch1,
Register scratch2) {
ASM_LOCATION("GenerateRecordCallTarget");
- ASSERT(!AreAliased(scratch1, scratch2,
+ DCHECK(!AreAliased(scratch1, scratch2,
argc, function, feedback_vector, index));
// Cache the called function in a feedback vector slot. Cache states are
// uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
@@ -3045,9 +2936,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm,
// index : slot in feedback vector (smi)
Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
- ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state.
@@ -3112,7 +3003,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm,
// CreateAllocationSiteStub expect the feedback vector in x2 and the slot
// index in x3.
- ASSERT(feedback_vector.Is(x2) && index.Is(x3));
+ DCHECK(feedback_vector.Is(x2) && index.Is(x3));
__ CallStub(&create_stub);
__ Pop(index, feedback_vector, function, argc);
@@ -3192,10 +3083,10 @@ static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
}
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- ASM_LOCATION("CallFunctionStub::Generate");
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+ int argc, bool needs_checks,
+ bool call_as_method) {
// x1 function the function to call
-
Register function = x1;
Register type = x4;
Label slow, non_function, wrap, cont;
@@ -3203,7 +3094,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// TODO(jbramley): This function has a lot of unnamed registers. Name them,
// and tidy things up a bit.
- if (NeedsChecks()) {
+ if (needs_checks) {
// Check that the function is really a JavaScript function.
__ JumpIfSmi(function, &non_function);
@@ -3213,18 +3104,17 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Fast-case: Invoke the function now.
// x1 function pushed function
- int argc = argc_;
ParameterCount actual(argc);
- if (CallAsMethod()) {
- if (NeedsChecks()) {
+ if (call_as_method) {
+ if (needs_checks) {
EmitContinueIfStrictOrNative(masm, &cont);
}
// Compute the receiver in sloppy mode.
__ Peek(x3, argc * kPointerSize);
- if (NeedsChecks()) {
+ if (needs_checks) {
__ JumpIfSmi(x3, &wrap);
__ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
} else {
@@ -3238,20 +3128,25 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
actual,
JUMP_FUNCTION,
NullCallWrapper());
-
- if (NeedsChecks()) {
+ if (needs_checks) {
// Slow-case: Non-function called.
__ Bind(&slow);
EmitSlowCase(masm, argc, function, type, &non_function);
}
- if (CallAsMethod()) {
+ if (call_as_method) {
__ Bind(&wrap);
EmitWrapCase(masm, argc, &cont);
}
}
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("CallFunctionStub::Generate");
+ CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+}
+
+
void CallConstructStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("CallConstructStub::Generate");
// x0 : number of arguments
@@ -3331,6 +3226,50 @@ static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
}
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+ // x1 - function
+ // x3 - slot id
+ Label miss;
+ Register function = x1;
+ Register feedback_vector = x2;
+ Register index = x3;
+ Register scratch = x4;
+
+ EmitLoadTypeFeedbackVector(masm, feedback_vector);
+
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch);
+ __ Cmp(function, scratch);
+ __ B(ne, &miss);
+
+ __ Mov(x0, Operand(arg_count()));
+
+ __ Add(scratch, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+
+ // Verify that scratch contains an AllocationSite
+ Register map = x5;
+ __ Ldr(map, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(map, Heap::kAllocationSiteMapRootIndex, &miss);
+
+ Register allocation_site = feedback_vector;
+ __ Mov(allocation_site, scratch);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+
+ // The slow case: we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm,
+ arg_count(),
+ true,
+ CallAsMethod());
+
+ __ Unreachable();
+}
+
+
void CallICStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("CallICStub");
@@ -3390,7 +3329,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ JumpIfRoot(x4, Heap::kUninitializedSymbolRootIndex, &miss);
if (!FLAG_trace_ic) {
- // We are going megamorphic, and we don't want to visit the runtime.
+ // We are going megamorphic. If the feedback is a JSFunction, it is fine
+ // to handle it here. More complex cases are dealt with in the runtime.
+ __ AssertNotSmi(x4);
+ __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
__ Add(x4, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ LoadRoot(x5, Heap::kMegamorphicSymbolRootIndex);
@@ -3400,7 +3342,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// We are here because tracing is on or we are going monomorphic.
__ bind(&miss);
- GenerateMiss(masm);
+ GenerateMiss(masm, IC::kCallIC_Miss);
// the slow case
__ bind(&slow_start);
@@ -3414,7 +3356,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
+void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
ASM_LOCATION("CallICStub[Miss]");
// Get the receiver of the function from the stack; 1 ~ return address.
@@ -3427,7 +3369,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(x4, x1, x2, x3);
// Call the entry.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCallIC_Miss),
+ ExternalReference miss = ExternalReference(IC_Utility(id),
masm->isolate());
__ CallExternalReference(miss, 4);
@@ -3487,9 +3429,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -3512,7 +3454,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
__ Mov(result_, x0);
call_helper.AfterCall(masm);
__ B(&exit_);
@@ -3528,8 +3470,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
// At this point code register contains smi tagged ASCII char code.
- STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
- __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
+ __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
__ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
__ Bind(&exit_);
@@ -3555,7 +3496,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
// Inputs are in x0 (lhs) and x1 (rhs).
- ASSERT(state_ == CompareIC::SMI);
+ DCHECK(state_ == CompareIC::SMI);
ASM_LOCATION("ICCompareStub[Smis]");
Label miss;
// Bail out (to 'miss') unless both x0 and x1 are smis.
@@ -3577,7 +3518,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::NUMBER);
+ DCHECK(state_ == CompareIC::NUMBER);
ASM_LOCATION("ICCompareStub[HeapNumbers]");
Label unordered, maybe_undefined1, maybe_undefined2;
@@ -3645,7 +3586,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+ DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
ASM_LOCATION("ICCompareStub[InternalizedStrings]");
Label miss;
@@ -3683,9 +3624,9 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ DCHECK(state_ == CompareIC::UNIQUE_NAME);
ASM_LOCATION("ICCompareStub[UniqueNames]");
- ASSERT(GetCondition() == eq);
+ DCHECK(GetCondition() == eq);
Label miss;
Register result = x0;
@@ -3722,7 +3663,7 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
+ DCHECK(state_ == CompareIC::STRING);
ASM_LOCATION("ICCompareStub[Strings]");
Label miss;
@@ -3763,7 +3704,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// because we already know they are not identical. We know they are both
// strings.
if (equality) {
- ASSERT(GetCondition() == eq);
+ DCHECK(GetCondition() == eq);
STATIC_ASSERT(kInternalizedTag == 0);
Label not_internalized_strings;
__ Orr(x12, lhs_type, rhs_type);
@@ -3794,7 +3735,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ Bind(&miss);
@@ -3803,7 +3744,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
+ DCHECK(state_ == CompareIC::OBJECT);
ASM_LOCATION("ICCompareStub[Objects]");
Label miss;
@@ -3817,7 +3758,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
__ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
- ASSERT(GetCondition() == eq);
+ DCHECK(GetCondition() == eq);
__ Sub(result, rhs, lhs);
__ Ret();
@@ -3893,12 +3834,12 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character) {
- ASSERT(!AreAliased(hash, character));
+ DCHECK(!AreAliased(hash, character));
// hash = character + (character << 10);
__ LoadRoot(hash, Heap::kHashSeedRootIndex);
// Untag smi seed and add the character.
- __ Add(hash, character, Operand(hash, LSR, kSmiShift));
+ __ Add(hash, character, Operand::UntagSmi(hash));
// Compute hashes modulo 2^32 using a 32-bit W register.
Register hash_w = hash.W();
@@ -3913,7 +3854,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character) {
- ASSERT(!AreAliased(hash, character));
+ DCHECK(!AreAliased(hash, character));
// hash += character;
__ Add(hash, hash, character);
@@ -3934,7 +3875,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
// Compute hashes modulo 2^32 using a 32-bit W register.
Register hash_w = hash.W();
Register scratch_w = scratch.W();
- ASSERT(!AreAliased(hash_w, scratch_w));
+ DCHECK(!AreAliased(hash_w, scratch_w));
// hash += hash << 3;
__ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
@@ -4184,7 +4125,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&runtime);
- __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
__ bind(&single_char);
// x1: result_length
@@ -4208,7 +4149,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3) {
- ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
Register result = x0;
Register left_length = scratch1;
Register right_length = scratch2;
@@ -4251,7 +4192,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register scratch2,
Register scratch3,
Register scratch4) {
- ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
+ DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
@@ -4272,7 +4213,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare lengths - strings up to min-length are equal.
__ Bind(&compare_lengths);
- ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
// Use length_delta as result if it's zero.
Register result = x0;
@@ -4297,7 +4238,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
Register scratch1,
Register scratch2,
Label* chars_not_equal) {
- ASSERT(!AreAliased(left, right, length, scratch1, scratch2));
+ DCHECK(!AreAliased(left, right, length, scratch1, scratch2));
// Change index to run from -length to -1 by adding length to string
// start. This means that loop ends when index reaches zero, which
@@ -4361,7 +4302,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime.
// Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
- __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@@ -4392,12 +4333,6 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
}
-bool CodeStub::CanUseFPRegisters() {
- // FP registers always available on ARM64.
- return true;
-}
-
-
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// We need some extra registers for this stub, they have been allocated
// but we need to save them before using them.
@@ -4443,8 +4378,8 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
Register address =
x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
- ASSERT(!address.Is(regs_.object()));
- ASSERT(!address.Is(x0));
+ DCHECK(!address.Is(regs_.object()));
+ DCHECK(!address.Is(x0));
__ Mov(address, regs_.address());
__ Mov(x0, regs_.object());
__ Mov(x1, address);
@@ -4606,7 +4541,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(value, &smi_element);
// Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
- __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::kElementsKindShift),
+ __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::ElementsKindBits::kShift),
&fast_elements);
// Store into the array literal requires an elements transition. Call into
@@ -4646,7 +4581,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(isolate(), 1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
@@ -4661,22 +4596,31 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
-// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
-// a "Push lr" instruction, followed by a call.
-static const unsigned int kProfileEntryHookCallSize =
- Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
+ // The entry hook is a "BumpSystemStackPointer" instruction (sub),
+ // followed by a "Push lr" instruction, followed by a call.
+ unsigned int size =
+ Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+ if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+ // If ALWAYS_ALIGN_CSP is supported, there will be an extra bic instruction
+ // in "BumpSystemStackPointer".
+ size += kInstructionSize;
+ }
+ return size;
+}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
Assembler::BlockConstPoolScope no_const_pools(masm);
+ DontEmitDebugCodeScope no_debug_code(masm);
Label entry_hook_call_start;
__ Bind(&entry_hook_call_start);
__ Push(lr);
__ CallStub(&stub);
- ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
- kProfileEntryHookCallSize);
+ DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
+ GetProfileEntryHookCallSize(masm));
__ Pop(lr);
}
@@ -4690,11 +4634,11 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// from anywhere.
// TODO(jbramley): What about FP registers?
__ PushCPURegList(kCallerSaved);
- ASSERT(kCallerSaved.IncludesAliasOf(lr));
+ DCHECK(kCallerSaved.IncludesAliasOf(lr));
const int kNumSavedRegs = kCallerSaved.Count();
// Compute the function's address as the first argument.
- __ Sub(x0, lr, kProfileEntryHookCallSize);
+ __ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
#if V8_HOST_ARCH_ARM64
uintptr_t entry_hook =
@@ -4751,7 +4695,7 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
// Make sure the caller configured the stack pointer (see comment in
// DirectCEntryStub::Generate).
- ASSERT(csp.Is(__ StackPointer()));
+ DCHECK(csp.Is(__ StackPointer()));
intptr_t code =
reinterpret_cast<intptr_t>(GetCode().location());
@@ -4776,7 +4720,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(
Register name,
Register scratch1,
Register scratch2) {
- ASSERT(!AreAliased(elements, name, scratch1, scratch2));
+ DCHECK(!AreAliased(elements, name, scratch1, scratch2));
// Assert that name contains a string.
__ AssertName(name);
@@ -4793,7 +4737,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
- ASSERT(NameDictionary::GetProbeOffset(i) <
+ DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ Add(scratch2, scratch2, Operand(
NameDictionary::GetProbeOffset(i) << Name::kHashShift));
@@ -4801,7 +4745,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(
__ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
// Scale the index by multiplying by the element size.
- ASSERT(NameDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
__ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name.
@@ -4824,7 +4768,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(
__ PushCPURegList(spill_list);
if (name.is(x0)) {
- ASSERT(!elements.is(x1));
+ DCHECK(!elements.is(x1));
__ Mov(x1, name);
__ Mov(x0, elements);
} else {
@@ -4853,8 +4797,8 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Register properties,
Handle<Name> name,
Register scratch0) {
- ASSERT(!AreAliased(receiver, properties, scratch0));
- ASSERT(name->IsUniqueName());
+ DCHECK(!AreAliased(receiver, properties, scratch0));
+ DCHECK(name->IsUniqueName());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
@@ -4870,7 +4814,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
// Scale the index by multiplying by the entry size.
- ASSERT(NameDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
Register entity_name = scratch0;
@@ -4951,7 +4895,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
- ASSERT(NameDictionary::GetProbeOffset(i) <
+ DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ Add(index, hash,
NameDictionary::GetProbeOffset(i) << Name::kHashShift);
@@ -4961,7 +4905,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ And(index, mask, Operand(index, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
- ASSERT(NameDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
__ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
@@ -5397,7 +5341,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
- ASSERT(!AreAliased(x0, api_function_address));
+ DCHECK(!AreAliased(x0, api_function_address));
// x0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ Add(x0, masm->StackPointer(), 1 * kPointerSize);
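
[Illustrative note, not part of the patch] The recurring change in code-stubs-arm64.cc above is mechanical: instead of filling in register_param_count_, register_params_ and deoptimization_handler_ field by field from a static array, each stub now builds a local register list (with the context register cp listed first) and hands everything to one descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers, ...) call. The standalone C++ sketch below shows that call shape in isolation. DescriptorSketch, the Register stand-in, ARRAY_SIZE_SKETCH, InitializeBinaryOpSketch and the register codes are all invented for illustration; they are not the real V8 CodeStubInterfaceDescriptor API.

#include <cstdio>

// Stand-ins for the real V8 types; invented here purely for illustration.
struct Register { int code; };
typedef const void* Address;

// Same spirit as V8's ARRAY_SIZE: element count of a local array, known at
// compile time (the real macro adds extra type-safety checks).
#define ARRAY_SIZE_SKETCH(array) \
  (static_cast<int>(sizeof(array) / sizeof((array)[0])))

struct DescriptorSketch {
  static const int kMaxParams = 8;
  int register_param_count;
  Register register_params[kMaxParams];
  Address deopt_handler;

  // One call replaces the old field-by-field assignments; the registers are
  // copied, so the caller's array can be a plain (non-static) local.
  void Initialize(int count, const Register* params, Address handler) {
    register_param_count = count;
    for (int i = 0; i < count; ++i) register_params[i] = params[i];
    deopt_handler = handler;
  }
};

void InitializeBinaryOpSketch(DescriptorSketch* descriptor,
                              Address deopt_handler) {
  // Illustrative register codes only; cp is listed first, as in the patch.
  Register cp = { 27 };
  Register x1 = { 1 };
  Register x0 = { 0 };
  Register registers[] = { cp, x1, x0 };
  descriptor->Initialize(ARRAY_SIZE_SKETCH(registers), registers,
                         deopt_handler);
}

int main() {
  DescriptorSketch d;
  InitializeBinaryOpSketch(&d, NULL);
  std::printf("register params: %d\n", d.register_param_count);  // prints 3
  return 0;
}
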
diff --git a/deps/v8/src/arm64/code-stubs-arm64.h b/deps/v8/src/arm64/code-stubs-arm64.h
index a92445c47..75a945299 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.h
+++ b/deps/v8/src/arm64/code-stubs-arm64.h
@@ -5,7 +5,7 @@
#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
#define V8_ARM64_CODE_STUBS_ARM64_H_
-#include "ic-inl.h"
+#include "src/ic-inl.h"
namespace v8 {
namespace internal {
@@ -27,8 +27,8 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
private:
SaveFPRegsMode save_doubles_;
- Major MajorKey() { return StoreBufferOverflow; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ Major MajorKey() const { return StoreBufferOverflow; }
+ int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
@@ -56,15 +56,14 @@ class StringHelper : public AllStatic {
class StoreRegistersStateStub: public PlatformCodeStub {
public:
- StoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
- : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
+ explicit StoreRegistersStateStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
static Register to_be_pushed_lr() { return ip0; }
static void GenerateAheadOfTime(Isolate* isolate);
private:
- Major MajorKey() { return StoreRegistersState; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
- SaveFPRegsMode save_doubles_;
+ Major MajorKey() const { return StoreRegistersState; }
+ int MinorKey() const { return 0; }
void Generate(MacroAssembler* masm);
};
@@ -72,14 +71,13 @@ class StoreRegistersStateStub: public PlatformCodeStub {
class RestoreRegistersStateStub: public PlatformCodeStub {
public:
- RestoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
- : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
+ explicit RestoreRegistersStateStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
private:
- Major MajorKey() { return RestoreRegistersState; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
- SaveFPRegsMode save_doubles_;
+ Major MajorKey() const { return RestoreRegistersState; }
+ int MinorKey() const { return 0; }
void Generate(MacroAssembler* masm);
};
@@ -122,17 +120,17 @@ class RecordWriteStub: public PlatformCodeStub {
Instruction* instr2 = instr1->following();
if (instr1->IsUncondBranchImm()) {
- ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
+ DCHECK(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
return INCREMENTAL;
}
- ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
+ DCHECK(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
if (instr2->IsUncondBranchImm()) {
return INCREMENTAL_COMPACTION;
}
- ASSERT(instr2->IsPCRelAddressing());
+ DCHECK(instr2->IsPCRelAddressing());
return STORE_BUFFER_ONLY;
}
@@ -151,31 +149,31 @@ class RecordWriteStub: public PlatformCodeStub {
Instruction* instr1 = patcher.InstructionAt(0);
Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
// Instructions must be either 'adr' or 'b'.
- ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
- ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
+ DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
+ DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
// Retrieve the offsets to the labels.
int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
switch (mode) {
case STORE_BUFFER_ONLY:
- ASSERT(GetMode(stub) == INCREMENTAL ||
+ DCHECK(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL_COMPACTION:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
break;
}
- ASSERT(GetMode(stub) == mode);
+ DCHECK(GetMode(stub) == mode);
}
private:
@@ -191,7 +189,7 @@ class RecordWriteStub: public PlatformCodeStub {
scratch0_(scratch),
saved_regs_(kCallerSaved),
saved_fp_regs_(kCallerSavedFP) {
- ASSERT(!AreAliased(scratch, object, address));
+ DCHECK(!AreAliased(scratch, object, address));
// The SaveCallerSaveRegisters method needs to save caller-saved
// registers, but we don't bother saving MacroAssembler scratch registers.
@@ -303,9 +301,9 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- Major MajorKey() { return RecordWrite; }
+ Major MajorKey() const { return RecordWrite; }
- int MinorKey() {
+ int MinorKey() const {
return MinorKeyFor(object_, value_, address_, remembered_set_action_,
save_fp_regs_mode_);
}
@@ -315,9 +313,9 @@ class RecordWriteStub: public PlatformCodeStub {
Register address,
RememberedSetAction action,
SaveFPRegsMode fp_mode) {
- ASSERT(object.Is64Bits());
- ASSERT(value.Is64Bits());
- ASSERT(address.Is64Bits());
+ DCHECK(object.Is64Bits());
+ DCHECK(value.Is64Bits());
+ DCHECK(address.Is64Bits());
return ObjectBits::encode(object.code()) |
ValueBits::encode(value.code()) |
AddressBits::encode(address.code()) |
@@ -354,8 +352,8 @@ class DirectCEntryStub: public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
- Major MajorKey() { return DirectCEntry; }
- int MinorKey() { return 0; }
+ Major MajorKey() const { return DirectCEntry; }
+ int MinorKey() const { return 0; }
bool NeedsImmovableCode() { return true; }
};
@@ -400,11 +398,9 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return NameDictionaryLookup; }
+ Major MajorKey() const { return NameDictionaryLookup; }
- int MinorKey() {
- return LookupModeBits::encode(mode_);
- }
+ int MinorKey() const { return LookupModeBits::encode(mode_); }
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
@@ -417,8 +413,8 @@ class SubStringStub: public PlatformCodeStub {
explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
+ Major MajorKey() const { return SubString; }
+ int MinorKey() const { return 0; }
void Generate(MacroAssembler* masm);
};
@@ -447,8 +443,8 @@ class StringCompareStub: public PlatformCodeStub {
Register scratch3);
private:
- virtual Major MajorKey() { return StringCompare; }
- virtual int MinorKey() { return 0; }
+ virtual Major MajorKey() const { return StringCompare; }
+ virtual int MinorKey() const { return 0; }
virtual void Generate(MacroAssembler* masm);
static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
@@ -461,8 +457,9 @@ class StringCompareStub: public PlatformCodeStub {
};
-struct PlatformCallInterfaceDescriptor {
- explicit PlatformCallInterfaceDescriptor(
+class PlatformInterfaceDescriptor {
+ public:
+ explicit PlatformInterfaceDescriptor(
TargetAddressStorageMode storage_mode)
: storage_mode_(storage_mode) { }
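The Patch/GetMode pair in this stub encodes the write-barrier mode entirely in the first two patchable instructions: 'adr xzr, <label>' records the offset to an incremental-marking handler without branching (the result is discarded into xzr), while rewriting either slot to a real 'b' activates that handler, and GetMode reads the mode back from which slot currently holds a branch. A compact sketch of that three-state encoding, not V8 code; IsBranch-style booleans stand in for the IsUncondBranchImm checks:

#include <cassert>

enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

// Models the first two patchable instructions of the stub: each is either a
// placeholder (adr xzr, ...) or an unconditional branch to a marking handler.
struct StubHeader {
  bool first_is_branch;
  bool second_is_branch;
};

// The mode is recovered from which of the two slots holds a branch.
// At most one slot is a branch at any time.
Mode GetMode(const StubHeader& stub) {
  if (stub.first_is_branch) {
    assert(!stub.second_is_branch);
    return INCREMENTAL;
  }
  if (stub.second_is_branch) return INCREMENTAL_COMPACTION;
  return STORE_BUFFER_ONLY;
}

int main() {
  assert(GetMode({false, false}) == STORE_BUFFER_ONLY);
  assert(GetMode({true, false}) == INCREMENTAL);
  assert(GetMode({false, true}) == INCREMENTAL_COMPACTION);
  return 0;
}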
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index ff06eda86..16b6d3b18 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "codegen.h"
-#include "macro-assembler.h"
-#include "simulator-arm64.h"
+#include "src/arm64/simulator-arm64.h"
+#include "src/codegen.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -35,7 +35,8 @@ UnaryMathFunction CreateExpFunction() {
// an AAPCS64-compliant exp() function. This will be faster than the C
// library's exp() function, but probably less accurate.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
@@ -61,10 +62,10 @@ UnaryMathFunction CreateExpFunction() {
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
@@ -85,14 +86,14 @@ UnaryMathFunction CreateSqrtFunction() {
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
- ASSERT(!masm->has_frame());
+ DCHECK(!masm->has_frame());
masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
- ASSERT(masm->has_frame());
+ DCHECK(masm->has_frame());
masm->set_has_frame(false);
}
@@ -101,26 +102,28 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// Code generators
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm, AllocationSiteMode mode,
+ MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register target_map,
+ AllocationSiteMode mode,
Label* allocation_memento_found) {
- // ----------- S t a t e -------------
- // -- x2 : receiver
- // -- x3 : target map
- // -----------------------------------
- Register receiver = x2;
- Register map = x3;
+ ASM_LOCATION(
+ "ElementsTransitionGenerator::GenerateMapChangeElementsTransition");
+ DCHECK(!AreAliased(receiver, key, value, target_map));
if (mode == TRACK_ALLOCATION_SITE) {
- ASSERT(allocation_memento_found != NULL);
+ DCHECK(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
allocation_memento_found);
}
// Set transitioned map.
- __ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
- map,
+ target_map,
x10,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
@@ -130,19 +133,25 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register target_map,
+ AllocationSiteMode mode,
+ Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -- x3 : target map, scratch for subsequent call
- // -----------------------------------
- Register receiver = x2;
- Register target_map = x3;
-
Label gc_required, only_change_map;
+ Register elements = x4;
+ Register length = x5;
+ Register array_size = x6;
+ Register array = x7;
+
+ Register scratch = x6;
+
+ // Verify input registers don't conflict with locals.
+ DCHECK(!AreAliased(receiver, key, value, target_map,
+ elements, length, array_size, array));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
@@ -150,32 +159,28 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
- Register elements = x4;
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
__ Push(lr);
- Register length = x5;
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedDoubleArray.
- Register array_size = x6;
- Register array = x7;
__ Lsl(array_size, length, kDoubleSizeLog2);
__ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
__ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
// Register array is non-tagged heap object.
// Set the destination FixedDoubleArray's length and map.
- Register map_root = x6;
+ Register map_root = array_size;
__ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -183,7 +188,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ Add(x10, array, kHeapObjectTag);
__ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
- x6, kLRHasBeenSaved, kDontSaveFPRegs,
+ scratch, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Prepare for conversion loop.
@@ -202,7 +207,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ Bind(&only_change_map);
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ B(&done);
@@ -234,20 +239,22 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register target_map,
+ AllocationSiteMode mode,
+ Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
- // ----------- S t a t e -------------
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -- lr : return address
- // -- x3 : target map, scratch for subsequent call
- // -- x4 : scratch (elements)
- // -----------------------------------
- Register value = x0;
- Register key = x1;
- Register receiver = x2;
- Register target_map = x3;
+ Register elements = x4;
+ Register array_size = x6;
+ Register array = x7;
+ Register length = x5;
+
+ // Verify input registers don't conflict with locals.
+ DCHECK(!AreAliased(receiver, key, value, target_map,
+ elements, array_size, array, length));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
@@ -256,7 +263,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
Label only_change_map;
- Register elements = x4;
+
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
@@ -264,20 +271,16 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// TODO(all): These registers may not need to be pushed. Examine
// RecordWriteStub and check whether it's needed.
__ Push(target_map, receiver, key, value);
- Register length = x5;
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
-
// Allocate new FixedArray.
- Register array_size = x6;
- Register array = x7;
Label gc_required;
__ Mov(array_size, FixedDoubleArray::kHeaderSize);
__ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
__ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
// Set destination FixedDoubleArray's length and map.
- Register map_root = x6;
+ Register map_root = array_size;
__ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
@@ -315,8 +318,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ B(eq, &convert_hole);
// Non-hole double, copy value into a heap number.
- Register heap_num = x5;
- __ AllocateHeapNumber(heap_num, &gc_required, x6, x4,
+ Register heap_num = length;
+ Register scratch = array_size;
+ Register scratch2 = elements;
+ __ AllocateHeapNumber(heap_num, &gc_required, scratch, scratch2,
x13, heap_num_map);
__ Mov(x13, dst_elements);
__ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
@@ -351,7 +356,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
CodeAgingHelper::CodeAgingHelper() {
- ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+ DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// The sequence of instructions that is patched out for aging code is the
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
@@ -363,7 +368,7 @@ CodeAgingHelper::CodeAgingHelper() {
#ifdef DEBUG
const int length = kCodeAgeStubEntryOffset / kInstructionSize;
- ASSERT(old_sequence_.length() >= kCodeAgeStubEntryOffset);
+ DCHECK(old_sequence_.length() >= kCodeAgeStubEntryOffset);
PatchingAssembler patcher_old(old_sequence_.start(), length);
MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
#endif
@@ -415,7 +420,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register index,
Register result,
Label* call_runtime) {
- ASSERT(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
+ DCHECK(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
// Fetch the instance type of the receiver into result register.
__ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -473,7 +478,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Assert(eq, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
// TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
// can be bound far away in deferred code.
__ Tst(result, kShortExternalStringMask);
@@ -511,10 +516,11 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
// instead of fmul and fsub. Doing this changes the result, but since this is
// an estimation anyway, does it matter?
- ASSERT(!AreAliased(input, result,
+ DCHECK(!AreAliased(input, result,
double_temp1, double_temp2,
temp1, temp2, temp3));
- ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+ DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
+ DCHECK(!masm->serializer_enabled()); // External references not serializable.
Label done;
DoubleRegister double_temp3 = result;
@@ -534,7 +540,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
Label result_is_finite_non_zero;
// Assert that we can load offset 0 (the small input threshold) and offset 1
// (the large input threshold) with a single ldp.
- ASSERT(kDRegSize == (ExpConstant(constants, 1).offset() -
+ DCHECK(kDRegSize == (ExpConstant(constants, 1).offset() -
ExpConstant(constants, 0).offset()));
__ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
@@ -564,7 +570,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ Bind(&result_is_finite_non_zero);
// Assert that we can load offset 3 and offset 4 with a single ldp.
- ASSERT(kDRegSize == (ExpConstant(constants, 4).offset() -
+ DCHECK(kDRegSize == (ExpConstant(constants, 4).offset() -
ExpConstant(constants, 3).offset()));
__ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
__ Fmadd(double_temp1, double_temp1, input, double_temp3);
@@ -572,7 +578,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ Fsub(double_temp1, double_temp1, double_temp3);
// Assert that we can load offset 5 and offset 6 with a single ldp.
- ASSERT(kDRegSize == (ExpConstant(constants, 6).offset() -
+ DCHECK(kDRegSize == (ExpConstant(constants, 6).offset() -
ExpConstant(constants, 5).offset()));
__ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
// TODO(jbramley): Consider using Fnmsub here.
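The three DCHECKs in EmitMathExp all guard the same trick: two exp-table constants can be fetched with a single Ldp only when their slots are exactly kDRegSize (8 bytes) apart, so the asserts pin down the table layout the paired loads rely on. A tiny standalone sketch of the check; ExpConstantOffset is a hypothetical stand-in for the real ExpConstant() operands:

#include <cassert>

const int kDRegSize = 8;  // Size of a D register in bytes.

// Hypothetical stand-in for the offsets of the exp() constant-table entries;
// in V8 these come from ExpConstant(constants, i).offset().
int ExpConstantOffset(int index) { return index * kDRegSize; }

// Two table entries can be loaded with one ldp only if they are adjacent.
bool CanLoadWithSingleLdp(int lo_index, int hi_index) {
  return ExpConstantOffset(hi_index) - ExpConstantOffset(lo_index) == kDRegSize;
}

int main() {
  // Mirrors the DCHECKs: entries (0,1), (3,4) and (5,6) must be adjacent.
  assert(CanLoadWithSingleLdp(0, 1));
  assert(CanLoadWithSingleLdp(3, 4));
  assert(CanLoadWithSingleLdp(5, 6));
  return 0;
}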
diff --git a/deps/v8/src/arm64/codegen-arm64.h b/deps/v8/src/arm64/codegen-arm64.h
index bb42bf8d3..9ef148cc4 100644
--- a/deps/v8/src/arm64/codegen-arm64.h
+++ b/deps/v8/src/arm64/codegen-arm64.h
@@ -5,8 +5,8 @@
#ifndef V8_ARM64_CODEGEN_ARM64_H_
#define V8_ARM64_CODEGEN_ARM64_H_
-#include "ast.h"
-#include "ic-inl.h"
+#include "src/ast.h"
+#include "src/ic-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index 7ee22760d..8db120ba4 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -15,7 +15,9 @@ STATIC_ASSERT(sizeof(1L) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
// Get the standard printf format macros for C99 stdint types.
+#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
+#endif
#include <inttypes.h>
@@ -25,8 +27,7 @@ namespace internal {
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
-const unsigned kLiteralEntrySize = 4;
-const unsigned kLiteralEntrySizeLog2 = 2;
+const unsigned kLoadLiteralScaleLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MB;
const unsigned kNumberOfRegisters = 32;
@@ -258,15 +259,15 @@ enum Condition {
nv = 15 // Behaves as always/al.
};
-inline Condition InvertCondition(Condition cond) {
+inline Condition NegateCondition(Condition cond) {
// Conditions al and nv behave identically, as "always true". They can't be
// inverted, because there is no never condition.
- ASSERT((cond != al) && (cond != nv));
+ DCHECK((cond != al) && (cond != nv));
return static_cast<Condition>(cond ^ 1);
}
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseConditionForCmp(Condition cond) {
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition CommuteCondition(Condition cond) {
switch (cond) {
case lo:
return hi;
@@ -293,7 +294,7 @@ inline Condition ReverseConditionForCmp(Condition cond) {
// 'mi' for instance).
UNREACHABLE();
return nv;
- };
+ }
}
enum FlagsUpdate {
@@ -399,7 +400,7 @@ enum SystemRegister {
//
// The enumerations can be used like this:
//
-// ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+// DCHECK(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
// switch(instr->Mask(PCRelAddressingMask)) {
// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
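The rename from ReverseConditionForCmp to CommuteCondition underlines that the two helpers answer different questions: NegateCondition flips the truth value of a condition (the A64 encoding pairs each condition with its inverse at cond ^ 1), while CommuteCondition rewrites the condition so the two comparison operands can be swapped. A minimal standalone sketch of the distinction, not V8 code, using the condition encodings from this header:

#include <cassert>

// Condition codes as encoded on A64.
enum Condition {
  eq = 0, ne = 1, hs = 2, lo = 3, mi = 4, pl = 5, vs = 6, vc = 7,
  hi = 8, ls = 9, ge = 10, lt = 11, gt = 12, le = 13, al = 14, nv = 15
};

// Inverting the truth value: a condition and its inverse differ only in the
// low bit, so eq <-> ne, lo <-> hs, lt <-> ge, and so on.
Condition Negate(Condition cond) {
  assert(cond != al && cond != nv);  // "always" has no inverse.
  return static_cast<Condition>(cond ^ 1);
}

// Commuting the operands: {a cond b} == {b cond' a}. The ordered
// (less/greater) conditions swap; eq and ne are symmetric.
Condition Commute(Condition cond) {
  switch (cond) {
    case lo: return hi;   // a <u b   ==  b >u a
    case hi: return lo;
    case ls: return hs;   // a <=u b  ==  b >=u a
    case hs: return ls;
    case lt: return gt;   // a <s b   ==  b >s a
    case gt: return lt;
    case le: return ge;   // a <=s b  ==  b >=s a
    case ge: return le;
    case eq: case ne: return cond;  // Symmetric: unchanged.
    default: assert(false && "condition has no commuted form"); return nv;
  }
}

int main() {
  // lt negates to ge (!(a < b) is a >= b) but commutes to gt (a < b is b > a).
  assert(Negate(lt) == ge);
  assert(Commute(lt) == gt);
  assert(Commute(eq) == eq);
  return 0;
}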
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index 0e1ed91be..39beb6d9e 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -4,23 +4,16 @@
// CPU specific code for arm independent of OS goes here.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "arm64/cpu-arm64.h"
-#include "arm64/utils-arm64.h"
+#include "src/arm64/utils-arm64.h"
+#include "src/assembler.h"
namespace v8 {
namespace internal {
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::cross_compile_ = 0;
-
-
class CacheLineSizes {
public:
CacheLineSizes() {
@@ -31,22 +24,23 @@ class CacheLineSizes {
__asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
: [ctr] "=r" (cache_type_register_));
#endif
- };
+ }
uint32_t icache_line_size() const { return ExtractCacheLineSize(0); }
uint32_t dcache_line_size() const { return ExtractCacheLineSize(16); }
private:
uint32_t ExtractCacheLineSize(int cache_line_size_shift) const {
- // The cache type register holds the size of the caches as a power of two.
- return 1 << ((cache_type_register_ >> cache_line_size_shift) & 0xf);
+ // The cache type register holds the size of cache lines in words as a
+ // power of two.
+ return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xf);
}
uint32_t cache_type_register_;
};
-void CPU::FlushICache(void* address, size_t length) {
+void CpuFeatures::FlushICache(void* address, size_t length) {
if (length == 0) return;
#ifdef USE_SIMULATOR
@@ -65,8 +59,8 @@ void CPU::FlushICache(void* address, size_t length) {
uintptr_t dsize = sizes.dcache_line_size();
uintptr_t isize = sizes.icache_line_size();
// Cache line sizes are always a power of 2.
- ASSERT(CountSetBits(dsize, 64) == 1);
- ASSERT(CountSetBits(isize, 64) == 1);
+ DCHECK(CountSetBits(dsize, 64) == 1);
+ DCHECK(CountSetBits(isize, 64) == 1);
uintptr_t dstart = start & ~(dsize - 1);
uintptr_t istart = start & ~(isize - 1);
uintptr_t end = start + length;
@@ -124,17 +118,6 @@ void CPU::FlushICache(void* address, size_t length) {
#endif
}
-
-void CpuFeatures::Probe(bool serializer_enabled) {
- // AArch64 has no configuration options, no further probing is required.
- supported_ = 0;
-
-#ifdef DEBUG
- initialized_ = true;
-#endif
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64
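The CacheLineSizes change also corrects the decoding itself: the IminLine/DminLine fields of CTR_EL0 hold log2 of the line length in 4-byte words, so the line length in bytes is 4 << field, not 1 << field as before. A small sketch of the decoding, assuming a raw CTR_EL0 value is already available (reading the register requires the inline asm shown in the diff):

#include <cstdint>
#include <cstdio>

// Decode a cache line size from a CTR_EL0 value. IminLine occupies bits
// [3:0], DminLine bits [19:16]; each is log2(line length in 4-byte words).
uint32_t CacheLineSizeBytes(uint64_t ctr_el0, int shift) {
  return 4u << ((ctr_el0 >> shift) & 0xf);
}

int main() {
  // Example: IminLine = 4 and DminLine = 4 both describe 64-byte lines.
  uint64_t ctr = (4u << 16) | 4u;
  printf("icache line: %u bytes\n", CacheLineSizeBytes(ctr, 0));   // 64
  printf("dcache line: %u bytes\n", CacheLineSizeBytes(ctr, 16));  // 64
  return 0;
}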
diff --git a/deps/v8/src/arm64/cpu-arm64.h b/deps/v8/src/arm64/cpu-arm64.h
deleted file mode 100644
index 0b7a7d7f1..000000000
--- a/deps/v8/src/arm64/cpu-arm64.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ARM64_CPU_ARM64_H_
-#define V8_ARM64_CPU_ARM64_H_
-
-#include <stdio.h>
-#include "serialize.h"
-#include "cpu.h"
-
-namespace v8 {
-namespace internal {
-
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a CpuFeatureScope before use.
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe(bool serializer_enabled);
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- // There are no optional features for ARM64.
- return false;
- };
-
- // There are no optional features for ARM64.
- static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
- return IsSupported(f);
- }
-
- // I and D cache line size in bytes.
- static unsigned dcache_line_size();
- static unsigned icache_line_size();
-
- static unsigned supported_;
-
- static bool VerifyCrossCompiling() {
- // There are no optional features for ARM64.
- ASSERT(cross_compile_ == 0);
- return true;
- }
-
- static bool VerifyCrossCompiling(CpuFeature f) {
- // There are no optional features for ARM64.
- USE(f);
- ASSERT(cross_compile_ == 0);
- return true;
- }
-
- static bool SupportsCrankshaft() { return true; }
-
- private:
-#ifdef DEBUG
- static bool initialized_;
-#endif
-
- // This isn't used (and is always 0), but it is required by V8.
- static unsigned cross_compile_;
-
- friend class PlatformFeatureScope;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ARM64_CPU_ARM64_H_
diff --git a/deps/v8/src/arm64/debug-arm64.cc b/deps/v8/src/arm64/debug-arm64.cc
index 6b1896782..746d9a8b4 100644
--- a/deps/v8/src/arm64/debug-arm64.cc
+++ b/deps/v8/src/arm64/debug-arm64.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "codegen.h"
-#include "debug.h"
+#include "src/codegen.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
@@ -46,7 +46,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// The first instruction of a patched return sequence must be a load literal
// loading the address of the debug break return code.
- patcher.LoadLiteral(ip0, 3 * kInstructionSize);
+ patcher.ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
// TODO(all): check the following is correct.
// The debug break return code will push a frame and call statically compiled
// code. By using blr, even though control will not return after the branch,
@@ -67,21 +67,21 @@ void BreakLocationIterator::ClearDebugBreakAtReturn() {
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+ DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}
bool BreakLocationIterator::IsDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
+ DCHECK(IsDebugBreakSlot());
// Check whether the debug break slot instructions have been patched.
return rinfo()->IsPatchedDebugBreakSlotSequence();
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
- // Patch the code emitted by Debug::GenerateSlots, changing the debug break
- // slot code from
+ // Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug
+ // break slot code from
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
@@ -105,7 +105,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// The first instruction of a patched debug break slot must be a load literal
// loading the address of the debug break slot code.
- patcher.LoadLiteral(ip0, 2 * kInstructionSize);
+ patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
// TODO(all): check the following is correct.
// The debug break slot code will push a frame and call statically compiled
// code. By using blr, even though control will not return after the branch,
@@ -118,12 +118,11 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
void BreakLocationIterator::ClearDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
+ DCHECK(IsDebugBreakSlot());
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kDebugBreakSlotInstructions);
}
-const bool Debug::FramePaddingLayout::kIsSupported = false;
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
@@ -132,6 +131,12 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Load padding words on stack.
+ __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingValue));
+ __ PushMultipleTimes(scratch, LiveEdit::kFramePaddingInitialSize);
+ __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
+ __ Push(scratch);
+
// Any live values (object_regs and non_object_regs) in caller-saved
// registers (or lr) need to be stored on the stack so that their values are
// safely preserved for a call into C code.
@@ -145,12 +150,12 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// collector doesn't try to interpret them as pointers.
//
// TODO(jbramley): Why can't this handle callee-saved registers?
- ASSERT((~kCallerSaved.list() & object_regs) == 0);
- ASSERT((~kCallerSaved.list() & non_object_regs) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- ASSERT((scratch.Bit() & object_regs) == 0);
- ASSERT((scratch.Bit() & non_object_regs) == 0);
- ASSERT((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
+ DCHECK((~kCallerSaved.list() & object_regs) == 0);
+ DCHECK((~kCallerSaved.list() & non_object_regs) == 0);
+ DCHECK((object_regs & non_object_regs) == 0);
+ DCHECK((scratch.Bit() & object_regs) == 0);
+ DCHECK((scratch.Bit() & non_object_regs) == 0);
+ DCHECK((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
STATIC_ASSERT(kSmiValueSize == 32);
CPURegList non_object_list =
@@ -158,15 +163,16 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
while (!non_object_list.IsEmpty()) {
// Store each non-object register as two SMIs.
Register reg = Register(non_object_list.PopLowestIndex());
- __ Push(reg);
- __ Poke(wzr, 0);
- __ Push(reg.W(), wzr);
+ __ Lsr(scratch, reg, 32);
+ __ SmiTagAndPush(scratch, reg);
+
// Stack:
// jssp[12]: reg[63:32]
// jssp[8]: 0x00000000 (SMI tag & padding)
// jssp[4]: reg[31:0]
// jssp[0]: 0x00000000 (SMI tag & padding)
- STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == kWRegSizeInBits);
}
if (object_regs != 0) {
@@ -201,21 +207,24 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Bfxil(reg, scratch, 32, 32);
}
+ // Don't bother removing padding bytes pushed on the stack
+ // as the frame is going to be restored right away.
+
// Leave the internal frame.
}
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target(Debug_Address::AfterBreakTarget(),
- masm->isolate());
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
__ Mov(scratch, after_break_target);
__ Ldr(scratch, MemOperand(scratch));
__ Br(scratch);
}
-void Debug::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
// Register state for CallICStub
// ----------- S t a t e -------------
// -- x1 : function
@@ -225,54 +234,41 @@ void Debug::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
- // ----------- S t a t e -------------
- // -- x2 : name
- // -- lr : return address
- // -- x0 : receiver
- // -- [sp] : receiver
- // -----------------------------------
- // Registers x0 and x2 contain objects that need to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, x0.Bit() | x2.Bit(), 0, x10);
+ Register receiver = LoadIC::ReceiverRegister();
+ Register name = LoadIC::NameRegister();
+ Generate_DebugBreakCallHelper(masm, receiver.Bit() | name.Bit(), 0, x10);
}
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC store (from ic-arm.cc).
- // ----------- S t a t e -------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
- // Registers x0, x1, and x2 contain objects that need to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
+void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC store (from ic-arm64.cc).
+ Register receiver = StoreIC::ReceiverRegister();
+ Register name = StoreIC::NameRegister();
+ Register value = StoreIC::ValueRegister();
+ Generate_DebugBreakCallHelper(
+ masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
}
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key
- // -- x1 : receiver
- Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit(), 0, x10);
+void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for keyed IC load (from ic-arm.cc).
+ GenerateLoadICDebugBreak(masm);
}
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -- lr : return address
- Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
+void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC keyed store call (from ic-arm64.cc).
+ Register receiver = KeyedStoreIC::ReceiverRegister();
+ Register name = KeyedStoreIC::NameRegister();
+ Register value = KeyedStoreIC::ValueRegister();
+ Generate_DebugBreakCallHelper(
+ masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
}
-void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- r0 : value
@@ -281,7 +277,7 @@ void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with
// care.
@@ -289,7 +285,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x1 : function
@@ -298,7 +294,7 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
@@ -308,7 +304,8 @@ void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
+ MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
@@ -321,7 +318,7 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateSlot(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
// Generate enough nop's to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
@@ -333,23 +330,48 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
}
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0, x10);
}
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ __ Ret();
}
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+
+ __ Mov(scratch, restarter_frame_function_slot);
+ __ Str(xzr, MemOperand(scratch));
+
+ // We do not know our frame height, but set sp based on fp.
+ __ Sub(masm->StackPointer(), fp, kPointerSize);
+ __ AssertStackConsistency();
+
+ __ Pop(x1, fp, lr); // Function, Frame, Return address.
+
+ // Load context from the function.
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ Ldr(scratch, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(scratch, FieldMemOperand(scratch, SharedFunctionInfo::kCodeOffset));
+ __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
+
+ // Re-run JSFunction, x1 is function, cp is context.
+ __ Br(scratch);
}
-const bool Debug::kFrameDropperSupported = false;
+
+const bool LiveEdit::kFrameDropperSupported = true;
} } // namespace v8::internal
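Both patched sequences in this file switch from the removed LoadLiteral helper to ldr_pcrel, whose immediate is the PC-relative offset in words rather than bytes; that is why the byte offset is shifted right by kLoadLiteralScaleLog2 (2) before being passed in. A small sketch of the conversion, with the constants copied from constants-arm64.h:

#include <cassert>

const unsigned kInstructionSize = 4;       // From constants-arm64.h.
const unsigned kLoadLiteralScaleLog2 = 2;  // LDR (literal) offsets are in words.

// Convert a byte offset from the load to its literal into the immediate
// expected by ldr_pcrel. The offset must be word-aligned.
int LiteralImmediate(int byte_offset) {
  assert((byte_offset & ((1 << kLoadLiteralScaleLog2) - 1)) == 0);
  return byte_offset >> kLoadLiteralScaleLog2;
}

int main() {
  // The debug-break return sequence loads a literal placed 3 instructions
  // ahead: a 12-byte offset encodes as an immediate of 3.
  assert(LiteralImmediate(3 * kInstructionSize) == 3);
  // The debug-break slot patch uses a 2-instruction offset.
  assert(LiteralImmediate(2 * kInstructionSize) == 2);
  return 0;
}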
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h
index eb791336d..5dd2fd9cc 100644
--- a/deps/v8/src/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/arm64/decoder-arm64-inl.h
@@ -5,9 +5,9 @@
#ifndef V8_ARM64_DECODER_ARM64_INL_H_
#define V8_ARM64_DECODER_ARM64_INL_H_
-#include "arm64/decoder-arm64.h"
-#include "globals.h"
-#include "utils.h"
+#include "src/arm64/decoder-arm64.h"
+#include "src/globals.h"
+#include "src/utils.h"
namespace v8 {
@@ -96,17 +96,17 @@ void Decoder<V>::Decode(Instruction *instr) {
template<typename V>
void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
- ASSERT(instr->Bits(27, 24) == 0x0);
+ DCHECK(instr->Bits(27, 24) == 0x0);
// We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
// decode.
- ASSERT(instr->Bit(28) == 0x1);
+ DCHECK(instr->Bit(28) == 0x1);
V::VisitPCRelAddressing(instr);
}
template<typename V>
void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
- ASSERT((instr->Bits(27, 24) == 0x4) ||
+ DCHECK((instr->Bits(27, 24) == 0x4) ||
(instr->Bits(27, 24) == 0x5) ||
(instr->Bits(27, 24) == 0x6) ||
(instr->Bits(27, 24) == 0x7) );
@@ -208,7 +208,7 @@ void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeLoadStore(Instruction* instr) {
- ASSERT((instr->Bits(27, 24) == 0x8) ||
+ DCHECK((instr->Bits(27, 24) == 0x8) ||
(instr->Bits(27, 24) == 0x9) ||
(instr->Bits(27, 24) == 0xC) ||
(instr->Bits(27, 24) == 0xD) );
@@ -328,7 +328,7 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeLogical(Instruction* instr) {
- ASSERT(instr->Bits(27, 24) == 0x2);
+ DCHECK(instr->Bits(27, 24) == 0x2);
if (instr->Mask(0x80400000) == 0x00400000) {
V::VisitUnallocated(instr);
@@ -348,7 +348,7 @@ void Decoder<V>::DecodeLogical(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
- ASSERT(instr->Bits(27, 24) == 0x3);
+ DCHECK(instr->Bits(27, 24) == 0x3);
if ((instr->Mask(0x80400000) == 0x80000000) ||
(instr->Mask(0x80400000) == 0x00400000) ||
@@ -374,7 +374,7 @@ void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
- ASSERT(instr->Bits(27, 24) == 0x1);
+ DCHECK(instr->Bits(27, 24) == 0x1);
if (instr->Bit(23) == 1) {
V::VisitUnallocated(instr);
} else {
@@ -385,7 +385,7 @@ void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
- ASSERT((instr->Bits(27, 24) == 0xA) ||
+ DCHECK((instr->Bits(27, 24) == 0xA) ||
(instr->Bits(27, 24) == 0xB) );
if (instr->Bit(24) == 0) {
@@ -501,7 +501,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeFP(Instruction* instr) {
- ASSERT((instr->Bits(27, 24) == 0xE) ||
+ DCHECK((instr->Bits(27, 24) == 0xE) ||
(instr->Bits(27, 24) == 0xF) );
if (instr->Bit(28) == 0) {
@@ -614,7 +614,7 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
}
} else {
// Bit 30 == 1 has been handled earlier.
- ASSERT(instr->Bit(30) == 0);
+ DCHECK(instr->Bit(30) == 0);
if (instr->Mask(0xA0800000) != 0) {
V::VisitUnallocated(instr);
} else {
@@ -630,7 +630,7 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
// TODO(all): Implement Advanced SIMD load/store instruction decode.
- ASSERT(instr->Bits(29, 25) == 0x6);
+ DCHECK(instr->Bits(29, 25) == 0x6);
V::VisitUnimplemented(instr);
}
@@ -638,7 +638,7 @@ void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
// TODO(all): Implement Advanced SIMD data processing instruction decode.
- ASSERT(instr->Bits(27, 25) == 0x7);
+ DCHECK(instr->Bits(27, 25) == 0x7);
V::VisitUnimplemented(instr);
}
diff --git a/deps/v8/src/arm64/decoder-arm64.cc b/deps/v8/src/arm64/decoder-arm64.cc
index 13962387d..cf7dc34c5 100644
--- a/deps/v8/src/arm64/decoder-arm64.cc
+++ b/deps/v8/src/arm64/decoder-arm64.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "globals.h"
-#include "utils.h"
-#include "arm64/decoder-arm64.h"
+#include "src/arm64/decoder-arm64.h"
+#include "src/globals.h"
+#include "src/utils.h"
namespace v8 {
@@ -39,7 +39,7 @@ void DispatchingDecoderVisitor::InsertVisitorBefore(
}
// We reached the end of the list. The last element must be
// registered_visitor.
- ASSERT(*it == registered_visitor);
+ DCHECK(*it == registered_visitor);
visitors_.insert(it, new_visitor);
}
@@ -57,7 +57,7 @@ void DispatchingDecoderVisitor::InsertVisitorAfter(
}
// We reached the end of the list. The last element must be
// registered_visitor.
- ASSERT(*it == registered_visitor);
+ DCHECK(*it == registered_visitor);
visitors_.push_back(new_visitor);
}
@@ -70,7 +70,7 @@ void DispatchingDecoderVisitor::RemoveVisitor(DecoderVisitor* visitor) {
#define DEFINE_VISITOR_CALLERS(A) \
void DispatchingDecoderVisitor::Visit##A(Instruction* instr) { \
if (!(instr->Mask(A##FMask) == A##Fixed)) { \
- ASSERT(instr->Mask(A##FMask) == A##Fixed); \
+ DCHECK(instr->Mask(A##FMask) == A##Fixed); \
} \
std::list<DecoderVisitor*>::iterator it; \
for (it = visitors_.begin(); it != visitors_.end(); it++) { \
diff --git a/deps/v8/src/arm64/decoder-arm64.h b/deps/v8/src/arm64/decoder-arm64.h
index 4409421bd..af6bcc6f4 100644
--- a/deps/v8/src/arm64/decoder-arm64.h
+++ b/deps/v8/src/arm64/decoder-arm64.h
@@ -7,8 +7,8 @@
#include <list>
-#include "globals.h"
-#include "arm64/instructions-arm64.h"
+#include "src/arm64/instructions-arm64.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/delayed-masm-arm64-inl.h b/deps/v8/src/arm64/delayed-masm-arm64-inl.h
new file mode 100644
index 000000000..2c4463037
--- /dev/null
+++ b/deps/v8/src/arm64/delayed-masm-arm64-inl.h
@@ -0,0 +1,55 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_DELAYED_MASM_ARM64_INL_H_
+#define V8_ARM64_DELAYED_MASM_ARM64_INL_H_
+
+#include "src/arm64/delayed-masm-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+
+void DelayedMasm::EndDelayedUse() {
+ EmitPending();
+ DCHECK(!scratch_register_acquired_);
+ ResetSavedValue();
+}
+
+
+void DelayedMasm::Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode) {
+ EmitPending();
+ DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
+ __ Mov(rd, operand, discard_mode);
+}
+
+
+void DelayedMasm::Fmov(FPRegister fd, FPRegister fn) {
+ EmitPending();
+ __ Fmov(fd, fn);
+}
+
+
+void DelayedMasm::Fmov(FPRegister fd, double imm) {
+ EmitPending();
+ __ Fmov(fd, imm);
+}
+
+
+void DelayedMasm::LoadObject(Register result, Handle<Object> object) {
+ EmitPending();
+ DCHECK(!IsScratchRegister(result) || scratch_register_acquired_);
+ __ LoadObject(result, object);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_DELAYED_MASM_ARM64_INL_H_
diff --git a/deps/v8/src/arm64/delayed-masm-arm64.cc b/deps/v8/src/arm64/delayed-masm-arm64.cc
new file mode 100644
index 000000000..c3bda915e
--- /dev/null
+++ b/deps/v8/src/arm64/delayed-masm-arm64.cc
@@ -0,0 +1,198 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/arm64/delayed-masm-arm64.h"
+#include "src/arm64/lithium-codegen-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+
+void DelayedMasm::StackSlotMove(LOperand* src, LOperand* dst) {
+ DCHECK(src->IsStackSlot());
+ DCHECK(dst->IsStackSlot());
+ MemOperand src_operand = cgen_->ToMemOperand(src);
+ MemOperand dst_operand = cgen_->ToMemOperand(dst);
+ if (pending_ == kStackSlotMove) {
+ DCHECK(pending_pc_ == masm_->pc_offset());
+ UseScratchRegisterScope scope(masm_);
+ DoubleRegister temp1 = scope.AcquireD();
+ DoubleRegister temp2 = scope.AcquireD();
+ switch (MemOperand::AreConsistentForPair(pending_address_src_,
+ src_operand)) {
+ case MemOperand::kNotPair:
+ __ Ldr(temp1, pending_address_src_);
+ __ Ldr(temp2, src_operand);
+ break;
+ case MemOperand::kPairAB:
+ __ Ldp(temp1, temp2, pending_address_src_);
+ break;
+ case MemOperand::kPairBA:
+ __ Ldp(temp2, temp1, src_operand);
+ break;
+ }
+ switch (MemOperand::AreConsistentForPair(pending_address_dst_,
+ dst_operand)) {
+ case MemOperand::kNotPair:
+ __ Str(temp1, pending_address_dst_);
+ __ Str(temp2, dst_operand);
+ break;
+ case MemOperand::kPairAB:
+ __ Stp(temp1, temp2, pending_address_dst_);
+ break;
+ case MemOperand::kPairBA:
+ __ Stp(temp2, temp1, dst_operand);
+ break;
+ }
+ ResetPending();
+ return;
+ }
+
+ EmitPending();
+ pending_ = kStackSlotMove;
+ pending_address_src_ = src_operand;
+ pending_address_dst_ = dst_operand;
+#ifdef DEBUG
+ pending_pc_ = masm_->pc_offset();
+#endif
+}
+
+
+void DelayedMasm::StoreConstant(uint64_t value, const MemOperand& operand) {
+ DCHECK(!scratch_register_acquired_);
+ if ((pending_ == kStoreConstant) && (value == pending_value_)) {
+ MemOperand::PairResult result =
+ MemOperand::AreConsistentForPair(pending_address_dst_, operand);
+ if (result != MemOperand::kNotPair) {
+ const MemOperand& dst =
+ (result == MemOperand::kPairAB) ?
+ pending_address_dst_ :
+ operand;
+ DCHECK(pending_pc_ == masm_->pc_offset());
+ if (pending_value_ == 0) {
+ __ Stp(xzr, xzr, dst);
+ } else {
+ SetSavedValue(pending_value_);
+ __ Stp(ScratchRegister(), ScratchRegister(), dst);
+ }
+ ResetPending();
+ return;
+ }
+ }
+
+ EmitPending();
+ pending_ = kStoreConstant;
+ pending_address_dst_ = operand;
+ pending_value_ = value;
+#ifdef DEBUG
+ pending_pc_ = masm_->pc_offset();
+#endif
+}
+
+
+void DelayedMasm::Load(const CPURegister& rd, const MemOperand& operand) {
+ if ((pending_ == kLoad) &&
+ pending_register_.IsSameSizeAndType(rd)) {
+ switch (MemOperand::AreConsistentForPair(pending_address_src_, operand)) {
+ case MemOperand::kNotPair:
+ break;
+ case MemOperand::kPairAB:
+ DCHECK(pending_pc_ == masm_->pc_offset());
+ DCHECK(!IsScratchRegister(pending_register_) ||
+ scratch_register_acquired_);
+ DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
+ __ Ldp(pending_register_, rd, pending_address_src_);
+ ResetPending();
+ return;
+ case MemOperand::kPairBA:
+ DCHECK(pending_pc_ == masm_->pc_offset());
+ DCHECK(!IsScratchRegister(pending_register_) ||
+ scratch_register_acquired_);
+ DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
+ __ Ldp(rd, pending_register_, operand);
+ ResetPending();
+ return;
+ }
+ }
+
+ EmitPending();
+ pending_ = kLoad;
+ pending_register_ = rd;
+ pending_address_src_ = operand;
+#ifdef DEBUG
+ pending_pc_ = masm_->pc_offset();
+#endif
+}
+
+
+void DelayedMasm::Store(const CPURegister& rd, const MemOperand& operand) {
+ if ((pending_ == kStore) &&
+ pending_register_.IsSameSizeAndType(rd)) {
+ switch (MemOperand::AreConsistentForPair(pending_address_dst_, operand)) {
+ case MemOperand::kNotPair:
+ break;
+ case MemOperand::kPairAB:
+ DCHECK(pending_pc_ == masm_->pc_offset());
+ __ Stp(pending_register_, rd, pending_address_dst_);
+ ResetPending();
+ return;
+ case MemOperand::kPairBA:
+ DCHECK(pending_pc_ == masm_->pc_offset());
+ __ Stp(rd, pending_register_, operand);
+ ResetPending();
+ return;
+ }
+ }
+
+ EmitPending();
+ pending_ = kStore;
+ pending_register_ = rd;
+ pending_address_dst_ = operand;
+#ifdef DEBUG
+ pending_pc_ = masm_->pc_offset();
+#endif
+}
+
+
+void DelayedMasm::EmitPending() {
+ DCHECK((pending_ == kNone) || (pending_pc_ == masm_->pc_offset()));
+ switch (pending_) {
+ case kNone:
+ return;
+ case kStoreConstant:
+ if (pending_value_ == 0) {
+ __ Str(xzr, pending_address_dst_);
+ } else {
+ SetSavedValue(pending_value_);
+ __ Str(ScratchRegister(), pending_address_dst_);
+ }
+ break;
+ case kLoad:
+ DCHECK(!IsScratchRegister(pending_register_) ||
+ scratch_register_acquired_);
+ __ Ldr(pending_register_, pending_address_src_);
+ break;
+ case kStore:
+ __ Str(pending_register_, pending_address_dst_);
+ break;
+ case kStackSlotMove: {
+ UseScratchRegisterScope scope(masm_);
+ DoubleRegister temp = scope.AcquireD();
+ __ Ldr(temp, pending_address_src_);
+ __ Str(temp, pending_address_dst_);
+ break;
+ }
+ }
+ ResetPending();
+}
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/delayed-masm-arm64.h b/deps/v8/src/arm64/delayed-masm-arm64.h
new file mode 100644
index 000000000..76227a389
--- /dev/null
+++ b/deps/v8/src/arm64/delayed-masm-arm64.h
@@ -0,0 +1,164 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_DELAYED_MASM_ARM64_H_
+#define V8_ARM64_DELAYED_MASM_ARM64_H_
+
+#include "src/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+
+// This class delays the generation of some instructions. This way, we have a
+// chance to merge two instructions into one (with a load/store pair).
+// Each instruction must either:
+// - merge with the pending instruction and generate just one instruction.
+// - emit the pending instruction and then generate the new instruction (or
+//   make it the new pending instruction).
+class DelayedMasm BASE_EMBEDDED {
+ public:
+ DelayedMasm(LCodeGen* owner,
+ MacroAssembler* masm,
+ const Register& scratch_register)
+ : cgen_(owner), masm_(masm), scratch_register_(scratch_register),
+ scratch_register_used_(false), pending_(kNone), saved_value_(0) {
+#ifdef DEBUG
+ pending_register_ = no_reg;
+ pending_value_ = 0;
+ pending_pc_ = 0;
+ scratch_register_acquired_ = false;
+#endif
+ }
+ ~DelayedMasm() {
+ DCHECK(!scratch_register_acquired_);
+ DCHECK(!scratch_register_used_);
+ DCHECK(!pending());
+ }
+ inline void EndDelayedUse();
+
+ const Register& ScratchRegister() {
+ scratch_register_used_ = true;
+ return scratch_register_;
+ }
+ bool IsScratchRegister(const CPURegister& reg) {
+ return reg.Is(scratch_register_);
+ }
+ bool scratch_register_used() const { return scratch_register_used_; }
+ void reset_scratch_register_used() { scratch_register_used_ = false; }
+ // Acquire/Release scratch register for use outside this class.
+ void AcquireScratchRegister() {
+ EmitPending();
+ ResetSavedValue();
+#ifdef DEBUG
+ DCHECK(!scratch_register_acquired_);
+ scratch_register_acquired_ = true;
+#endif
+ }
+ void ReleaseScratchRegister() {
+#ifdef DEBUG
+ DCHECK(scratch_register_acquired_);
+ scratch_register_acquired_ = false;
+#endif
+ }
+ bool pending() { return pending_ != kNone; }
+
+ // Extra layer over the macro-assembler instructions (which emits the
+ // potential pending instruction).
+ inline void Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
+ inline void Fmov(FPRegister fd, FPRegister fn);
+ inline void Fmov(FPRegister fd, double imm);
+ inline void LoadObject(Register result, Handle<Object> object);
+  // Instructions which try to merge with the pending instruction.
+ void StackSlotMove(LOperand* src, LOperand* dst);
+ // StoreConstant can only be used if the scratch register is not acquired.
+ void StoreConstant(uint64_t value, const MemOperand& operand);
+ void Load(const CPURegister& rd, const MemOperand& operand);
+ void Store(const CPURegister& rd, const MemOperand& operand);
+ // Emit the potential pending instruction.
+ void EmitPending();
+ // Reset the pending state.
+ void ResetPending() {
+ pending_ = kNone;
+#ifdef DEBUG
+ pending_register_ = no_reg;
+ MemOperand tmp;
+ pending_address_src_ = tmp;
+ pending_address_dst_ = tmp;
+ pending_value_ = 0;
+ pending_pc_ = 0;
+#endif
+ }
+ void InitializeRootRegister() {
+ masm_->InitializeRootRegister();
+ }
+
+ private:
+ // Set the saved value and load the ScratchRegister with it.
+ void SetSavedValue(uint64_t saved_value) {
+ DCHECK(saved_value != 0);
+ if (saved_value_ != saved_value) {
+ masm_->Mov(ScratchRegister(), saved_value);
+ saved_value_ = saved_value;
+ }
+ }
+ // Reset the saved value (i.e. the value of ScratchRegister is no longer
+ // known).
+ void ResetSavedValue() {
+ saved_value_ = 0;
+ }
+
+ LCodeGen* cgen_;
+ MacroAssembler* masm_;
+
+ // Register used to store a constant.
+ Register scratch_register_;
+ bool scratch_register_used_;
+
+  // Sometimes we store or load two values in two contiguous stack slots.
+  // In this case, we try to use the ldp/stp instructions to reduce code size.
+  // To do that, instead of emitting each instruction directly, we record in
+  // the fields below that an instruction still needs to be generated. When
+  // the next instruction arrives, if it can form an ldp/stp pair with the
+  // pending one, we emit the pair; otherwise we emit the pending instruction
+  // and record the new one (which becomes pending).
+
+ // Enumeration of instructions which can be pending.
+ enum Pending {
+ kNone,
+ kStoreConstant,
+ kLoad, kStore,
+ kStackSlotMove
+ };
+ // The pending instruction.
+ Pending pending_;
+ // For kLoad, kStore: register which must be loaded/stored.
+ CPURegister pending_register_;
+ // For kLoad, kStackSlotMove: address of the load.
+ MemOperand pending_address_src_;
+ // For kStoreConstant, kStore, kStackSlotMove: address of the store.
+ MemOperand pending_address_dst_;
+ // For kStoreConstant: value to be stored.
+ uint64_t pending_value_;
+  // Value held in the ScratchRegister if saved_value_ is not 0.
+ // For 0, we use xzr.
+ uint64_t saved_value_;
+#ifdef DEBUG
+ // Address where the pending instruction must be generated. It's only used to
+ // check that nothing else has been generated since we set the pending
+ // instruction.
+ int pending_pc_;
+ // If true, the scratch register has been acquired outside this class. The
+ // scratch register can no longer be used for constants.
+ bool scratch_register_acquired_;
+#endif
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_DELAYED_MASM_ARM64_H_
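To make the merging idea above concrete, here is a stripped-down sketch of the pending-store logic, independent of the V8 classes: a store is buffered, and if the next store targets the adjacent stack slot the two are emitted as a single paired store, otherwise the buffered store is flushed first. The DelayedStores, SingleStore and PairedStore names and the byte-offset adjacency test are illustrative only; the real class goes through MemOperand::AreConsistentForPair, pairs in either order (kPairAB/kPairBA), and also handles loads, constants, and stack-slot moves:

#include <cstdio>

// Simplified model: "addresses" are byte offsets from sp and registers are
// plain ints. Only the store path is modelled.
class DelayedStores {
 public:
  DelayedStores() : has_pending_(false), pending_reg_(0), pending_offset_(0) {}

  void Store(int reg, int offset) {
    if (has_pending_ && offset == pending_offset_ + kSlotSize) {
      // Adjacent to the pending store: merge into one paired store.
      PairedStore(pending_reg_, reg, pending_offset_);
      has_pending_ = false;
      return;
    }
    EmitPending();
    // Buffer this store; it may pair with the next one.
    has_pending_ = true;
    pending_reg_ = reg;
    pending_offset_ = offset;
  }

  void EmitPending() {
    if (!has_pending_) return;
    SingleStore(pending_reg_, pending_offset_);
    has_pending_ = false;
  }

 private:
  static const int kSlotSize = 8;

  // Stand-ins for str/stp emission.
  void SingleStore(int reg, int offset) {
    printf("str  x%d, [sp, #%d]\n", reg, offset);
  }
  void PairedStore(int reg1, int reg2, int offset) {
    printf("stp  x%d, x%d, [sp, #%d]\n", reg1, reg2, offset);
  }

  bool has_pending_;
  int pending_reg_;
  int pending_offset_;
};

int main() {
  DelayedStores masm;
  masm.Store(0, 16);
  masm.Store(1, 24);   // Adjacent: emitted as stp x0, x1, [sp, #16].
  masm.Store(2, 48);   // Not adjacent: buffered.
  masm.EmitPending();  // Flushed as str x2, [sp, #48].
  return 0;
}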
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index a19e2fc9f..d40468029 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
namespace v8 {
@@ -32,9 +32,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
- SharedFunctionInfo* shared =
- SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
- shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
Address code_start_address = code->instruction_start();
#ifdef DEBUG
Address prev_call_address = NULL;
@@ -48,13 +45,13 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
- patcher.LoadLiteral(ip0, 2 * kInstructionSize);
+ patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
- ASSERT((prev_call_address == NULL) ||
+ DCHECK((prev_call_address == NULL) ||
(call_address >= prev_call_address + patch_size()));
- ASSERT(call_address + patch_size() <= code->instruction_end());
+ DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
@@ -93,7 +90,7 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
- ApiFunction function(descriptor->deoptimization_handler_);
+ ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->GetHandlerParameterCount();
@@ -110,47 +107,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
-Code* Deoptimizer::NotifyStubFailureBuiltin() {
- return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
-}
-
-
-#define __ masm->
-
-static void CopyRegisterDumpToFrame(MacroAssembler* masm,
- Register frame,
- CPURegList reg_list,
- Register scratch1,
- Register scratch2,
- int src_offset,
- int dst_offset) {
- int offset0, offset1;
- CPURegList copy_to_input = reg_list;
- int reg_count = reg_list.Count();
- int reg_size = reg_list.RegisterSizeInBytes();
- for (int i = 0; i < (reg_count / 2); i++) {
- __ PeekPair(scratch1, scratch2, src_offset + (i * reg_size * 2));
-
- offset0 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
- offset1 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
-
- if ((offset0 + reg_size) == offset1) {
- // Registers are adjacent: store in pairs.
- __ Stp(scratch1, scratch2, MemOperand(frame, offset0));
- } else {
- // Registers are not adjacent: store individually.
- __ Str(scratch1, MemOperand(frame, offset0));
- __ Str(scratch2, MemOperand(frame, offset1));
- }
- }
- if ((reg_count & 1) != 0) {
- __ Peek(scratch1, src_offset + (reg_count - 1) * reg_size);
- offset0 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
- __ Str(scratch1, MemOperand(frame, offset0));
- }
-}
-
-#undef __
#define __ masm()->
@@ -214,13 +170,23 @@ void Deoptimizer::EntryGenerator::Generate() {
__ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
// Copy core registers into the input frame.
- CopyRegisterDumpToFrame(masm(), x1, saved_registers, x2, x4, 0,
- FrameDescription::registers_offset());
+ CPURegList copy_to_input = saved_registers;
+ for (int i = 0; i < saved_registers.Count(); i++) {
+ __ Peek(x2, i * kPointerSize);
+ CPURegister current_reg = copy_to_input.PopLowestIndex();
+ int offset = (current_reg.code() * kPointerSize) +
+ FrameDescription::registers_offset();
+ __ Str(x2, MemOperand(x1, offset));
+ }
// Copy FP registers to the input frame.
- CopyRegisterDumpToFrame(masm(), x1, saved_fp_registers, x2, x4,
- kFPRegistersOffset,
- FrameDescription::double_registers_offset());
+ for (int i = 0; i < saved_fp_registers.Count(); i++) {
+ int dst_offset = FrameDescription::double_registers_offset() +
+ (i * kDoubleSize);
+ int src_offset = kFPRegistersOffset + (i * kDoubleSize);
+ __ Peek(x2, src_offset);
+ __ Str(x2, MemOperand(x1, dst_offset));
+ }
// Remove the bailout id and the saved registers from the stack.
__ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
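The per-register loop that replaces CopyRegisterDumpToFrame stores each popped core register at a slot indexed by its register code. A rough sketch of that offset computation, with kPointerSize and the registers-area offset assumed purely for illustration:

#include <cstdio>

// Illustrative constants only; the real values come from FrameDescription.
constexpr int kPointerSize = 8;
constexpr int kRegistersOffset = 0x20;  // hypothetical registers_offset()

// Each popped core register lands at a slot indexed by its register code.
int DestinationOffset(int reg_code) {
  return reg_code * kPointerSize + kRegistersOffset;
}

int main() {
  for (int code = 0; code < 4; ++code) {
    std::printf("x%d -> input frame offset %d\n", code, DestinationOffset(code));
  }
  return 0;
}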
@@ -284,7 +250,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ B(lt, &outer_push_loop);
__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
- ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
+ DCHECK(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
!saved_fp_registers.IncludesAliasOf(fp_zero) &&
!saved_fp_registers.IncludesAliasOf(fp_scratch));
int src_offset = FrameDescription::double_registers_offset();
@@ -311,7 +277,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Note that lr is not in the list of saved_registers and will be restored
// later. We can use it to hold the address of last output frame while
// reloading the other registers.
- ASSERT(!saved_registers.IncludesAliasOf(lr));
+ DCHECK(!saved_registers.IncludesAliasOf(lr));
Register last_output_frame = lr;
__ Mov(last_output_frame, current_frame);
@@ -354,14 +320,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // The number of entries will never exceed kMaxNumberOfEntries.
  // As long as kMaxNumberOfEntries is a valid 16-bit immediate you can use
// a movz instruction to load the entry id.
- ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));
+ DCHECK(is_uint16(Deoptimizer::kMaxNumberOfEntries));
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ movz(entry_id, i);
__ b(&done);
- ASSERT(masm()->pc_offset() - start == table_entry_size_);
+ DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
}
__ Bind(&done);
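The prologue above relies on every entry id fitting a single movz, hence the is_uint16 check on kMaxNumberOfEntries. A small sketch of that constraint; both the helper and the bound are assumed here rather than taken from V8 headers:

#include <cstdint>

// Assumed helper and bound, for illustration only.
constexpr bool is_uint16(int64_t x) { return x >= 0 && x <= 0xffff; }
constexpr int kMaxNumberOfEntries = 16384;  // hypothetical bound

// movz materializes any 16-bit immediate in one instruction, so every entry
// id below the bound fits the fixed-size table entry.
static_assert(is_uint16(kMaxNumberOfEntries),
              "entry id must fit a movz immediate");

int main() { return 0; }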
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index e9e1decad..b8b1d5d25 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -3,19 +3,19 @@
// found in the LICENSE file.
#include <assert.h>
-#include <stdio.h>
#include <stdarg.h>
+#include <stdio.h>
#include <string.h>
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "disasm.h"
-#include "arm64/decoder-arm64-inl.h"
-#include "arm64/disasm-arm64.h"
-#include "macro-assembler.h"
-#include "platform.h"
+#include "src/arm64/decoder-arm64-inl.h"
+#include "src/arm64/disasm-arm64.h"
+#include "src/base/platform/platform.h"
+#include "src/disasm.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -258,7 +258,7 @@ void Disassembler::VisitLogicalImmediate(Instruction* instr) {
bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
- ASSERT((reg_size == kXRegSizeInBits) ||
+ DCHECK((reg_size == kXRegSizeInBits) ||
((reg_size == kWRegSizeInBits) && (value <= 0xffffffff)));
// Test for movz: 16-bits set at positions 0, 16, 32 or 48.
@@ -1176,7 +1176,7 @@ void Disassembler::VisitSystem(Instruction* instr) {
}
}
} else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
- ASSERT(instr->Mask(SystemHintMask) == HINT);
+ DCHECK(instr->Mask(SystemHintMask) == HINT);
switch (instr->ImmHint()) {
case NOP: {
mnemonic = "nop";
@@ -1246,7 +1246,7 @@ void Disassembler::Format(Instruction* instr, const char* mnemonic,
const char* format) {
// TODO(mcapewel) don't think I can use the instr address here - there needs
// to be a base address too
- ASSERT(mnemonic != NULL);
+ DCHECK(mnemonic != NULL);
ResetOutput();
Substitute(instr, mnemonic);
if (format != NULL) {
@@ -1364,7 +1364,7 @@ int Disassembler::SubstituteRegisterField(Instruction* instr,
int Disassembler::SubstituteImmediateField(Instruction* instr,
const char* format) {
- ASSERT(format[0] == 'I');
+ DCHECK(format[0] == 'I');
switch (format[1]) {
case 'M': { // IMoveImm or IMoveLSL.
@@ -1372,7 +1372,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
AppendToOutput("#0x%" PRIx64, imm);
} else {
- ASSERT(format[5] == 'L');
+ DCHECK(format[5] == 'L');
AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
@@ -1384,7 +1384,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
switch (format[2]) {
case 'L': { // ILLiteral - Immediate Load Literal.
AppendToOutput("pc%+" PRId64,
- instr->ImmLLiteral() << kLiteralEntrySizeLog2);
+ instr->ImmLLiteral() << kLoadLiteralScaleLog2);
return 9;
}
case 'S': { // ILS - Immediate Load/Store.
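ILLiteral now scales the 19-bit literal field by kLoadLiteralScaleLog2 to recover a byte offset from the pc. A standalone sketch of that conversion, assuming the scale is 2 (4-byte units); the helper is illustrative and the immediate is treated as already sign-extended:

#include <cassert>
#include <cstdint>

// Assumed scale: LDR (literal) encodes its offset in 4-byte units.
constexpr int kLoadLiteralScaleLog2 = 2;

// Recover the byte offset printed as "pc%+d" from the sign-extended imm19.
constexpr int64_t LiteralByteOffset(int64_t imm19) {
  return imm19 * (int64_t{1} << kLoadLiteralScaleLog2);
}

int main() {
  assert(LiteralByteOffset(2) == 8);    // two instructions forward
  assert(LiteralByteOffset(-1) == -4);  // one instruction back
  return 0;
}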
@@ -1417,7 +1417,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
return 6;
}
case 'A': { // IAddSub.
- ASSERT(instr->ShiftAddSub() <= 1);
+ DCHECK(instr->ShiftAddSub() <= 1);
int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
return 7;
@@ -1474,7 +1474,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
const char* format) {
- ASSERT((format[0] == 'I') && (format[1] == 'B'));
+ DCHECK((format[0] == 'I') && (format[1] == 'B'));
unsigned r = instr->ImmR();
unsigned s = instr->ImmS();
@@ -1488,13 +1488,13 @@ int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
AppendToOutput("#%d", s + 1);
return 5;
} else {
- ASSERT(format[3] == '-');
+ DCHECK(format[3] == '-');
AppendToOutput("#%d", s - r + 1);
return 7;
}
}
case 'Z': { // IBZ-r.
- ASSERT((format[3] == '-') && (format[4] == 'r'));
+ DCHECK((format[3] == '-') && (format[4] == 'r'));
unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
: kWRegSizeInBits;
AppendToOutput("#%d", reg_size - r);
@@ -1510,7 +1510,7 @@ int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
int Disassembler::SubstituteLiteralField(Instruction* instr,
const char* format) {
- ASSERT(strncmp(format, "LValue", 6) == 0);
+ DCHECK(strncmp(format, "LValue", 6) == 0);
USE(format);
switch (instr->Mask(LoadLiteralMask)) {
@@ -1526,12 +1526,12 @@ int Disassembler::SubstituteLiteralField(Instruction* instr,
int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
- ASSERT(format[0] == 'H');
- ASSERT(instr->ShiftDP() <= 0x3);
+ DCHECK(format[0] == 'H');
+ DCHECK(instr->ShiftDP() <= 0x3);
switch (format[1]) {
case 'D': { // HDP.
- ASSERT(instr->ShiftDP() != ROR);
+ DCHECK(instr->ShiftDP() != ROR);
} // Fall through.
case 'L': { // HLo.
if (instr->ImmDPShift() != 0) {
@@ -1550,7 +1550,7 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
int Disassembler::SubstituteConditionField(Instruction* instr,
const char* format) {
- ASSERT(format[0] == 'C');
+ DCHECK(format[0] == 'C');
const char* condition_code[] = { "eq", "ne", "hs", "lo",
"mi", "pl", "vs", "vc",
"hi", "ls", "ge", "lt",
@@ -1559,7 +1559,7 @@ int Disassembler::SubstituteConditionField(Instruction* instr,
switch (format[1]) {
case 'B': cond = instr->ConditionBranch(); break;
case 'I': {
- cond = InvertCondition(static_cast<Condition>(instr->Condition()));
+ cond = NegateCondition(static_cast<Condition>(instr->Condition()));
break;
}
default: cond = instr->Condition();
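The 'CI' case now negates the condition rather than calling the removed InvertCondition. AArch64 condition codes come in complementary pairs that differ only in bit 0 (the always conditions aside), so negation is a single bit flip; a sketch of that, with the enum values being architectural encodings rather than V8 declarations:

#include <cassert>

// Architectural encodings for a few condition codes; not V8 declarations.
enum Condition { eq = 0, ne = 1, hs = 2, lo = 3, mi = 4, pl = 5 };

// Complementary conditions differ only in bit 0, so negation is a bit flip.
constexpr Condition Negate(Condition cond) {
  return static_cast<Condition>(cond ^ 1);
}

int main() {
  assert(Negate(eq) == ne);
  assert(Negate(lo) == hs);
  assert(Negate(mi) == pl);
  return 0;
}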
@@ -1572,12 +1572,12 @@ int Disassembler::SubstituteConditionField(Instruction* instr,
int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
const char* format) {
USE(format);
- ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
+ DCHECK(strncmp(format, "AddrPCRel", 9) == 0);
int offset = instr->ImmPCRel();
// Only ADR (AddrPCRelByte) is supported.
- ASSERT(strcmp(format, "AddrPCRelByte") == 0);
+ DCHECK(strcmp(format, "AddrPCRelByte") == 0);
char sign = '+';
if (offset < 0) {
@@ -1592,7 +1592,7 @@ int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
int Disassembler::SubstituteBranchTargetField(Instruction* instr,
const char* format) {
- ASSERT(strncmp(format, "BImm", 4) == 0);
+ DCHECK(strncmp(format, "BImm", 4) == 0);
int64_t offset = 0;
switch (format[5]) {
@@ -1619,8 +1619,8 @@ int Disassembler::SubstituteBranchTargetField(Instruction* instr,
int Disassembler::SubstituteExtendField(Instruction* instr,
const char* format) {
- ASSERT(strncmp(format, "Ext", 3) == 0);
- ASSERT(instr->ExtendMode() <= 7);
+ DCHECK(strncmp(format, "Ext", 3) == 0);
+ DCHECK(instr->ExtendMode() <= 7);
USE(format);
const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
@@ -1646,7 +1646,7 @@ int Disassembler::SubstituteExtendField(Instruction* instr,
int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
const char* format) {
- ASSERT(strncmp(format, "Offsetreg", 9) == 0);
+ DCHECK(strncmp(format, "Offsetreg", 9) == 0);
const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
"undefined", "undefined", "sxtw", "sxtx" };
USE(format);
@@ -1675,7 +1675,7 @@ int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
int Disassembler::SubstitutePrefetchField(Instruction* instr,
const char* format) {
- ASSERT(format[0] == 'P');
+ DCHECK(format[0] == 'P');
USE(format);
int prefetch_mode = instr->PrefetchMode();
@@ -1690,7 +1690,7 @@ int Disassembler::SubstitutePrefetchField(Instruction* instr,
int Disassembler::SubstituteBarrierField(Instruction* instr,
const char* format) {
- ASSERT(format[0] == 'M');
+ DCHECK(format[0] == 'M');
USE(format);
static const char* options[4][4] = {
@@ -1734,7 +1734,7 @@ namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
return tmp_buffer_.start();
}
@@ -1752,7 +1752,7 @@ const char* NameConverter::NameOfCPURegister(int reg) const {
if (ureg == v8::internal::kZeroRegCode) {
return "xzr";
}
- v8::internal::OS::SNPrintF(tmp_buffer_, "x%u", ureg);
+ v8::internal::SNPrintF(tmp_buffer_, "x%u", ureg);
return tmp_buffer_.start();
}
@@ -1786,7 +1786,7 @@ class BufferDisassembler : public v8::internal::Disassembler {
~BufferDisassembler() { }
virtual void ProcessOutput(v8::internal::Instruction* instr) {
- v8::internal::OS::SNPrintF(out_buffer_, "%s", GetOutput());
+ v8::internal::SNPrintF(out_buffer_, "%s", GetOutput());
}
private:
@@ -1797,7 +1797,7 @@ Disassembler::Disassembler(const NameConverter& converter)
: converter_(converter) {}
-Disassembler::~Disassembler() {}
+Disassembler::~Disassembler() { USE(converter_); }
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
diff --git a/deps/v8/src/arm64/disasm-arm64.h b/deps/v8/src/arm64/disasm-arm64.h
index 42552a2d8..8cd3b80db 100644
--- a/deps/v8/src/arm64/disasm-arm64.h
+++ b/deps/v8/src/arm64/disasm-arm64.h
@@ -5,12 +5,12 @@
#ifndef V8_ARM64_DISASM_ARM64_H
#define V8_ARM64_DISASM_ARM64_H
-#include "v8.h"
+#include "src/v8.h"
-#include "globals.h"
-#include "utils.h"
-#include "instructions-arm64.h"
-#include "decoder-arm64.h"
+#include "src/arm64/decoder-arm64.h"
+#include "src/arm64/instructions-arm64.h"
+#include "src/globals.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/frames-arm64.cc b/deps/v8/src/arm64/frames-arm64.cc
index da638ad6e..b3633e07b 100644
--- a/deps/v8/src/arm64/frames-arm64.cc
+++ b/deps/v8/src/arm64/frames-arm64.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "assembler.h"
-#include "assembler-arm64.h"
-#include "assembler-arm64-inl.h"
-#include "frames.h"
+#include "src/arm64/assembler-arm64-inl.h"
+#include "src/arm64/assembler-arm64.h"
+#include "src/assembler.h"
+#include "src/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/frames-arm64.h b/deps/v8/src/arm64/frames-arm64.h
index 3996bd75d..8d4ce8619 100644
--- a/deps/v8/src/arm64/frames-arm64.h
+++ b/deps/v8/src/arm64/frames-arm64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "arm64/constants-arm64.h"
-#include "arm64/assembler-arm64.h"
+#include "src/arm64/assembler-arm64.h"
+#include "src/arm64/constants-arm64.h"
#ifndef V8_ARM64_FRAMES_ARM64_H_
#define V8_ARM64_FRAMES_ARM64_H_
@@ -15,7 +15,6 @@ const int kNumRegs = kNumberOfRegisters;
// Registers x0-x17 are caller-saved.
const int kNumJSCallerSaved = 18;
const RegList kJSCallerSaved = 0x3ffff;
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of eight.
diff --git a/deps/v8/src/arm64/full-codegen-arm64.cc b/deps/v8/src/arm64/full-codegen-arm64.cc
index 0196e69e4..2e63814bd 100644
--- a/deps/v8/src/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/arm64/full-codegen-arm64.cc
@@ -2,22 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/isolate-inl.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+#include "src/stub-cache.h"
-#include "arm64/code-stubs-arm64.h"
-#include "arm64/macro-assembler-arm64.h"
+#include "src/arm64/code-stubs-arm64.h"
+#include "src/arm64/macro-assembler-arm64.h"
namespace v8 {
namespace internal {
@@ -34,18 +34,18 @@ class JumpPatchSite BASE_EMBEDDED {
~JumpPatchSite() {
if (patch_site_.is_bound()) {
- ASSERT(info_emitted_);
+ DCHECK(info_emitted_);
} else {
- ASSERT(reg_.IsNone());
+ DCHECK(reg_.IsNone());
}
}
void EmitJumpIfNotSmi(Register reg, Label* target) {
// This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
InstructionAccurateScope scope(masm_, 1);
- ASSERT(!info_emitted_);
- ASSERT(reg.Is64Bits());
- ASSERT(!reg.Is(csp));
+ DCHECK(!info_emitted_);
+ DCHECK(reg.Is64Bits());
+ DCHECK(!reg.Is(csp));
reg_ = reg;
__ bind(&patch_site_);
__ tbz(xzr, 0, target); // Always taken before patched.
@@ -54,9 +54,9 @@ class JumpPatchSite BASE_EMBEDDED {
void EmitJumpIfSmi(Register reg, Label* target) {
// This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
InstructionAccurateScope scope(masm_, 1);
- ASSERT(!info_emitted_);
- ASSERT(reg.Is64Bits());
- ASSERT(!reg.Is(csp));
+ DCHECK(!info_emitted_);
+ DCHECK(reg.Is64Bits());
+ DCHECK(!reg.Is(csp));
reg_ = reg;
__ bind(&patch_site_);
__ tbnz(xzr, 0, target); // Never taken before patched.
@@ -87,29 +87,6 @@ class JumpPatchSite BASE_EMBEDDED {
};
-static void EmitStackCheck(MacroAssembler* masm_,
- int pointers = 0,
- Register scratch = jssp) {
- Isolate* isolate = masm_->isolate();
- Label ok;
- ASSERT(jssp.Is(__ StackPointer()));
- ASSERT(scratch.Is(jssp) == (pointers == 0));
- Heap::RootListIndex index;
- if (pointers != 0) {
- __ Sub(scratch, jssp, pointers * kPointerSize);
- index = Heap::kRealStackLimitRootIndex;
- } else {
- index = Heap::kStackLimitRootIndex;
- }
- __ CompareRoot(scratch, index);
- __ B(hs, &ok);
- PredictableCodeSizeScope predictable(masm_,
- Assembler::kCallSizeWithRelocation);
- __ Call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ Bind(&ok);
-}
-
-
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
@@ -153,7 +130,7 @@ void FullCodeGenerator::Generate() {
__ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
__ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
__ Poke(x10, receiver_offset);
__ Bind(&ok);
@@ -170,18 +147,24 @@ void FullCodeGenerator::Generate() {
// Push(lr, fp, cp, x1);
// Add(fp, jssp, 2 * kPointerSize);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(BUILD_FUNCTION_FRAME);
+ __ Prologue(info->IsCodePreAgingActive());
info->AddNoFrameRange(0, masm_->pc_offset());
// Reserve space on the stack for locals.
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- ASSERT(!info->function()->is_generator() || locals_count == 0);
+ DCHECK(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
if (locals_count >= 128) {
- EmitStackCheck(masm_, locals_count, x10);
+ Label ok;
+ DCHECK(jssp.Is(__ StackPointer()));
+ __ Sub(x10, jssp, locals_count * kPointerSize);
+ __ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
+ __ B(hs, &ok);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ Bind(&ok);
}
__ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
if (FLAG_optimize_for_size) {
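The inlined large-locals check above subtracts the reserved space from jssp and compares against the real stack limit before taking the STACK_OVERFLOW path. A plain-C++ sketch of the same predicate, with kPointerSize assumed to be 8 and the addresses purely illustrative:

#include <cassert>
#include <cstdint>

constexpr int kPointerSize = 8;  // assumed

// Reserve space for the locals first, then compare against the real stack
// limit; the comparison is unsigned, mirroring the 'hs' branch above.
bool LocalsAllocationWouldOverflow(uint64_t jssp, uint64_t real_stack_limit,
                                   int locals_count) {
  return jssp - static_cast<uint64_t>(locals_count) * kPointerSize <
         real_stack_limit;
}

int main() {
  // 128 locals need 1024 bytes: 256 bytes of headroom overflows,
  // 2048 bytes of headroom does not.
  assert(LocalsAllocationWouldOverflow(0x10000, 0x10000 - 256, 128));
  assert(!LocalsAllocationWouldOverflow(0x10000, 0x10000 - 2048, 128));
  return 0;
}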
@@ -211,16 +194,19 @@ void FullCodeGenerator::Generate() {
if (heap_slots > 0) {
// Argument to NewContext is the function, which is still in x1.
Comment cmnt(masm_, "[ Allocate context");
+ bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
__ Mov(x10, Operand(info->scope()->GetScopeInfo()));
__ Push(x1, x10);
- __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
__ Push(x1);
- __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register_x1 = false;
// Context is returned in x0. It replaces the context passed to us.
@@ -241,8 +227,15 @@ void FullCodeGenerator::Generate() {
__ Str(x10, target);
// Update the write barrier.
- __ RecordWriteContextSlot(
- cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(
+ cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
}
@@ -298,9 +291,9 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Declarations");
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
- ASSERT(function->proxy()->var()->mode() == CONST ||
+ DCHECK(function->proxy()->var()->mode() == CONST ||
function->proxy()->var()->mode() == CONST_LEGACY);
- ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
VisitDeclarations(scope()->declarations());
@@ -309,13 +302,20 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- EmitStackCheck(masm_);
+ Label ok;
+ DCHECK(jssp.Is(__ StackPointer()));
+ __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_,
+ Assembler::kCallSizeWithRelocation);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ Bind(&ok);
}
{ Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
+ DCHECK(loop_depth() == 0);
VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
+ DCHECK(loop_depth() == 0);
}
// Always emit a 'return undefined' in case control fell off the end of
@@ -347,7 +347,7 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
- if (isolate()->IsDebuggerActive()) {
+ if (info_->is_debug()) {
// Detect debug break requests as soon as possible.
reset_value = FLAG_interrupt_budget >> 4;
}
@@ -359,13 +359,13 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Label* back_edge_target) {
- ASSERT(jssp.Is(__ StackPointer()));
+ DCHECK(jssp.Is(__ StackPointer()));
Comment cmnt(masm_, "[ Back edge bookkeeping");
// Block literal pools whilst emitting back edge code.
Assembler::BlockPoolsScope block_const_pool(masm_);
Label ok;
- ASSERT(back_edge_target->is_bound());
+ DCHECK(back_edge_target->is_bound());
// We want to do a round rather than a floor of distance/kCodeSizeMultiplier
// to reduce the absolute error due to the integer division. To do that,
// we add kCodeSizeMultiplier/2 to the distance (equivalent to adding 0.5 to
@@ -407,7 +407,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Runtime::TraceExit returns its parameter in x0.
__ Push(result_register());
__ CallRuntime(Runtime::kTraceExit, 1);
- ASSERT(x0.Is(result_register()));
+ DCHECK(x0.Is(result_register()));
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -441,7 +441,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// of the generated code must be consistent.
const Register& current_sp = __ StackPointer();
// Nothing ensures 16-byte alignment here.
- ASSERT(!current_sp.Is(csp));
+ DCHECK(!current_sp.Is(csp));
__ mov(current_sp, fp);
int no_frame_start = masm_->pc_offset();
__ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSize, PostIndex));
@@ -449,7 +449,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// TODO(all): This implementation is overkill as it supports 2**31+1
// arguments, consider how to improve it without creating a security
// hole.
- __ LoadLiteral(ip0, 3 * kInstructionSize);
+ __ ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
__ add(current_sp, current_sp, ip0);
__ ret();
__ dc64(kXRegSize * (info_->scope()->num_parameters() + 1));
@@ -460,25 +460,25 @@ void FullCodeGenerator::EmitReturnSequence() {
void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
}
void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
}
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
__ Push(result_register());
}
void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
// For simplicity we always test the accumulator register.
codegen()->GetVar(result_register(), var);
codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
@@ -542,7 +542,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ B(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -569,7 +569,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::EffectContext::DropAndPlug(int count,
Register reg) const {
- ASSERT(count > 0);
+ DCHECK(count > 0);
__ Drop(count);
}
@@ -577,7 +577,7 @@ void FullCodeGenerator::EffectContext::DropAndPlug(int count,
void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
int count,
Register reg) const {
- ASSERT(count > 0);
+ DCHECK(count > 0);
__ Drop(count);
__ Move(result_register(), reg);
}
@@ -585,7 +585,7 @@ void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
- ASSERT(count > 0);
+ DCHECK(count > 0);
if (count > 1) __ Drop(count - 1);
__ Poke(reg, 0);
}
@@ -593,7 +593,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
void FullCodeGenerator::TestContext::DropAndPlug(int count,
Register reg) const {
- ASSERT(count > 0);
+ DCHECK(count > 0);
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Mov(result_register(), reg);
@@ -604,7 +604,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
- ASSERT(materialize_true == materialize_false);
+ DCHECK(materialize_true == materialize_false);
__ Bind(materialize_true);
}
@@ -638,8 +638,8 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
Label* materialize_false) const {
- ASSERT(materialize_true == true_label_);
- ASSERT(materialize_false == false_label_);
+ DCHECK(materialize_true == true_label_);
+ DCHECK(materialize_false == false_label_);
}
@@ -700,8 +700,8 @@ void FullCodeGenerator::Split(Condition cond,
if (if_false == fall_through) {
__ B(cond, if_true);
} else if (if_true == fall_through) {
- ASSERT(if_false != fall_through);
- __ B(InvertCondition(cond), if_false);
+ DCHECK(if_false != fall_through);
+ __ B(NegateCondition(cond), if_false);
} else {
__ B(cond, if_true);
__ B(if_false);
@@ -723,7 +723,7 @@ MemOperand FullCodeGenerator::StackOperand(Variable* var) {
MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ DCHECK(var->IsContextSlot() || var->IsStackAllocated());
if (var->IsContextSlot()) {
int context_chain_length = scope()->ContextChainLength(var->scope());
__ LoadContext(scratch, context_chain_length);
@@ -745,8 +745,8 @@ void FullCodeGenerator::SetVar(Variable* var,
Register src,
Register scratch0,
Register scratch1) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- ASSERT(!AreAliased(src, scratch0, scratch1));
+ DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+ DCHECK(!AreAliased(src, scratch0, scratch1));
MemOperand location = VarOperand(var, scratch0);
__ Str(src, location);
@@ -789,7 +789,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
- ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+ DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset));
@@ -844,7 +844,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
__ Mov(x2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
- ASSERT(IsDeclaredVariableMode(mode));
+ DCHECK(IsDeclaredVariableMode(mode));
PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY
: NONE;
__ Mov(x1, Smi::FromInt(attr));
@@ -859,7 +859,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
// Pushing 0 (xzr) indicates no initial value.
__ Push(cp, x2, x1, xzr);
}
- __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
break;
}
}
@@ -874,7 +874,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case Variable::UNALLOCATED: {
globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script());
+ Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
// Check for stack overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
@@ -915,7 +915,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(cp, x2, x1);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
break;
}
}
@@ -924,8 +924,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
- ASSERT(variable->location() == Variable::CONTEXT);
- ASSERT(variable->interface()->IsFrozen());
+ DCHECK(variable->location() == Variable::CONTEXT);
+ DCHECK(variable->interface()->IsFrozen());
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
@@ -990,7 +990,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ Mov(flags, Smi::FromInt(DeclareGlobalsFlags()));
}
__ Push(cp, x11, flags);
- __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
}
@@ -998,7 +998,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
// Return value is ignored.
}
@@ -1165,11 +1165,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
- __ Push(x0); // Map.
- __ Mov(x0, Smi::FromInt(0));
- // Push enumeration cache, enumeration cache length (as smi) and zero.
__ SmiTag(x1);
- __ Push(x2, x1, x0);
+ // Map, enumeration cache, enum cache length, zero (both last as smis).
+ __ Push(x0, x2, x1, xzr);
__ B(&loop);
__ Bind(&no_descriptors);
@@ -1188,11 +1186,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
// TODO(all): similar check was done already. Can we avoid it here?
__ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE);
- ASSERT(Smi::FromInt(0) == 0);
+ DCHECK(Smi::FromInt(0) == 0);
__ CzeroX(x1, le); // Zero indicates proxy.
- __ Push(x1, x0); // Smi and array
- __ Ldr(x1, FieldMemOperand(x0, FixedArray::kLengthOffset));
- __ Push(x1, xzr); // Fixed array length (as smi) and initial index.
+ __ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
+ // Smi and array, fixed array length (as smi) and initial index.
+ __ Push(x1, x0, x2, xzr);
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
@@ -1273,26 +1271,8 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Iteration loop_statement(this, stmt);
increment_loop_depth();
- // var iterator = iterable[@@iterator]()
- VisitForAccumulatorValue(stmt->assign_iterator());
-
- // As with for-in, skip the loop if the iterator is null or undefined.
- Register iterator = x0;
- __ JumpIfRoot(iterator, Heap::kUndefinedValueRootIndex,
- loop_statement.break_label());
- __ JumpIfRoot(iterator, Heap::kNullValueRootIndex,
- loop_statement.break_label());
-
- // Convert the iterator to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(iterator, &convert);
- __ CompareObjectType(iterator, x1, x1, FIRST_SPEC_OBJECT_TYPE);
- __ B(ge, &done_convert);
- __ Bind(&convert);
- __ Push(iterator);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ Bind(&done_convert);
- __ Push(iterator);
+ // var iterator = iterable[Symbol.iterator]();
+ VisitForEffect(stmt->assign_iterator());
// Loop entry.
__ Bind(loop_statement.continue_label());
@@ -1349,7 +1329,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ LoadRoot(x10, pretenure ? Heap::kTrueValueRootIndex
: Heap::kFalseValueRootIndex);
__ Push(cp, x11, x10);
- __ CallRuntime(Runtime::kHiddenNewClosure, 3);
+ __ CallRuntime(Runtime::kNewClosure, 3);
}
context()->Plug(x0);
}
@@ -1361,7 +1341,7 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
Register current = cp;
@@ -1404,8 +1384,13 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ Bind(&fast);
}
- __ Ldr(x0, GlobalObjectMemOperand());
- __ Mov(x2, Operand(var->name()));
+ __ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand());
+ __ Mov(LoadIC::NameRegister(), Operand(proxy->var()->name()));
+ if (FLAG_vector_ics) {
+ __ Mov(LoadIC::SlotRegister(),
+ Smi::FromInt(proxy->VariableFeedbackSlot()));
+ }
+
ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL
: CONTEXTUAL;
CallLoadIC(mode);
@@ -1414,7 +1399,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Label* slow) {
- ASSERT(var->IsContextSlot());
+ DCHECK(var->IsContextSlot());
Register context = cp;
Register next = x10;
Register temp = x11;
@@ -1442,7 +1427,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
}
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow,
Label* done) {
@@ -1451,8 +1436,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
+ Variable* var = proxy->var();
if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+ EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
__ B(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
@@ -1465,7 +1451,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else { // LET || CONST
__ Mov(x0, Operand(var->name()));
__ Push(x0);
- __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
}
}
__ B(done);
@@ -1483,10 +1469,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
switch (var->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in x2 and the global
- // object (receiver) in x0.
- __ Ldr(x0, GlobalObjectMemOperand());
- __ Mov(x2, Operand(var->name()));
+ __ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand());
+ __ Mov(LoadIC::NameRegister(), Operand(var->name()));
+ if (FLAG_vector_ics) {
+ __ Mov(LoadIC::SlotRegister(),
+ Smi::FromInt(proxy->VariableFeedbackSlot()));
+ }
CallLoadIC(CONTEXTUAL);
context()->Plug(x0);
break;
@@ -1504,7 +1492,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// always looked up dynamically, i.e. in that case
// var->location() == LOOKUP.
// always holds.
- ASSERT(var->scope() != NULL);
+ DCHECK(var->scope() != NULL);
// Check if the binding really needs an initialization check. The check
// can be skipped in the following situation: we have a LET or CONST
@@ -1527,8 +1515,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
skip_init_check = false;
} else {
// Check that we always have valid source position.
- ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
- ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
+ DCHECK(proxy->position() != RelocInfo::kNoPosition);
skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1543,11 +1531,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// binding in harmony mode.
__ Mov(x0, Operand(var->name()));
__ Push(x0);
- __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
__ Bind(&done);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY);
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
__ Bind(&done);
}
@@ -1563,12 +1551,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Label done, slow;
// Generate code for loading from variables potentially shadowed by
// eval-introduced variables.
- EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
__ Bind(&slow);
Comment cmnt(masm_, "Lookup variable");
__ Mov(x1, Operand(var->name()));
__ Push(cp, x1); // Context and name.
- __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot, 2);
__ Bind(&done);
context()->Plug(x0);
break;
@@ -1600,7 +1588,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ Mov(x2, Operand(expr->pattern()));
__ Mov(x1, Operand(expr->flags()));
__ Push(x4, x3, x2, x1);
- __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
__ Mov(x5, x0);
__ Bind(&materialized);
@@ -1612,7 +1600,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ Bind(&runtime_allocate);
__ Mov(x10, Smi::FromInt(size));
__ Push(x5, x10);
- __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ Pop(x5);
__ Bind(&allocated);
@@ -1655,10 +1643,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
const int max_cloned_properties =
FastCloneShallowObjectStub::kMaximumClonedProperties;
if (expr->may_store_doubles() || expr->depth() > 1 ||
- Serializer::enabled(isolate()) || flags != ObjectLiteral::kFastElements ||
+ masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > max_cloned_properties) {
__ Push(x3, x2, x1, x0);
- __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
@@ -1688,14 +1676,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- __ Mov(x2, Operand(key->value()));
- __ Peek(x1, 0);
+ DCHECK(StoreIC::ValueRegister().is(x0));
+ __ Mov(StoreIC::NameRegister(), Operand(key->value()));
+ __ Peek(StoreIC::ReceiverRegister(), 0);
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1709,7 +1698,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(x0);
VisitForStackValue(key);
VisitForStackValue(value);
- __ Mov(x0, Smi::FromInt(NONE)); // PropertyAttributes
+ __ Mov(x0, Smi::FromInt(SLOPPY)); // Strict mode
__ Push(x0);
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
@@ -1749,11 +1738,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ Mov(x10, Smi::FromInt(NONE));
__ Push(x10);
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
if (expr->has_function()) {
- ASSERT(result_saved);
+ DCHECK(result_saved);
__ Peek(x0, 0);
__ Push(x0);
__ CallRuntime(Runtime::kToFastProperties, 1);
@@ -1777,7 +1766,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
+ DCHECK_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
@@ -1795,35 +1784,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
__ Mov(x2, Smi::FromInt(expr->literal_index()));
__ Mov(x1, Operand(constant_elements));
- if (has_fast_elements && constant_elements_values->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- isolate(),
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode,
- length);
- __ CallStub(&stub);
- __ IncrementCounter(
- isolate()->counters()->cow_arrays_created_stub(), 1, x10, x11);
- } else if ((expr->depth() > 1) || Serializer::enabled(isolate()) ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
__ Mov(x0, Smi::FromInt(flags));
__ Push(x3, x2, x1, x0);
- __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
} else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
-
- if (has_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- }
-
- FastCloneShallowArrayStub stub(isolate(),
- mode,
- allocation_site_mode,
- length);
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
@@ -1838,8 +1804,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ Push(x0);
- __ Push(Smi::FromInt(expr->literal_index()));
+ __ Mov(x1, Smi::FromInt(expr->literal_index()));
+ __ Push(x0, x1);
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
@@ -1872,7 +1838,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- ASSERT(expr->target()->IsValidReferenceExpression());
+ DCHECK(expr->target()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ Assignment");
@@ -1894,9 +1860,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY:
if (expr->is_compound()) {
- // We need the receiver both on the stack and in the accumulator.
- VisitForAccumulatorValue(property->obj());
- __ Push(result_register());
+ // We need the receiver both on the stack and in the register.
+ VisitForStackValue(property->obj());
+ __ Peek(LoadIC::ReceiverRegister(), 0);
} else {
VisitForStackValue(property->obj());
}
@@ -1904,9 +1870,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- __ Peek(x1, 0);
- __ Push(x0);
+ VisitForStackValue(property->key());
+ __ Peek(LoadIC::ReceiverRegister(), 1 * kPointerSize);
+ __ Peek(LoadIC::NameRegister(), 0);
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1983,9 +1949,14 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
- __ Mov(x2, Operand(key->value()));
- // Call load IC. It has arguments receiver and property name x0 and x2.
- CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+ __ Mov(LoadIC::NameRegister(), Operand(key->value()));
+ if (FLAG_vector_ics) {
+ __ Mov(LoadIC::SlotRegister(),
+ Smi::FromInt(prop->PropertyFeedbackSlot()));
+ CallLoadIC(NOT_CONTEXTUAL);
+ } else {
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+ }
}
@@ -1993,7 +1964,13 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, prop->PropertyFeedbackId());
+ if (FLAG_vector_ics) {
+ __ Mov(LoadIC::SlotRegister(),
+ Smi::FromInt(prop->PropertyFeedbackSlot()));
+ CallIC(ic);
+ } else {
+ CallIC(ic, prop->PropertyFeedbackId());
+ }
}
@@ -2064,11 +2041,12 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
case Token::MUL: {
Label not_minus_zero, done;
+ STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == (kXRegSizeInBits / 2));
+ STATIC_ASSERT(kSmiTag == 0);
__ Smulh(x10, left, right);
__ Cbnz(x10, &not_minus_zero);
__ Eor(x11, left, right);
__ Tbnz(x11, kXSignBit, &stub_call);
- STATIC_ASSERT(kSmiTag == 0);
__ Mov(result, x10);
__ B(&done);
__ Bind(&not_minus_zero);
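The Smulh/Cbnz/Eor/Tbnz sequence bails out to the stub when a smi product would be -0: the product is zero while exactly one operand is negative. The same predicate on untagged values, as a hedged sketch (the helper name is invented):

#include <cassert>
#include <cstdint>

// Invented helper name; operates on untagged values for clarity.
bool SmiMulNeedsMinusZeroBailout(int64_t left, int64_t right) {
  return (left * right == 0) && ((left ^ right) < 0);
}

int main() {
  assert(SmiMulNeedsMinusZeroBailout(-3, 0));   // -3 * 0 is -0, not a smi
  assert(!SmiMulNeedsMinusZeroBailout(0, 0));   // +0 is fine
  assert(!SmiMulNeedsMinusZeroBailout(-3, 4));  // non-zero product
  return 0;
}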
@@ -2113,7 +2091,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- ASSERT(expr->IsValidReferenceExpression());
+ DCHECK(expr->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2138,9 +2116,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->obj());
// TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
// this copy.
- __ Mov(x1, x0);
- __ Pop(x0); // Restore value.
- __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ __ Mov(StoreIC::ReceiverRegister(), x0);
+ __ Pop(StoreIC::ValueRegister()); // Restore value.
+ __ Mov(StoreIC::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
CallStoreIC();
break;
}
@@ -2148,8 +2127,8 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Push(x0); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ Mov(x1, x0);
- __ Pop(x2, x0);
+ __ Mov(KeyedStoreIC::NameRegister(), x0);
+ __ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::ValueRegister());
Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
@@ -2174,38 +2153,24 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
-void FullCodeGenerator::EmitCallStoreContextSlot(
- Handle<String> name, StrictMode strict_mode) {
- __ Mov(x11, Operand(name));
- __ Mov(x10, Smi::FromInt(strict_mode));
- // jssp[0] : mode.
- // jssp[8] : name.
- // jssp[16] : context.
- // jssp[24] : value.
- __ Push(x0, cp, x11, x10);
- __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
-}
-
-
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ Mov(x2, Operand(var->name()));
- __ Ldr(x1, GlobalObjectMemOperand());
+ __ Mov(StoreIC::NameRegister(), Operand(var->name()));
+ __ Ldr(StoreIC::ReceiverRegister(), GlobalObjectMemOperand());
CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
- ASSERT(!var->IsParameter()); // No const parameters.
+ DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
- __ Push(x0);
- __ Mov(x0, Operand(var->name()));
- __ Push(cp, x0); // Context and name.
- __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ __ Mov(x1, Operand(var->name()));
+ __ Push(x0, cp, x1);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
} else {
- ASSERT(var->IsStackLocal() || var->IsContextSlot());
+ DCHECK(var->IsStackLocal() || var->IsContextSlot());
Label skip;
MemOperand location = VarOperand(var, x1);
__ Ldr(x10, location);
@@ -2216,29 +2181,34 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
- if (var->IsLookupSlot()) {
- EmitCallStoreContextSlot(var->name(), strict_mode());
- } else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, x1);
- __ Ldr(x10, location);
- __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
- __ Mov(x10, Operand(var->name()));
- __ Push(x10);
- __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
- // Perform the assignment.
- __ Bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
- }
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label assign;
+ MemOperand location = VarOperand(var, x1);
+ __ Ldr(x10, location);
+ __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
+ __ Mov(x10, Operand(var->name()));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ // Perform the assignment.
+ __ Bind(&assign);
+ EmitStoreToStackLocalOrContextSlot(var, location);
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
- // Assignment to var or initializing assignment to let/const
- // in harmony mode.
if (var->IsLookupSlot()) {
- EmitCallStoreContextSlot(var->name(), strict_mode());
+ // Assignment to var.
+ __ Mov(x11, Operand(var->name()));
+ __ Mov(x10, Smi::FromInt(strict_mode()));
+ // jssp[0] : mode.
+ // jssp[8] : name.
+ // jssp[16] : context.
+ // jssp[24] : value.
+ __ Push(x0, cp, x11, x10);
+ __ CallRuntime(Runtime::kStoreLookupSlot, 4);
} else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ // Assignment to var or initializing assignment to let/const in harmony
+ // mode.
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, x1);
if (FLAG_debug_code && op == Token::INIT_LET) {
__ Ldr(x10, location);
@@ -2256,14 +2226,13 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
ASM_LOCATION("FullCodeGenerator::EmitNamedPropertyAssignment");
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
+ DCHECK(prop != NULL);
+ DCHECK(prop->key()->IsLiteral());
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
- __ Pop(x1);
-
+ __ Mov(StoreIC::NameRegister(), Operand(prop->key()->AsLiteral()->value()));
+ __ Pop(StoreIC::ReceiverRegister());
CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2278,7 +2247,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
// TODO(all): Could we pass this in registers rather than on the stack?
- __ Pop(x1, x2); // Key and object holding the property.
+ __ Pop(KeyedStoreIC::NameRegister(), KeyedStoreIC::ReceiverRegister());
+ DCHECK(KeyedStoreIC::ValueRegister().is(x0));
Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
@@ -2296,13 +2266,15 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (key->IsPropertyName()) {
VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadIC::ReceiverRegister(), x0);
EmitNamedPropertyLoad(expr);
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(x0);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ Pop(x1);
+ __ Move(LoadIC::NameRegister(), x0);
+ __ Pop(LoadIC::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
context()->Plug(x0);
}
@@ -2337,8 +2309,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
__ Push(isolate()->factory()->undefined_value());
} else {
// Load the function from the receiver.
- ASSERT(callee->IsProperty());
- __ Peek(x0, 0);
+ DCHECK(callee->IsProperty());
+ __ Peek(LoadIC::ReceiverRegister(), 0);
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
@@ -2359,8 +2331,9 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* callee = expr->expression();
// Load the function from the receiver.
- ASSERT(callee->IsProperty());
- __ Peek(x1, 0);
+ DCHECK(callee->IsProperty());
+ __ Peek(LoadIC::ReceiverRegister(), 0);
+ __ Move(LoadIC::NameRegister(), x0);
EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@@ -2413,19 +2386,16 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
int receiver_offset = 2 + info_->scope()->num_parameters();
__ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize));
- // Push.
- __ Push(x10, x11);
-
// Prepare to push the language mode.
- __ Mov(x10, Smi::FromInt(strict_mode()));
+ __ Mov(x12, Smi::FromInt(strict_mode()));
// Prepare to push the start position of the scope the calls resides in.
- __ Mov(x11, Smi::FromInt(scope()->start_position()));
+ __ Mov(x13, Smi::FromInt(scope()->start_position()));
// Push.
- __ Push(x10, x11);
+ __ Push(x10, x11, x12, x13);
// Do the runtime call.
- __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
@@ -2493,16 +2463,15 @@ void FullCodeGenerator::VisitCall(Call* expr) {
{ PreservePositionScope scope(masm()->positions_recorder());
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
}
__ Bind(&slow);
// Call the runtime to find the function to call (returned in x0)
// and the object holding it (returned in x1).
- __ Push(context_register());
__ Mov(x10, Operand(proxy->name()));
- __ Push(x10);
- __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ Push(context_register(), x10);
+ __ CallRuntime(Runtime::kLoadLookupSlot, 2);
__ Push(x0, x1); // Receiver, function.
// If fast case code has been generated, emit code to push the
@@ -2513,11 +2482,10 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ B(&call);
__ Bind(&done);
// Push function.
- __ Push(x0);
// The receiver is implicitly the global receiver. Indicate this
// by passing the undefined to the call function stub.
__ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
- __ Push(x1);
+ __ Push(x0, x1);
__ Bind(&call);
}
@@ -2536,7 +2504,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
} else {
- ASSERT(call_type == Call::OTHER_CALL);
+ DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
@@ -2549,7 +2517,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
#ifdef DEBUG
// RecordJSReturnSite should have been called.
- ASSERT(expr->return_is_recorded_);
+ DCHECK(expr->return_is_recorded_);
#endif
}
@@ -2583,7 +2551,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
if (FLAG_pretenuring_call_new) {
EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ DCHECK(expr->AllocationSiteFeedbackSlot() ==
expr->CallNewFeedbackSlot() + 1);
}
@@ -2599,7 +2567,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2619,7 +2587,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2630,9 +2598,10 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
+ uint64_t sign_mask = V8_UINT64_C(1) << (kSmiShift + kSmiValueSize - 1);
+
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ TestAndSplit(x0, kSmiTagMask | (0x80000000UL << kSmiShift), if_true,
- if_false, fall_through);
+ __ TestAndSplit(x0, kSmiTagMask | sign_mask, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
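The rewritten test builds its mask from the smi tag bit plus the sign bit of the 64-bit smi layout (payload in bits 63..32). A standalone sketch of that check, with the layout constants assumed to match the usual arm64 configuration:

#include <cassert>
#include <cstdint>

// Assumed 64-bit smi layout: tag bit 0 clear, 32-bit payload in bits 63..32.
constexpr uint64_t kSmiTagMask = 1;
constexpr int kSmiShift = 32;
constexpr int kSmiValueSize = 32;
constexpr uint64_t kSignMask = uint64_t{1} << (kSmiShift + kSmiValueSize - 1);

// Non-negative smi iff both the tag bit and the payload sign bit are clear.
bool IsNonNegativeSmi(uint64_t tagged) {
  return (tagged & (kSmiTagMask | kSignMask)) == 0;
}

int main() {
  assert(IsNonNegativeSmi(uint64_t{5} << kSmiShift));
  assert(!IsNonNegativeSmi(~uint64_t{0} << kSmiShift));  // negative payload
  assert(!IsNonNegativeSmi(1));                          // tag bit set
  return 0;
}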
@@ -2640,7 +2609,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2670,7 +2639,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2693,7 +2662,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitIsUndetectableObject");
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2718,7 +2687,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false, skip_lookup;
@@ -2819,7 +2788,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2841,7 +2810,7 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2868,7 +2837,7 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2890,7 +2859,7 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2912,7 +2881,7 @@ void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
+ DCHECK(expr->arguments()->length() == 0);
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -2944,7 +2913,7 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
+ DCHECK(args->length() == 2);
// Load the two objects into registers and perform the comparison.
VisitForStackValue(args->at(0));
@@ -2968,7 +2937,7 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
// ArgumentsAccessStub expects the key in x1.
VisitForAccumulatorValue(args->at(0));
@@ -2981,7 +2950,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
+ DCHECK(expr->arguments()->length() == 0);
Label exit;
// Get the number of formal parameters.
__ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
@@ -3004,7 +2973,7 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitClassOf");
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
Label done, null, function, non_function_constructor;
VisitForAccumulatorValue(args->at(0));
@@ -3069,7 +3038,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
+ DCHECK(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
@@ -3082,7 +3051,7 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 4);
+ DCHECK(args->length() == 4);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
@@ -3095,7 +3064,7 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitValueOf");
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0)); // Load the object.
Label done;
@@ -3112,8 +3081,8 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- ASSERT_NE(NULL, args->at(1)->AsLiteral());
+ DCHECK(args->length() == 2);
+ DCHECK_NE(NULL, args->at(1)->AsLiteral());
Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -3150,7 +3119,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ Bind(&not_date_object);
- __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kThrowNotDateError, 0);
__ Bind(&done);
context()->Plug(x0);
}
@@ -3158,7 +3127,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
+ DCHECK_EQ(3, args->length());
Register string = x0;
Register index = x1;
@@ -3188,7 +3157,7 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
+ DCHECK_EQ(3, args->length());
Register string = x0;
Register index = x1;
@@ -3219,7 +3188,7 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the MathPow stub.
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
+ DCHECK(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
MathPowStub stub(isolate(), MathPowStub::ON_STACK);
@@ -3230,7 +3199,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
+ DCHECK(args->length() == 2);
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
__ Pop(x1);
@@ -3259,7 +3228,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 1);
+ DCHECK_EQ(args->length(), 1);
// Load the argument into x0 and call the stub.
VisitForAccumulatorValue(args->at(0));
@@ -3272,7 +3241,7 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3294,7 +3263,7 @@ void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
+ DCHECK(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
@@ -3339,7 +3308,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
+ DCHECK(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
@@ -3386,7 +3355,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitStringAdd");
ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
+ DCHECK_EQ(2, args->length());
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
@@ -3401,7 +3370,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
+ DCHECK_EQ(2, args->length());
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3414,7 +3383,7 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() >= 2);
+ DCHECK(args->length() >= 2);
int arg_count = args->length() - 2; // 2 ~ receiver and function.
for (int i = 0; i < arg_count + 1; i++) {
@@ -3446,7 +3415,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
+ DCHECK(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(2));
@@ -3458,8 +3427,8 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
+ DCHECK_EQ(2, args->length());
+ DCHECK_NE(NULL, args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
@@ -3497,7 +3466,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
// Call runtime to perform the lookup.
__ Push(cache, key);
- __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
+ __ CallRuntime(Runtime::kGetFromCache, 2);
__ Bind(&done);
context()->Plug(x0);
@@ -3526,7 +3495,7 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
+ DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
__ AssertString(x0);
@@ -3542,7 +3511,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitFastAsciiArrayJoin");
ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
+ DCHECK(args->length() == 2);
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(0));
@@ -3754,6 +3723,17 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+ DCHECK(expr->arguments()->length() == 0);
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ __ Mov(x10, debug_is_active);
+ __ Ldrb(x0, MemOperand(x10));
+ __ SmiTag(x0);
+ context()->Plug(x0);
+}
+
+
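
The new EmitDebugIsActive intrinsic above reads a one-byte isolate flag and returns it as a smi. A simplified model of that load-and-tag step, assuming the usual 64-bit smi encoding (payload shifted left by 32):

    #include <cassert>
    #include <cstdint>

    static uint8_t debug_is_active_flag = 1;  // stands in for the isolate-owned byte

    static int64_t SmiTag(int64_t value) {
      return value << 32;  // assumed kSmiShift == 32
    }

    int main() {
      // The Ldrb followed by SmiTag in the generated code collapses to this.
      int64_t result = SmiTag(debug_is_active_flag);
      assert(result == (INT64_C(1) << 32));
      return 0;
    }
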
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->function() != NULL &&
expr->function()->intrinsic_type == Runtime::INLINE) {
@@ -3769,13 +3749,20 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Push the builtins object as the receiver.
__ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(x0, FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
- __ Push(x0);
+ __ Ldr(LoadIC::ReceiverRegister(),
+ FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
+ __ Push(LoadIC::ReceiverRegister());
// Load the function from the receiver.
Handle<String> name = expr->name();
- __ Mov(x2, Operand(name));
- CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+ __ Mov(LoadIC::NameRegister(), Operand(name));
+ if (FLAG_vector_ics) {
+ __ Mov(LoadIC::SlotRegister(),
+ Smi::FromInt(expr->CallRuntimeFeedbackSlot()));
+ CallLoadIC(NOT_CONTEXTUAL);
+ } else {
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+ }
// Push the target function under the receiver.
__ Pop(x10);
@@ -3827,7 +3814,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(strict_mode() == SLOPPY || var->is_this());
+ DCHECK(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ Ldr(x12, GlobalObjectMemOperand());
__ Mov(x11, Operand(var->name()));
@@ -3844,7 +3831,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// context where the variable was introduced.
__ Mov(x2, Operand(var->name()));
__ Push(context_register(), x2);
- __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
context()->Plug(x0);
}
} else {
@@ -3877,7 +3864,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
test->fall_through());
context()->Plug(test->true_label(), test->false_label());
} else {
- ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
// TODO(jbramley): This could be much more efficient using (for
// example) the CSEL instruction.
Label materialize_true, materialize_false, done;
@@ -3920,7 +3907,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- ASSERT(expr->expression()->IsValidReferenceExpression());
+ DCHECK(expr->expression()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
@@ -3939,7 +3926,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
AccumulatorValueContext context(this);
EmitVariableLoad(expr->expression()->AsVariableProxy());
} else {
@@ -3948,16 +3935,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Push(xzr);
}
if (assign_type == NAMED_PROPERTY) {
- // Put the object both on the stack and in the accumulator.
- VisitForAccumulatorValue(prop->obj());
- __ Push(x0);
+ // Put the object both on the stack and in the register.
+ VisitForStackValue(prop->obj());
+ __ Peek(LoadIC::ReceiverRegister(), 0);
EmitNamedPropertyLoad(prop);
} else {
// KEYED_PROPERTY
VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ Peek(x1, 0);
- __ Push(x0);
+ VisitForStackValue(prop->key());
+ __ Peek(LoadIC::ReceiverRegister(), 1 * kPointerSize);
+ __ Peek(LoadIC::NameRegister(), 0);
EmitKeyedPropertyLoad(prop);
}
}
@@ -4067,8 +4054,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
- __ Pop(x1);
+ __ Mov(StoreIC::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ __ Pop(StoreIC::ReceiverRegister());
CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4081,8 +4069,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ Pop(x1); // Key.
- __ Pop(x2); // Receiver.
+ __ Pop(KeyedStoreIC::NameRegister());
+ __ Pop(KeyedStoreIC::ReceiverRegister());
Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
@@ -4102,13 +4090,17 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- ASSERT(!context()->IsEffect());
- ASSERT(!context()->IsTest());
+ DCHECK(!context()->IsEffect());
+ DCHECK(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
- __ Ldr(x0, GlobalObjectMemOperand());
- __ Mov(x2, Operand(proxy->name()));
+ __ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand());
+ __ Mov(LoadIC::NameRegister(), Operand(proxy->name()));
+ if (FLAG_vector_ics) {
+ __ Mov(LoadIC::SlotRegister(),
+ Smi::FromInt(proxy->VariableFeedbackSlot()));
+ }
// Use a regular load, not a contextual load, to avoid a reference
// error.
CallLoadIC(NOT_CONTEXTUAL);
@@ -4119,12 +4111,12 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
__ Bind(&slow);
__ Mov(x0, Operand(proxy->name()));
__ Push(cp, x0);
- __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ Bind(&done);
@@ -4178,11 +4170,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfRoot(x0, Heap::kTrueValueRootIndex, if_true);
__ CompareRoot(x0, Heap::kFalseValueRootIndex);
Split(eq, if_true, if_false, fall_through);
- } else if (FLAG_harmony_typeof &&
- String::Equals(check, factory->null_string())) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof null_string");
- __ CompareRoot(x0, Heap::kNullValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->undefined_string())) {
ASM_LOCATION(
"FullCodeGenerator::EmitLiteralCompareTypeof undefined_string");
@@ -4204,9 +4191,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (String::Equals(check, factory->object_string())) {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string");
__ JumpIfSmi(x0, if_false);
- if (!FLAG_harmony_typeof) {
- __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
- }
+ __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
// Check for JS objects => true.
Register map = x10;
__ JumpIfObjectType(x0, map, x11, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
@@ -4365,7 +4350,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Bind(&suspend);
VisitForAccumulatorValue(expr->generator_object());
- ASSERT((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
+ DCHECK((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
__ Mov(x1, Smi::FromInt(continuation.pos()));
__ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
__ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
@@ -4376,7 +4361,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Cmp(__ StackPointer(), x1);
__ B(eq, &post_runtime);
__ Push(x0); // generator object
- __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Bind(&post_runtime);
__ Pop(result_register());
@@ -4408,6 +4393,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label l_catch, l_try, l_suspend, l_continuation, l_resume;
Label l_next, l_call, l_loop;
+ Register load_receiver = LoadIC::ReceiverRegister();
+ Register load_name = LoadIC::NameRegister();
+
// Initial send value is undefined.
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
__ B(&l_next);
@@ -4415,9 +4403,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ Bind(&l_catch);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
- __ LoadRoot(x2, Heap::kthrow_stringRootIndex); // "throw"
- __ Peek(x3, 1 * kPointerSize); // iter
- __ Push(x2, x3, x0); // "throw", iter, except
+ __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
+ __ Peek(x3, 1 * kPointerSize); // iter
+ __ Push(load_name, x3, x0); // "throw", iter, except
__ B(&l_call);
// try { received = %yield result }
@@ -4440,14 +4428,14 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
const int generator_object_depth = kPointerSize + handler_size;
__ Peek(x0, generator_object_depth);
__ Push(x0); // g
- ASSERT((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
+ DCHECK((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
__ Mov(x1, Smi::FromInt(l_continuation.pos()));
__ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
__ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
__ Mov(x1, cp);
__ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
kLRHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Pop(x0); // result
EmitReturnSequence();
@@ -4456,14 +4444,19 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// receiver = iter; f = 'next'; arg = received;
__ Bind(&l_next);
- __ LoadRoot(x2, Heap::knext_stringRootIndex); // "next"
- __ Peek(x3, 1 * kPointerSize); // iter
- __ Push(x2, x3, x0); // "next", iter, received
+
+ __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next"
+ __ Peek(x3, 1 * kPointerSize); // iter
+ __ Push(load_name, x3, x0); // "next", iter, received
// result = receiver[f](arg);
__ Bind(&l_call);
- __ Peek(x1, 1 * kPointerSize);
- __ Peek(x0, 2 * kPointerSize);
+ __ Peek(load_receiver, 1 * kPointerSize);
+ __ Peek(load_name, 2 * kPointerSize);
+ if (FLAG_vector_ics) {
+ __ Mov(LoadIC::SlotRegister(),
+ Smi::FromInt(expr->KeyedLoadFeedbackSlot()));
+ }
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
CallIC(ic, TypeFeedbackId::None());
__ Mov(x1, x0);
@@ -4476,19 +4469,29 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// if (!result.done) goto l_try;
__ Bind(&l_loop);
- __ Push(x0); // save result
- __ LoadRoot(x2, Heap::kdone_stringRootIndex); // "done"
- CallLoadIC(NOT_CONTEXTUAL); // result.done in x0
+ __ Move(load_receiver, x0);
+
+ __ Push(load_receiver); // save result
+ __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
+ if (FLAG_vector_ics) {
+ __ Mov(LoadIC::SlotRegister(),
+ Smi::FromInt(expr->DoneFeedbackSlot()));
+ }
+ CallLoadIC(NOT_CONTEXTUAL); // x0=result.done
// The ToBooleanStub argument (result.done) is in x0.
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
__ Cbz(x0, &l_try);
// result.value
- __ Pop(x0); // result
- __ LoadRoot(x2, Heap::kvalue_stringRootIndex); // "value"
- CallLoadIC(NOT_CONTEXTUAL); // result.value in x0
- context()->DropAndPlug(2, x0); // drop iter and g
+ __ Pop(load_receiver); // result
+ __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
+ if (FLAG_vector_ics) {
+ __ Mov(LoadIC::SlotRegister(),
+ Smi::FromInt(expr->ValueFeedbackSlot()));
+ }
+ CallLoadIC(NOT_CONTEXTUAL); // x0=result.value
+ context()->DropAndPlug(2, x0); // drop iter and g
break;
}
}
@@ -4506,7 +4509,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Register function = x4;
// The value stays in x0, and is ultimately read by the resumed generator, as
- // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+ // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
 // is read to throw the value when the resumed generator is already closed. x1
// will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
@@ -4588,7 +4591,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Mov(x10, Smi::FromInt(resume_mode));
__ Push(generator_object, result_register(), x10);
- __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ Unreachable();
@@ -4603,14 +4606,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
} else {
// Throw the provided value.
__ Push(value_reg);
- __ CallRuntime(Runtime::kHiddenThrow, 1);
+ __ CallRuntime(Runtime::kThrow, 1);
}
__ B(&done);
// Throw error if we attempt to operate on a running generator.
__ Bind(&wrong_state);
__ Push(generator_object);
- __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
+ __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
__ Bind(&done);
context()->Plug(result_register());
@@ -4631,7 +4634,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ Bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ Ldr(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -4645,7 +4648,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ Pop(result_value);
__ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
__ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
- ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ DCHECK_EQ(map->instance_size(), 5 * kPointerSize);
STATIC_ASSERT(JSObject::kPropertiesOffset + kPointerSize ==
JSObject::kElementsOffset);
STATIC_ASSERT(JSGeneratorObject::kResultValuePropertyOffset + kPointerSize ==
@@ -4685,7 +4688,7 @@ Register FullCodeGenerator::context_register() {
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
+ DCHECK(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
__ Str(value, MemOperand(fp, frame_offset));
}
@@ -4703,7 +4706,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
- ASSERT(kSmiTag == 0);
+ DCHECK(kSmiTag == 0);
__ Push(xzr);
} else if (declaration_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
@@ -4712,7 +4715,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
__ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX));
__ Push(x10);
} else {
- ASSERT(declaration_scope->is_function_scope());
+ DCHECK(declaration_scope->is_function_scope());
__ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(x10);
}
@@ -4721,7 +4724,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
void FullCodeGenerator::EnterFinallyBlock() {
ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock");
- ASSERT(!result_register().is(x10));
+ DCHECK(!result_register().is(x10));
// Preserve the result register while executing finally block.
// Also cook the return address in lr to the stack (smi encoded Code* delta).
__ Sub(x10, lr, Operand(masm_->CodeObject()));
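
The comment above describes saving a "cooked" return address, that is, the offset of lr from the start of the code object rather than the raw pointer. A small sketch of why that form is convenient to keep on the stack (my reading: the offset still makes sense if the code object is relocated, and it smi-encodes cleanly as a delta):

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t code_start = 0x100000;           // assumed address of the code object
      uintptr_t lr = code_start + 0x240;         // return address inside that code
      uintptr_t cooked = lr - code_start;        // what EnterFinallyBlock stores

      code_start = 0x200000;                     // suppose the code object moves
      uintptr_t restored = code_start + cooked;  // ExitFinallyBlock's reconstruction
      assert(restored - code_start == 0x240);    // same offset, valid new address
      return 0;
    }
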
@@ -4753,7 +4756,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
void FullCodeGenerator::ExitFinallyBlock() {
ASM_LOCATION("FullCodeGenerator::ExitFinallyBlock");
- ASSERT(!result_register().is(x10));
+ DCHECK(!result_register().is(x10));
// Restore pending message from stack.
__ Pop(x10, x11, x12);
@@ -4795,7 +4798,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address branch_address = pc - 3 * kInstructionSize;
PatchingAssembler patcher(branch_address, 1);
- ASSERT(Instruction::Cast(branch_address)
+ DCHECK(Instruction::Cast(branch_address)
->IsNop(Assembler::INTERRUPT_CODE_NOP) ||
(Instruction::Cast(branch_address)->IsCondBranchImm() &&
Instruction::Cast(branch_address)->ImmPCOffset() ==
@@ -4826,7 +4829,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Instruction* load = Instruction::Cast(pc)->preceding(2);
Address interrupt_address_pointer =
reinterpret_cast<Address>(load) + load->ImmPCOffset();
- ASSERT((Memory::uint64_at(interrupt_address_pointer) ==
+ DCHECK((Memory::uint64_at(interrupt_address_pointer) ==
reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
->builtins()
->OnStackReplacement()
diff --git a/deps/v8/src/arm64/ic-arm64.cc b/deps/v8/src/arm64/ic-arm64.cc
index c09b847ba..e08fcfd88 100644
--- a/deps/v8/src/arm64/ic-arm64.cc
+++ b/deps/v8/src/arm64/ic-arm64.cc
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "arm64/assembler-arm64.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "disasm.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
+#include "src/arm64/assembler-arm64.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/disasm.h"
+#include "src/ic-inl.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -34,51 +34,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
}
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-//
-// "receiver" holds the receiver on entry and is unchanged.
-// "elements" holds the property dictionary on fall through.
-static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register elements,
- Register scratch0,
- Register scratch1,
- Label* miss) {
- ASSERT(!AreAliased(receiver, elements, scratch0, scratch1));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the receiver is a valid JS object.
- // Let t be the object instance type, we want:
- // FIRST_SPEC_OBJECT_TYPE <= t <= LAST_SPEC_OBJECT_TYPE.
- // Since LAST_SPEC_OBJECT_TYPE is the last possible instance type we only
- // check the lower bound.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
- __ JumpIfObjectType(receiver, scratch0, scratch1, FIRST_SPEC_OBJECT_TYPE,
- miss, lt);
-
- // scratch0 now contains the map of the receiver and scratch1 the object type.
- Register map = scratch0;
- Register type = scratch1;
-
- // Check if the receiver is a global JS object.
- GenerateGlobalInstanceTypeCheck(masm, type, miss);
-
- // Check that the object does not require access checks.
- __ Ldrb(scratch1, FieldMemOperand(map, Map::kBitFieldOffset));
- __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, miss);
- __ Tbnz(scratch1, Map::kHasNamedInterceptor, miss);
-
- // Check that the properties dictionary is valid.
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ JumpIfNotRoot(scratch1, Heap::kHashTableMapRootIndex, miss);
-}
-
-
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -97,8 +52,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Register result,
Register scratch1,
Register scratch2) {
- ASSERT(!AreAliased(elements, name, scratch1, scratch2));
- ASSERT(!AreAliased(result, scratch1, scratch2));
+ DCHECK(!AreAliased(elements, name, scratch1, scratch2));
+ DCHECK(!AreAliased(result, scratch1, scratch2));
Label done;
@@ -144,7 +99,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
Register value,
Register scratch1,
Register scratch2) {
- ASSERT(!AreAliased(elements, name, value, scratch1, scratch2));
+ DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));
Label done;
@@ -192,7 +147,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register scratch,
int interceptor_bit,
Label* slow) {
- ASSERT(!AreAliased(map_scratch, scratch));
+ DCHECK(!AreAliased(map_scratch, scratch));
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
@@ -241,7 +196,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
Register result,
Label* not_fast_array,
Label* slow) {
- ASSERT(!AreAliased(receiver, key, elements, elements_map, scratch2));
+ DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2));
// Check for fast array.
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -290,7 +245,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
Register hash_scratch,
Label* index_string,
Label* not_unique) {
- ASSERT(!AreAliased(key, map_scratch, hash_scratch));
+ DCHECK(!AreAliased(key, map_scratch, hash_scratch));
// Is the key a name?
Label unique;
@@ -329,7 +284,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register scratch2,
Label* unmapped_case,
Label* slow_case) {
- ASSERT(!AreAliased(object, key, map, scratch1, scratch2));
+ DCHECK(!AreAliased(object, key, map, scratch1, scratch2));
Heap* heap = masm->isolate()->heap();
@@ -384,7 +339,7 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
Register parameter_map,
Register scratch,
Label* slow_case) {
- ASSERT(!AreAliased(key, parameter_map, scratch));
+ DCHECK(!AreAliased(key, parameter_map, scratch));
// Element is in arguments backing store, which is referenced by the
// second element of the parameter_map.
@@ -407,16 +362,17 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x2 : name
- // -- lr : return address
- // -- x0 : receiver
- // -----------------------------------
+ // The return address is in lr.
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ DCHECK(receiver.is(x1));
+ DCHECK(name.is(x2));
// Probe the stub cache.
- Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, x0, x2, x3, x4, x5, x6);
+ masm, flags, receiver, name, x3, x4, x5, x6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -424,38 +380,31 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
void LoadIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x2 : name
- // -- lr : return address
- // -- x0 : receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameDictionaryReceiverCheck(masm, x0, x1, x3, x4, &miss);
+ Register dictionary = x0;
+ DCHECK(!dictionary.is(ReceiverRegister()));
+ DCHECK(!dictionary.is(NameRegister()));
+ Label slow;
- // x1 now holds the property dictionary.
- GenerateDictionaryLoad(masm, &miss, x1, x2, x0, x3, x4);
+ __ Ldr(dictionary,
+ FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
+ GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), x0, x3, x4);
__ Ret();
- // Cache miss: Jump to runtime.
- __ Bind(&miss);
- GenerateMiss(masm);
+ // Dictionary load failed, go slow (but don't miss).
+ __ Bind(&slow);
+ GenerateRuntimeGetProperty(masm);
}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x2 : name
- // -- lr : return address
- // -- x0 : receiver
- // -----------------------------------
+ // The return address is in lr.
Isolate* isolate = masm->isolate();
ASM_LOCATION("LoadIC::GenerateMiss");
__ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
// Perform tail call to the entry.
- __ Push(x0, x2);
+ __ Push(ReceiverRegister(), NameRegister());
ExternalReference ref =
ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
__ TailCallExternalReference(ref, 2, 1);
@@ -463,29 +412,23 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- x2 : name
- // -- lr : return address
- // -- x0 : receiver
- // -----------------------------------
-
- __ Push(x0, x2);
+ // The return address is in lr.
+ __ Push(ReceiverRegister(), NameRegister());
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key
- // -- x1 : receiver
- // -----------------------------------
+ // The return address is in lr.
Register result = x0;
- Register key = x0;
- Register receiver = x1;
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ DCHECK(receiver.is(x1));
+ DCHECK(key.is(x2));
+
Label miss, unmapped;
- Register map_scratch = x2;
+ Register map_scratch = x0;
MemOperand mapped_location = GenerateMappedArgumentsLookup(
masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
__ Ldr(result, mapped_location);
@@ -495,10 +438,8 @@ void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// Parameter map is left in map_scratch when a jump on unmapped is done.
MemOperand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
- __ Ldr(x2, unmapped_location);
- __ JumpIfRoot(x2, Heap::kTheHoleValueRootIndex, &miss);
- // Move the result in x0. x0 must be preserved on miss.
- __ Mov(result, x2);
+ __ Ldr(result, unmapped_location);
+ __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
__ Ret();
__ Bind(&miss);
@@ -508,18 +449,14 @@ void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -----------------------------------
-
Label slow, notin;
+ Register value = ValueRegister();
+ Register key = NameRegister();
+ Register receiver = ReceiverRegister();
+ DCHECK(receiver.is(x1));
+ DCHECK(key.is(x2));
+ DCHECK(value.is(x0));
- Register value = x0;
- Register key = x1;
- Register receiver = x2;
Register map = x3;
// These registers are used by GenerateMappedArgumentsLookup to build a
@@ -559,16 +496,12 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key
- // -- x1 : receiver
- // -----------------------------------
+ // The return address is in lr.
Isolate* isolate = masm->isolate();
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
- __ Push(x1, x0);
+ __ Push(ReceiverRegister(), NameRegister());
// Perform tail call to the entry.
ExternalReference ref =
@@ -578,16 +511,35 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key
- // -- x1 : receiver
- // -----------------------------------
- Register key = x0;
- Register receiver = x1;
+// IC register specifications
+const Register LoadIC::ReceiverRegister() { return x1; }
+const Register LoadIC::NameRegister() { return x2; }
- __ Push(receiver, key);
+const Register LoadIC::SlotRegister() {
+ DCHECK(FLAG_vector_ics);
+ return x0;
+}
+
+
+const Register LoadIC::VectorRegister() {
+ DCHECK(FLAG_vector_ics);
+ return x3;
+}
+
+
+const Register StoreIC::ReceiverRegister() { return x1; }
+const Register StoreIC::NameRegister() { return x2; }
+const Register StoreIC::ValueRegister() { return x0; }
+
+
+const Register KeyedStoreIC::MapRegister() {
+ return x3;
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // The return address is in lr.
+ __ Push(ReceiverRegister(), NameRegister());
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
@@ -601,7 +553,7 @@ static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
Register scratch4,
Register scratch5,
Label *slow) {
- ASSERT(!AreAliased(
+ DCHECK(!AreAliased(
key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
Isolate* isolate = masm->isolate();
@@ -642,7 +594,7 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
Register scratch4,
Register scratch5,
Label *slow) {
- ASSERT(!AreAliased(
+ DCHECK(!AreAliased(
key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
Isolate* isolate = masm->isolate();
@@ -756,32 +708,30 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key
- // -- x1 : receiver
- // -----------------------------------
+ // The return address is in lr.
Label slow, check_name, index_smi, index_name;
- Register key = x0;
- Register receiver = x1;
+ Register key = NameRegister();
+ Register receiver = ReceiverRegister();
+ DCHECK(key.is(x2));
+ DCHECK(receiver.is(x1));
__ JumpIfNotSmi(key, &check_name);
__ Bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
- GenerateKeyedLoadWithSmiKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
+ GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
- // Slow case, key and receiver still in x0 and x1.
+ // Slow case.
__ Bind(&slow);
__ IncrementCounter(
- masm->isolate()->counters()->keyed_load_generic_slow(), 1, x2, x3);
+ masm->isolate()->counters()->keyed_load_generic_slow(), 1, x4, x3);
GenerateRuntimeGetProperty(masm);
__ Bind(&check_name);
- GenerateKeyNameCheck(masm, key, x2, x3, &index_name, &slow);
+ GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);
- GenerateKeyedLoadWithNameKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
+ GenerateKeyedLoadWithNameKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
__ Bind(&index_name);
__ IndexFromHash(x3, key);
@@ -791,17 +741,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key (index)
- // -- x1 : receiver
- // -----------------------------------
+ // Return address is in lr.
Label miss;
- Register index = x0;
- Register receiver = x1;
+ Register receiver = ReceiverRegister();
+ Register index = NameRegister();
Register result = x0;
Register scratch = x3;
+ DCHECK(!scratch.is(receiver) && !scratch.is(index));
StringCharAtGenerator char_at_generator(receiver,
index,
@@ -823,14 +770,14 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key
- // -- x1 : receiver
- // -----------------------------------
+ // Return address is in lr.
Label slow;
- Register key = x0;
- Register receiver = x1;
+
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ Register scratch1 = x3;
+ Register scratch2 = x4;
+ DCHECK(!AreAliased(scratch1, scratch2, receiver, key));
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &slow);
@@ -839,24 +786,23 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
// Get the map of the receiver.
- Register map = x2;
+ Register map = scratch1;
__ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
- __ Ldrb(x3, FieldMemOperand(map, Map::kBitFieldOffset));
- ASSERT(kSlowCaseBitFieldMask ==
+ __ Ldrb(scratch2, FieldMemOperand(map, Map::kBitFieldOffset));
+ DCHECK(kSlowCaseBitFieldMask ==
((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
- __ Tbnz(x3, Map::kIsAccessCheckNeeded, &slow);
- __ Tbz(x3, Map::kHasIndexedInterceptor, &slow);
+ __ Tbnz(scratch2, Map::kIsAccessCheckNeeded, &slow);
+ __ Tbz(scratch2, Map::kHasIndexedInterceptor, &slow);
// Everything is fine, call runtime.
__ Push(receiver, key);
__ TailCallExternalReference(
- ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
+ ExternalReference(IC_Utility(kLoadElementWithInterceptor),
masm->isolate()),
- 2,
- 1);
+ 2, 1);
__ Bind(&slow);
GenerateMiss(masm);
@@ -865,15 +811,9 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
ASM_LOCATION("KeyedStoreIC::GenerateMiss");
- // ---------- S t a t e --------------
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -- lr : return address
- // -----------------------------------
// Push receiver, key and value for runtime call.
- __ Push(x2, x1, x0);
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@@ -883,15 +823,9 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
ASM_LOCATION("KeyedStoreIC::GenerateSlow");
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -----------------------------------
// Push receiver, key and value for runtime call.
- __ Push(x2, x1, x0);
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
@@ -904,22 +838,15 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictMode strict_mode) {
ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
- // ---------- S t a t e --------------
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -- lr : return address
- // -----------------------------------
// Push receiver, key and value for runtime call.
- __ Push(x2, x1, x0);
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
- // Push PropertyAttributes(NONE) and strict_mode for runtime call.
- STATIC_ASSERT(NONE == 0);
+ // Push strict_mode for runtime call.
__ Mov(x10, Smi::FromInt(strict_mode));
- __ Push(xzr, x10);
+ __ Push(x10);
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
}
@@ -936,7 +863,7 @@ static void KeyedStoreGenerateGenericHelper(
Register receiver_map,
Register elements_map,
Register elements) {
- ASSERT(!AreAliased(
+ DCHECK(!AreAliased(
value, key, receiver, receiver_map, elements_map, elements, x10, x11));
Label transition_smi_elements;
@@ -1043,10 +970,10 @@ static void KeyedStoreGenerateGenericHelper(
x10,
x11,
slow);
- ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
+ ElementsTransitionGenerator::GenerateSmiToDouble(
+ masm, receiver, key, value, receiver_map, mode, slow);
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ B(&fast_double_without_map_check);
@@ -1058,10 +985,11 @@ static void KeyedStoreGenerateGenericHelper(
x10,
x11,
slow);
- ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
+
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
- slow);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ masm, receiver, key, value, receiver_map, mode, slow);
+
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ B(&finish_store);
@@ -1075,9 +1003,9 @@ static void KeyedStoreGenerateGenericHelper(
x10,
x11,
slow);
- ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
+ ElementsTransitionGenerator::GenerateDoubleToObject(
+ masm, receiver, key, value, receiver_map, mode, slow);
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ B(&finish_store);
}
@@ -1086,12 +1014,6 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictMode strict_mode) {
ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
- // ---------- S t a t e --------------
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -- lr : return address
- // -----------------------------------
Label slow;
Label array;
Label fast_object;
@@ -1100,9 +1022,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Label fast_double_grow;
Label fast_double;
- Register value = x0;
- Register key = x1;
- Register receiver = x2;
+ Register value = ValueRegister();
+ Register key = NameRegister();
+ Register receiver = ReceiverRegister();
+ DCHECK(receiver.is(x1));
+ DCHECK(key.is(x2));
+ DCHECK(value.is(x0));
+
Register receiver_map = x3;
Register elements = x4;
Register elements_map = x5;
@@ -1187,17 +1113,15 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ DCHECK(!AreAliased(receiver, name, ValueRegister(), x3, x4, x5, x6));
// Probe the stub cache.
- Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, x1, x2, x3, x4, x5, x6);
+ masm, flags, receiver, name, x3, x4, x5, x6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1205,14 +1129,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
-
- __ Push(x1, x2, x0);
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
// Tail call to the entry.
ExternalReference ref =
@@ -1222,20 +1139,14 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
void StoreIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
Label miss;
- Register value = x0;
- Register receiver = x1;
- Register name = x2;
+ Register value = ValueRegister();
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
Register dictionary = x3;
+ DCHECK(!AreAliased(value, receiver, name, x3, x4, x5));
- GenerateNameDictionaryReceiverCheck(
- masm, receiver, dictionary, x4, x5, &miss);
+ __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
Counters* counters = masm->isolate()->counters();
@@ -1252,21 +1163,14 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictMode strict_mode) {
ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
- // ----------- S t a t e -------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
- __ Push(x1, x2, x0);
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
- __ Mov(x11, Smi::FromInt(NONE)); // PropertyAttributes
__ Mov(x10, Smi::FromInt(strict_mode));
- __ Push(x11, x10);
+ __ Push(x10);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
}
@@ -1279,7 +1183,7 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) {
// -----------------------------------
// Push receiver, name and value for runtime call.
- __ Push(x1, x2, x0);
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
@@ -1349,9 +1253,9 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// tb(!n)z test_reg, #0, <target>
Instruction* to_patch = info.SmiCheck();
PatchingAssembler patcher(to_patch, 1);
- ASSERT(to_patch->IsTestBranch());
- ASSERT(to_patch->ImmTestBranchBit5() == 0);
- ASSERT(to_patch->ImmTestBranchBit40() == 0);
+ DCHECK(to_patch->IsTestBranch());
+ DCHECK(to_patch->ImmTestBranchBit5() == 0);
+ DCHECK(to_patch->ImmTestBranchBit40() == 0);
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagMask == 1);
@@ -1359,11 +1263,11 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
int branch_imm = to_patch->ImmTestBranch();
Register smi_reg;
if (check == ENABLE_INLINED_SMI_CHECK) {
- ASSERT(to_patch->Rt() == xzr.code());
+ DCHECK(to_patch->Rt() == xzr.code());
smi_reg = info.SmiRegister();
} else {
- ASSERT(check == DISABLE_INLINED_SMI_CHECK);
- ASSERT(to_patch->Rt() != xzr.code());
+ DCHECK(check == DISABLE_INLINED_SMI_CHECK);
+ DCHECK(to_patch->Rt() != xzr.code());
smi_reg = xzr;
}
@@ -1371,7 +1275,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// This is JumpIfNotSmi(smi_reg, branch_imm).
patcher.tbnz(smi_reg, 0, branch_imm);
} else {
- ASSERT(to_patch->Mask(TestBranchMask) == TBNZ);
+ DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
// This is JumpIfSmi(smi_reg, branch_imm).
patcher.tbz(smi_reg, 0, branch_imm);
}
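
PatchInlinedSmiCode above flips a tbz/tbnz between testing the real value register and testing xzr. Since xzr always reads as zero, bit 0 of xzr is never set, so the patched branch has a fixed outcome either way; that is how the inlined check is enabled or disabled in place without moving code. A tiny model of the branch semantics under the kSmiTag == 0 convention asserted above (not the V8 patcher, just the decision logic):

    #include <cassert>
    #include <cstdint>

    enum Op { TBZ, TBNZ };

    struct TestBranch {
      Op op;
      bool tests_xzr;  // true when the instruction has been patched to test xzr
    };

    static bool BranchTaken(TestBranch insn, uint64_t reg_value) {
      uint64_t tested = insn.tests_xzr ? 0 : reg_value;  // xzr reads as zero
      bool bit0 = tested & 1;                            // the smi tag bit
      return insn.op == TBZ ? !bit0 : bit0;
    }

    int main() {
      uint64_t heap_pointer = 0x1001;  // tag bit set, so not a smi

      // Disabled check (JumpIfNotSmi patched to test xzr): never branches.
      assert(!BranchTaken({TBNZ, true}, heap_pointer));

      // Enabled check: branches to the deferred code for any non-smi value.
      assert(BranchTaken({TBNZ, false}, heap_pointer));
      return 0;
    }
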
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index 2996fc94c..a6ca6affa 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#define ARM64_DEFINE_FP_STATICS
-#include "arm64/instructions-arm64.h"
-#include "arm64/assembler-arm64-inl.h"
+#include "src/arm64/assembler-arm64-inl.h"
+#include "src/arm64/instructions-arm64.h"
namespace v8 {
namespace internal {
@@ -67,7 +67,7 @@ bool Instruction::IsStore() const {
static uint64_t RotateRight(uint64_t value,
unsigned int rotate,
unsigned int width) {
- ASSERT(width <= 64);
+ DCHECK(width <= 64);
rotate &= 63;
return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
(value >> rotate);
@@ -77,9 +77,9 @@ static uint64_t RotateRight(uint64_t value,
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
uint64_t value,
unsigned width) {
- ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
+ DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
(width == 32));
- ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
uint64_t result = value & ((1UL << width) - 1UL);
for (unsigned i = width; i < reg_size; i *= 2) {
result |= (result << i);
@@ -193,7 +193,7 @@ ptrdiff_t Instruction::ImmPCOffset() {
offset = ImmBranch() << kInstructionSizeLog2;
} else {
// Load literal (offset from PC).
- ASSERT(IsLdrLiteral());
+ DCHECK(IsLdrLiteral());
// The offset is always shifted by 2 bits, even for loads to 64-bits
// registers.
offset = ImmLLiteral() << kInstructionSizeLog2;
@@ -231,9 +231,9 @@ void Instruction::SetImmPCOffsetTarget(Instruction* target) {
void Instruction::SetPCRelImmTarget(Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
- ASSERT(IsAdr());
+ DCHECK(IsAdr());
- int target_offset = DistanceTo(target);
+ ptrdiff_t target_offset = DistanceTo(target);
Instr imm;
if (Instruction::IsValidPCRelOffset(target_offset)) {
imm = Assembler::ImmPCRelAddress(target_offset);
@@ -241,13 +241,13 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {
} else {
PatchingAssembler patcher(this,
PatchingAssembler::kAdrFarPatchableNInstrs);
- patcher.PatchAdrFar(target);
+ patcher.PatchAdrFar(target_offset);
}
}
void Instruction::SetBranchImmTarget(Instruction* target) {
- ASSERT(IsAligned(DistanceTo(target), kInstructionSize));
+ DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
Instr branch_imm = 0;
uint32_t imm_mask = 0;
ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2;
@@ -279,8 +279,8 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
void Instruction::SetImmLLiteral(Instruction* source) {
- ASSERT(IsAligned(DistanceTo(source), kInstructionSize));
- ptrdiff_t offset = DistanceTo(source) >> kLiteralEntrySizeLog2;
+ DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
+ ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2;
Instr imm = Assembler::ImmLLiteral(offset);
Instr mask = ImmLLiteral_mask;
@@ -304,7 +304,7 @@ bool InstructionSequence::IsInlineData() const {
// xzr and Register are not defined in that header. Consider adding
// instructions-arm64-inl.h to work around this.
uint64_t InstructionSequence::InlineData() const {
- ASSERT(IsInlineData());
+ DCHECK(IsInlineData());
uint64_t payload = ImmMoveWide();
// TODO(all): If we extend ::InlineData() to support bigger data, we need
// to update this method too.
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index 968ddace0..bd4e75377 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -5,10 +5,10 @@
#ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_
#define V8_ARM64_INSTRUCTIONS_ARM64_H_
-#include "globals.h"
-#include "utils.h"
-#include "arm64/constants-arm64.h"
-#include "arm64/utils-arm64.h"
+#include "src/arm64/constants-arm64.h"
+#include "src/arm64/utils-arm64.h"
+#include "src/globals.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -137,7 +137,7 @@ class Instruction {
// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
// formed from ImmPCRelLo and ImmPCRelHi.
int ImmPCRel() const {
- ASSERT(IsPCRelAddressing());
+ DCHECK(IsPCRelAddressing());
int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
int const width = ImmPCRelLo_width + ImmPCRelHi_width;
return signed_bitextract_32(width - 1, 0, offset);
@@ -353,7 +353,7 @@ class Instruction {
void SetImmLLiteral(Instruction* source);
uint8_t* LiteralAddress() {
- int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
+ int offset = ImmLLiteral() << kLoadLiteralScaleLog2;
return reinterpret_cast<uint8_t*>(this) + offset;
}
@@ -364,7 +364,7 @@ class Instruction {
CheckAlignment check = CHECK_ALIGNMENT) {
Address addr = reinterpret_cast<Address>(this) + offset;
// The FUZZ_disasm test relies on no check being done.
- ASSERT(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize));
+ DCHECK(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize));
return Cast(addr);
}
@@ -416,24 +416,38 @@ const Instr kImmExceptionIsUnreachable = 0xdebf;
// A pseudo 'printf' instruction. The arguments will be passed to the platform
// printf method.
const Instr kImmExceptionIsPrintf = 0xdeb1;
-// Parameters are stored in ARM64 registers as if the printf pseudo-instruction
-// was a call to the real printf method:
-//
-// x0: The format string, then either of:
+// Most parameters are stored in ARM64 registers as if the printf
+// pseudo-instruction was a call to the real printf method:
+// x0: The format string.
// x1-x7: Optional arguments.
// d0-d7: Optional arguments.
//
-// Floating-point and integer arguments are passed in separate sets of
-// registers in AAPCS64 (even for varargs functions), so it is not possible to
-// determine the type of location of each arguments without some information
-// about the values that were passed in. This information could be retrieved
-// from the printf format string, but the format string is not trivial to
-// parse so we encode the relevant information with the HLT instruction.
-// - Type
-// Either kRegister or kFPRegister, but stored as a uint32_t because there's
-// no way to guarantee the size of the CPURegister::RegisterType enum.
-const unsigned kPrintfTypeOffset = 1 * kInstructionSize;
-const unsigned kPrintfLength = 2 * kInstructionSize;
+// Also, the argument layout is described inline in the instructions:
+// - arg_count: The number of arguments.
+// - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
+//
+// Floating-point and integer arguments are passed in separate sets of registers
+// in AAPCS64 (even for varargs functions), so it is not possible to determine
+// the type of each argument without some information about the values that were
+// passed in. This information could be retrieved from the printf format string,
+// but the format string is not trivial to parse so we encode the relevant
+// information with the HLT instruction.
+const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
+const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
+const unsigned kPrintfLength = 3 * kInstructionSize;
+
+const unsigned kPrintfMaxArgCount = 4;
+
+// The argument pattern is a set of two-bit-fields, each with one of the
+// following values:
+enum PrintfArgPattern {
+ kPrintfArgW = 1,
+ kPrintfArgX = 2,
+ // There is no kPrintfArgS because floats are always converted to doubles in C
+ // varargs calls.
+ kPrintfArgD = 3
+};
+static const unsigned kPrintfArgPatternBits = 2;
// A pseudo 'debug' instruction.
const Instr kImmExceptionIsDebug = 0xdeb0;
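
The rewritten comments above describe the new inline argument layout for the printf pseudo-instruction: an argument count at kPrintfArgCountOffset and a packed list of two-bit PrintfArgPattern fields at kPrintfArgPatternListOffset. A hedged sketch of how a consumer (the simulator, say) could decode that description, assuming both values are stored as 32-bit data words at those offsets (consistent with kPrintfLength spanning three instruction slots) and that argument i occupies the i-th two-bit field from the least significant end:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Constants mirroring the diff above; not an authoritative copy of V8's.
const unsigned kInstructionSize = 4;
const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
const unsigned kPrintfMaxArgCount = 4;
const unsigned kPrintfArgPatternBits = 2;

enum PrintfArgPattern { kPrintfArgW = 1, kPrintfArgX = 2, kPrintfArgD = 3 };

// 'pc' points at the HLT #kImmExceptionIsPrintf instruction.
void DescribePrintfArgs(const uint8_t* pc) {
  uint32_t arg_count;
  uint32_t arg_pattern_list;
  std::memcpy(&arg_count, pc + kPrintfArgCountOffset, sizeof(arg_count));
  std::memcpy(&arg_pattern_list, pc + kPrintfArgPatternListOffset,
              sizeof(arg_pattern_list));

  for (unsigned i = 0; i < arg_count && i < kPrintfMaxArgCount; i++) {
    unsigned pattern = (arg_pattern_list >> (i * kPrintfArgPatternBits)) &
                       ((1u << kPrintfArgPatternBits) - 1);
    switch (pattern) {
      case kPrintfArgW: std::printf("arg%u: W register (32-bit integer)\n", i); break;
      case kPrintfArgX: std::printf("arg%u: X register (64-bit integer)\n", i); break;
      case kPrintfArgD: std::printf("arg%u: D register (double)\n", i); break;
      default:          std::printf("arg%u: unexpected pattern %u\n", i, pattern); break;
    }
  }
}
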
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index a6fe1234b..59982d975 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "arm64/instrument-arm64.h"
+#include "src/arm64/instrument-arm64.h"
namespace v8 {
namespace internal {
Counter::Counter(const char* name, CounterType type)
: count_(0), enabled_(false), type_(type) {
- ASSERT(name != NULL);
+ DCHECK(name != NULL);
strncpy(name_, name, kCounterNameMaxLength);
}
@@ -107,8 +107,7 @@ Instrument::Instrument(const char* datafile, uint64_t sample_period)
}
}
- static const int num_counters =
- sizeof(kCounterList) / sizeof(CounterDescriptor);
+ static const int num_counters = ARRAY_SIZE(kCounterList);
// Dump an instrumentation description comment at the top of the file.
fprintf(output_stream_, "# counters=%d\n", num_counters);
@@ -144,7 +143,7 @@ void Instrument::Update() {
// Increment the instruction counter, and dump all counters if a sample period
// has elapsed.
static Counter* counter = GetCounter("Instruction");
- ASSERT(counter->type() == Cumulative);
+ DCHECK(counter->type() == Cumulative);
counter->Increment();
if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
diff --git a/deps/v8/src/arm64/instrument-arm64.h b/deps/v8/src/arm64/instrument-arm64.h
index 2d41b5857..86ddfcbbc 100644
--- a/deps/v8/src/arm64/instrument-arm64.h
+++ b/deps/v8/src/arm64/instrument-arm64.h
@@ -5,10 +5,11 @@
#ifndef V8_ARM64_INSTRUMENT_ARM64_H_
#define V8_ARM64_INSTRUMENT_ARM64_H_
-#include "globals.h"
-#include "utils.h"
-#include "arm64/decoder-arm64.h"
-#include "arm64/constants-arm64.h"
+#include "src/globals.h"
+#include "src/utils.h"
+
+#include "src/arm64/constants-arm64.h"
+#include "src/arm64/decoder-arm64.h"
namespace v8 {
namespace internal {
@@ -31,7 +32,7 @@ enum CounterType {
class Counter {
public:
- Counter(const char* name, CounterType type = Gauge);
+ explicit Counter(const char* name, CounterType type = Gauge);
void Increment();
void Enable();
diff --git a/deps/v8/src/arm64/lithium-arm64.cc b/deps/v8/src/arm64/lithium-arm64.cc
index a0d3c298f..7bb66dbd7 100644
--- a/deps/v8/src/arm64/lithium-arm64.cc
+++ b/deps/v8/src/arm64/lithium-arm64.cc
@@ -2,17 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#include "lithium-allocator-inl.h"
-#include "arm64/lithium-arm64.h"
-#include "arm64/lithium-codegen-arm64.h"
-#include "hydrogen-osr.h"
+#include "src/arm64/lithium-codegen-arm64.h"
+#include "src/hydrogen-osr.h"
+#include "src/lithium-inl.h"
namespace v8 {
namespace internal {
-
#define DEFINE_COMPILE(type) \
void L##type::CompileToNative(LCodeGen* generator) { \
generator->Do##type(this); \
@@ -26,17 +24,17 @@ void LInstruction::VerifyCall() {
// outputs because all registers are blocked by the calling convention.
// Input operands must use a fixed register or use-at-start policy or
// a non-register policy.
- ASSERT(Output() == NULL ||
+ DCHECK(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
for (UseIterator it(this); !it.Done(); it.Advance()) {
LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||
+ DCHECK(operand->HasFixedPolicy() ||
operand->IsUsedAtStart());
}
for (TempIterator it(this); !it.Done(); it.Advance()) {
LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
+ DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
}
}
#endif
@@ -284,7 +282,9 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
- hydrogen()->access().PrintTo(stream);
+ OStringStream os;
+ os << hydrogen()->access();
+ stream->Add(os.c_str());
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -501,7 +501,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
+ DCHECK(!instr->HasPointerMap());
instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -543,21 +543,28 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
- ASSERT(kind == GENERAL_REGISTERS);
+ DCHECK(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ DCHECK(operand->HasFixedPolicy());
+ return operand;
+}
+
+
LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
+ DCHECK(operand->HasFixedPolicy());
return operand;
}
LPlatformChunk* LChunkBuilder::Build() {
- ASSERT(is_unused());
+ DCHECK(is_unused());
chunk_ = new(zone()) LPlatformChunk(info_, graph_);
LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
@@ -583,7 +590,7 @@ LPlatformChunk* LChunkBuilder::Build() {
void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
- ASSERT(is_building());
+ DCHECK(is_building());
current_block_ = block;
if (block->IsStartBlock()) {
@@ -592,14 +599,14 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
} else if (block->predecessors()->length() == 1) {
// We have a single predecessor => copy environment and outgoing
// argument count from the predecessor.
- ASSERT(block->phis()->length() == 0);
+ DCHECK(block->phis()->length() == 0);
HBasicBlock* pred = block->predecessors()->at(0);
HEnvironment* last_environment = pred->last_environment();
- ASSERT(last_environment != NULL);
+ DCHECK(last_environment != NULL);
// Only copy the environment, if it is later used again.
if (pred->end()->SecondSuccessor() == NULL) {
- ASSERT(pred->end()->FirstSuccessor() == block);
+ DCHECK(pred->end()->FirstSuccessor() == block);
} else {
if ((pred->end()->FirstSuccessor()->block_id() > block->block_id()) ||
(pred->end()->SecondSuccessor()->block_id() > block->block_id())) {
@@ -607,7 +614,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
}
}
block->UpdateEnvironment(last_environment);
- ASSERT(pred->argument_count() >= 0);
+ DCHECK(pred->argument_count() >= 0);
argument_count_ = pred->argument_count();
} else {
// We are at a state join => process phis.
@@ -660,7 +667,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (current->OperandCount() == 0) {
instr = DefineAsRegister(new(zone()) LDummy());
} else {
- ASSERT(!current->OperandAt(0)->IsControlInstruction());
+ DCHECK(!current->OperandAt(0)->IsControlInstruction());
instr = DefineAsRegister(new(zone())
LDummyUse(UseAny(current->OperandAt(0))));
}
@@ -672,76 +679,90 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
chunk_->AddInstruction(dummy, current_block_);
}
} else {
- instr = current->CompileToLithium(this);
+ HBasicBlock* successor;
+ if (current->IsControlInstruction() &&
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+ successor != NULL) {
+ instr = new(zone()) LGoto(successor);
+ } else {
+ instr = current->CompileToLithium(this);
+ }
}
argument_count_ += current->argument_delta();
- ASSERT(argument_count_ >= 0);
+ DCHECK(argument_count_ >= 0);
if (instr != NULL) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(current);
+ AddInstruction(instr, current);
+ }
+
+ current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, the register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // the it was just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() &&
- instr->ClobbersDoubleRegisters(isolate()))) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- ASSERT(fixed == 0 || used_at_start == 0);
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+ // register constraint. In this case, the register allocator won't see an
+ // interference between the split child and the use-at-start (it would if
+  // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
}
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ DCHECK(fixed == 0 || used_at_start == 0);
+ }
#endif
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ chunk_->AddInstruction(instr, current_block_);
+
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ LInstruction* instruction_needing_environment = NULL;
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
}
- chunk_->AddInstruction(instr, current_block_);
-
- if (instr->IsCall()) {
- HValue* hydrogen_value_for_lazy_bailout = current;
- LInstruction* instruction_needing_environment = NULL;
- if (current->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(current->next());
- instruction_needing_environment = instr;
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->
- SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
- }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
}
}
- current_instruction_ = old_current;
}
@@ -765,9 +786,9 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
if (op == Token::MOD) {
LOperand* left = UseFixedDouble(instr->left(), d0);
@@ -785,7 +806,7 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
HBinaryOperation* instr) {
- ASSERT((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) ||
+ DCHECK((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) ||
(op == Token::DIV) || (op == Token::MOD) || (op == Token::SHR) ||
(op == Token::SHL) || (op == Token::SAR) || (op == Token::ROR) ||
(op == Token::BIT_OR) || (op == Token::BIT_AND) ||
@@ -795,9 +816,9 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
// TODO(jbramley): Once we've implemented smi support for all arithmetic
// operations, these assertions should check IsTagged().
- ASSERT(instr->representation().IsSmiOrTagged());
- ASSERT(left->representation().IsSmiOrTagged());
- ASSERT(right->representation().IsSmiOrTagged());
+ DCHECK(instr->representation().IsSmiOrTagged());
+ DCHECK(left->representation().IsSmiOrTagged());
+ DCHECK(right->representation().IsSmiOrTagged());
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left_operand = UseFixed(left, x1);
@@ -837,8 +858,8 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
if (shifted_operation != NULL) {
@@ -856,16 +877,16 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsExternal()) {
- ASSERT(instr->left()->representation().IsExternal());
- ASSERT(instr->right()->representation().IsInteger32());
- ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ DCHECK(instr->left()->representation().IsExternal());
+ DCHECK(instr->right()->representation().IsInteger32());
+ DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return DefineAsRegister(new(zone()) LAddE(left, right));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
+ DCHECK(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -921,9 +942,9 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
if (shifted_operation != NULL) {
@@ -965,9 +986,6 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
@@ -976,7 +994,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
// These representations have simple checks that cannot deoptimize.
return new(zone()) LBranch(UseRegister(value), NULL, NULL);
} else {
- ASSERT(r.IsTagged());
+ DCHECK(r.IsTagged());
if (type.IsBoolean() || type.IsSmi() || type.IsJSArray() ||
type.IsHeapNumber()) {
// These types have simple checks that cannot deoptimize.
@@ -996,7 +1014,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
if (expected.IsGeneric() || expected.IsEmpty()) {
// The generic case cannot deoptimize because it already supports every
// possible input type.
- ASSERT(needs_temps);
+ DCHECK(needs_temps);
return new(zone()) LBranch(UseRegister(value), temp1, temp2);
} else {
return AssignEnvironment(
@@ -1018,7 +1036,7 @@ LInstruction* LChunkBuilder::DoCallJSFunction(
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
- const CallInterfaceDescriptor* descriptor = instr->descriptor();
+ const InterfaceDescriptor* descriptor = instr->descriptor();
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -1108,7 +1126,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
- ASSERT(to.IsInteger32());
+ DCHECK(to.IsInteger32());
if (val->type().IsSmi() || val->representation().IsSmi()) {
LOperand* value = UseRegisterAtStart(val);
return DefineAsRegister(new(zone()) LSmiUntag(value, false));
@@ -1132,7 +1150,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
} else {
- ASSERT(to.IsSmi() || to.IsInteger32());
+ DCHECK(to.IsSmi() || to.IsInteger32());
if (instr->CanTruncateToInt32()) {
LOperand* value = UseRegister(val);
return DefineAsRegister(new(zone()) LTruncateDoubleToIntOrSmi(value));
@@ -1164,7 +1182,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
return result;
} else {
- ASSERT(to.IsDouble());
+ DCHECK(to.IsDouble());
if (val->CheckFlag(HInstruction::kUint32)) {
return DefineAsRegister(
new(zone()) LUint32ToDouble(UseRegisterAtStart(val)));
@@ -1209,7 +1227,9 @@ LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckNonSmi(value);
- if (!instr->value()->IsHeapObject()) result = AssignEnvironment(result);
+ if (!instr->value()->type().IsHeapObject()) {
+ result = AssignEnvironment(result);
+ }
return result;
}
@@ -1229,7 +1249,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
} else if (input_rep.IsInteger32()) {
return DefineAsRegister(new(zone()) LClampIToUint8(reg));
} else {
- ASSERT(input_rep.IsSmiOrTagged());
+ DCHECK(input_rep.IsSmiOrTagged());
return AssignEnvironment(
DefineAsRegister(new(zone()) LClampTToUint8(reg,
TempRegister(),
@@ -1240,7 +1260,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return new(zone()) LClassOfTestAndBranch(value,
TempRegister(),
@@ -1250,36 +1270,32 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
HCompareNumericAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(r));
- ASSERT(instr->right()->representation().Equals(r));
+ DCHECK(instr->left()->representation().Equals(r));
+ DCHECK(instr->right()->representation().Equals(r));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCompareNumericAndBranch(left, right);
} else {
- ASSERT(r.IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- // TODO(all): In fact the only case that we can handle more efficiently is
- // when one of the operand is the constant 0. Currently the MacroAssembler
- // will be able to cope with any constant by loading it into an internal
- // scratch register. This means that if the constant is used more that once,
- // it will be loaded multiple times. Unfortunatly crankshaft already
- // duplicates constant loads, but we should modify the code below once this
- // issue has been addressed in crankshaft.
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ DCHECK(r.IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
+ if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
+ LOperand* left = UseConstant(instr->left());
+ LOperand* right = UseConstant(instr->right());
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ }
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
return new(zone()) LCompareNumericAndBranch(left, right);
}
}
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ DCHECK(instr->left()->representation().IsTagged());
+ DCHECK(instr->right()->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), x1);
LOperand* right = UseFixed(instr->right(), x0);
@@ -1302,9 +1318,6 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1312,10 +1325,7 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
return new(zone()) LCmpMapAndBranch(value, temp);
@@ -1376,9 +1386,9 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
@@ -1394,9 +1404,9 @@ LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
@@ -1413,9 +1423,9 @@ LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
@@ -1496,7 +1506,7 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
HGetCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
}
@@ -1509,7 +1519,7 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
HHasCachedArrayIndexAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
return new(zone()) LHasCachedArrayIndexAndBranch(
UseRegisterAtStart(instr->value()), TempRegister());
}
@@ -1517,7 +1527,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
HHasInstanceTypeAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return new(zone()) LHasInstanceTypeAndBranch(value, TempRegister());
}
@@ -1568,8 +1578,6 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
HCompareMinusZeroAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
LOperand* value = UseRegister(instr->value());
LOperand* scratch = TempRegister();
return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
@@ -1577,7 +1585,7 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
@@ -1586,7 +1594,7 @@ LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
return new(zone()) LIsStringAndBranch(value, temp);
@@ -1594,14 +1602,14 @@ LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
return new(zone()) LIsSmiAndBranch(UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
HIsUndetectableAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
}
@@ -1614,7 +1622,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- ASSERT(instr->argument_delta() == -argument_count);
+ DCHECK(instr->argument_delta() == -argument_count);
}
HEnvironment* outer =
@@ -1655,15 +1663,21 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object = UseFixed(instr->global_object(), x0);
+ LOperand* global_object = UseFixed(instr->global_object(),
+ LoadIC::ReceiverRegister());
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ vector = FixedTemp(LoadIC::VectorRegister());
+ }
+
LLoadGlobalGeneric* result =
- new(zone()) LLoadGlobalGeneric(context, global_object);
+ new(zone()) LLoadGlobalGeneric(context, global_object, vector);
return MarkAsCall(DefineFixed(result, x0), instr);
}
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsSmiOrInteger32());
+ DCHECK(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* elements = UseRegister(instr->elements());
LOperand* key = UseRegisterOrConstant(instr->key());
@@ -1681,7 +1695,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
} else {
- ASSERT(instr->representation().IsSmiOrTagged() ||
+ DCHECK(instr->representation().IsSmiOrTagged() ||
instr->representation().IsInteger32());
LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
LLoadKeyedFixed* result =
@@ -1691,7 +1705,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
: DefineAsRegister(result);
}
} else {
- ASSERT((instr->representation().IsInteger32() &&
+ DCHECK((instr->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(instr->elements_kind())));
@@ -1711,11 +1725,16 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), x1);
- LOperand* key = UseFixed(instr->key(), x0);
+ LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister());
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ vector = FixedTemp(LoadIC::VectorRegister());
+ }
LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), x0);
+ DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
+ x0);
return MarkAsCall(result, instr);
}
@@ -1728,9 +1747,14 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), x0);
+ LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister());
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ vector = FixedTemp(LoadIC::VectorRegister());
+ }
+
LInstruction* result =
- DefineFixed(new(zone()) LLoadNamedGeneric(context, object), x0);
+ DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), x0);
return MarkAsCall(result, instr);
}
@@ -1747,9 +1771,9 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegisterAtStart(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
@@ -1763,9 +1787,9 @@ LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LOperand* temp =
@@ -1807,14 +1831,14 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
left = UseRegisterAtStart(instr->BetterLeftOperand());
right = UseRegisterOrConstantAtStart(instr->BetterRightOperand());
} else {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
}
@@ -1823,14 +1847,15 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegisterAtStart(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
dividend, divisor));
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
result = AssignEnvironment(result);
}
return result;
@@ -1838,9 +1863,9 @@ LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
int32_t divisor = instr->right()->GetInteger32Constant();
LOperand* temp = TempRegister();
@@ -1854,9 +1879,9 @@ LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
LInstruction* LChunkBuilder::DoModI(HMod* instr) {
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LInstruction* result = DefineAsRegister(new(zone()) LModI(dividend, divisor));
@@ -1887,8 +1912,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
@@ -1946,7 +1971,7 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- ASSERT(argument_count_ == 0);
+ DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
current_block_->last_environment()->set_ast_id(instr->ast_id());
return AssignEnvironment(new(zone()) LOsrEntry);
@@ -1959,22 +1984,22 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
int spill_index = chunk_->GetParameterStackSlot(instr->index());
return DefineAsSpilled(result, spill_index);
} else {
- ASSERT(info()->IsStub());
+ DCHECK(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor->GetParameterRegister(index);
+ Register reg = descriptor->GetEnvironmentParameterRegister(index);
return DefineFixed(result, reg);
}
}
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- ASSERT(instr->representation().IsDouble());
+ DCHECK(instr->representation().IsDouble());
// We call a C function for double power. It can't trigger a GC.
// We need to use fixed result register for the call.
Representation exponent_type = instr->right()->representation();
- ASSERT(instr->left()->representation().IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), d0);
LOperand* right = exponent_type.IsInteger32()
? UseFixed(instr->right(), x12)
@@ -1988,9 +2013,21 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- LOperand* argument = UseRegister(instr->argument());
- return new(zone()) LPushArgument(argument);
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
+ AddInstruction(new(zone()) LPreparePushArguments(argc), instr);
+
+ LPushArguments* push_args = new(zone()) LPushArguments(zone());
+
+ for (int i = 0; i < argc; ++i) {
+ if (push_args->ShouldSplitPush()) {
+ AddInstruction(push_args, instr);
+ push_args = new(zone()) LPushArguments(zone());
+ }
+ push_args->AddArgument(UseRegister(instr->argument(i)));
+ }
+
+ return push_args;
}
@@ -2003,16 +2040,15 @@ LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
HValue* value = instr->value();
- ASSERT(value->representation().IsDouble());
+ DCHECK(value->representation().IsDouble());
return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
}
LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
- LOperand* lo = UseRegister(instr->lo());
+ LOperand* lo = UseRegisterAndClobber(instr->lo());
LOperand* hi = UseRegister(instr->hi());
- LOperand* temp = TempRegister();
- return DefineAsRegister(new(zone()) LConstructDouble(hi, lo, temp));
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
}
@@ -2058,8 +2094,8 @@ HBitwiseBinaryOperation* LChunkBuilder::CanTransformToShiftedOp(HValue* val,
HBinaryOperation* hinstr = HBinaryOperation::cast(val);
HValue* hleft = hinstr->left();
HValue* hright = hinstr->right();
- ASSERT(hleft->representation().Equals(hinstr->representation()));
- ASSERT(hright->representation().Equals(hinstr->representation()));
+ DCHECK(hleft->representation().Equals(hinstr->representation()));
+ DCHECK(hright->representation().Equals(hinstr->representation()));
if ((hright->IsConstant() &&
LikelyFitsImmField(hinstr, HConstant::cast(hright)->Integer32Value())) ||
@@ -2131,8 +2167,8 @@ LInstruction* LChunkBuilder::TryDoOpWithShiftedRightOperand(
LInstruction* LChunkBuilder::DoShiftedBinaryOp(
HBinaryOperation* hinstr, HValue* hleft, HBitwiseBinaryOperation* hshift) {
- ASSERT(hshift->IsBitwiseBinaryShift());
- ASSERT(!hshift->IsShr() || (JSShiftAmountFromHConstant(hshift->right()) > 0));
+ DCHECK(hshift->IsBitwiseBinaryShift());
+ DCHECK(!hshift->IsShr() || (JSShiftAmountFromHConstant(hshift->right()) > 0));
LTemplateResultInstruction<1>* res;
LOperand* left = UseRegisterAtStart(hleft);
@@ -2151,7 +2187,7 @@ LInstruction* LChunkBuilder::DoShiftedBinaryOp(
} else if (hinstr->IsAdd()) {
res = new(zone()) LAddI(left, right, shift_op, shift_amount);
} else {
- ASSERT(hinstr->IsSub());
+ DCHECK(hinstr->IsSub());
res = new(zone()) LSubI(left, right, shift_op, shift_amount);
}
if (hinstr->CheckFlag(HValue::kCanOverflow)) {
@@ -2167,10 +2203,10 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
return DoArithmeticT(op, instr);
}
- ASSERT(instr->representation().IsInteger32() ||
+ DCHECK(instr->representation().IsInteger32() ||
instr->representation().IsSmi());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
if (ShiftCanBeOptimizedAway(instr)) {
return NULL;
@@ -2209,7 +2245,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
if (instr->representation().IsInteger32()) {
result = DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
} else {
- ASSERT(instr->representation().IsSmi());
+ DCHECK(instr->representation().IsSmi());
result = DefineAsRegister(
new(zone()) LShiftS(op, left, right, temp, does_deopt));
}
@@ -2249,7 +2285,7 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(new(zone()) LStackCheck(context), instr);
} else {
- ASSERT(instr->is_backwards_branch());
+ DCHECK(instr->is_backwards_branch());
LOperand* context = UseAny(instr->context());
return AssignEnvironment(
AssignPointerMap(new(zone()) LStackCheck(context)));
@@ -2318,23 +2354,23 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
if (instr->is_typed_elements()) {
- ASSERT((instr->value()->representation().IsInteger32() &&
+ DCHECK((instr->value()->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(instr->elements_kind())));
- ASSERT((instr->is_fixed_typed_array() &&
+ DCHECK((instr->is_fixed_typed_array() &&
instr->elements()->representation().IsTagged()) ||
(instr->is_external() &&
instr->elements()->representation().IsExternal()));
return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
} else if (instr->value()->representation().IsDouble()) {
- ASSERT(instr->elements()->representation().IsTagged());
+ DCHECK(instr->elements()->representation().IsTagged());
return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp);
} else {
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsSmiOrTagged() ||
+ DCHECK(instr->elements()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsSmiOrTagged() ||
instr->value()->representation().IsInteger32());
return new(zone()) LStoreKeyedFixed(elements, key, val, temp);
}
@@ -2343,13 +2379,14 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), x2);
- LOperand* key = UseFixed(instr->key(), x1);
- LOperand* value = UseFixed(instr->value(), x0);
+ LOperand* object = UseFixed(instr->object(),
+ KeyedStoreIC::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister());
+ LOperand* value = UseFixed(instr->value(), KeyedStoreIC::ValueRegister());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsTagged());
+ DCHECK(instr->object()->representation().IsTagged());
+ DCHECK(instr->key()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
return MarkAsCall(
new(zone()) LStoreKeyedGeneric(context, object, key, value), instr);
@@ -2387,8 +2424,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), x1);
- LOperand* value = UseFixed(instr->value(), x0);
+ LOperand* object = UseFixed(instr->object(), StoreIC::ReceiverRegister());
+ LOperand* value = UseFixed(instr->value(), StoreIC::ValueRegister());
+
LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
return MarkAsCall(result, instr);
}
@@ -2425,8 +2463,8 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LInstruction* LChunkBuilder::DoStringCompareAndBranch(
HStringCompareAndBranch* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ DCHECK(instr->left()->representation().IsTagged());
+ DCHECK(instr->right()->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), x1);
LOperand* right = UseFixed(instr->right(), x0);
@@ -2438,8 +2476,8 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
if (shifted_operation != NULL) {
@@ -2527,9 +2565,6 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
// We only need temp registers in some cases, but we can't dereference the
// instr->type_literal() handle to test that here.
LOperand* temp1 = TempRegister();
@@ -2563,8 +2598,8 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
}
}
case kMathExp: {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegister(instr->value());
LOperand* double_temp1 = TempDoubleRegister();
LOperand* temp1 = TempRegister();
@@ -2575,52 +2610,58 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DefineAsRegister(result);
}
case kMathFloor: {
- ASSERT(instr->value()->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegisterAtStart(instr->value());
if (instr->representation().IsInteger32()) {
LMathFloorI* result = new(zone()) LMathFloorI(input);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
} else {
- ASSERT(instr->representation().IsDouble());
+ DCHECK(instr->representation().IsDouble());
LMathFloorD* result = new(zone()) LMathFloorD(input);
return DefineAsRegister(result);
}
}
case kMathLog: {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseFixedDouble(instr->value(), d0);
LMathLog* result = new(zone()) LMathLog(input);
return MarkAsCall(DefineFixedDouble(result, d0), instr);
}
case kMathPowHalf: {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegister(instr->value());
return DefineAsRegister(new(zone()) LMathPowHalf(input));
}
case kMathRound: {
- ASSERT(instr->value()->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegister(instr->value());
if (instr->representation().IsInteger32()) {
LOperand* temp = TempDoubleRegister();
LMathRoundI* result = new(zone()) LMathRoundI(input, temp);
return AssignEnvironment(DefineAsRegister(result));
} else {
- ASSERT(instr->representation().IsDouble());
+ DCHECK(instr->representation().IsDouble());
LMathRoundD* result = new(zone()) LMathRoundD(input);
return DefineAsRegister(result);
}
}
+ case kMathFround: {
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ LMathFround* result = new (zone()) LMathFround(input);
+ return DefineAsRegister(result);
+ }
case kMathSqrt: {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LMathSqrt(input));
}
case kMathClz32: {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->value()->representation().IsInteger32());
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->value()->representation().IsInteger32());
LOperand* input = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LMathClz32(input));
}
@@ -2695,4 +2736,20 @@ LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
}
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->context());
+ return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+ HAllocateBlockContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LAllocateBlockContext* result =
+ new(zone()) LAllocateBlockContext(context, function);
+ return MarkAsCall(DefineFixed(result, cp), instr);
+}
+
+
} } // namespace v8::internal
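
The dominant mechanical change in this file (and in the header below) is the rename of ASSERT to DCHECK. As an illustrative stand-in only: the new macro behaves like a debug-only check that compiles away in release builds; the real definition lives elsewhere in the tree and also provides comparison variants such as DCHECK_EQ. A minimal sketch, assuming a DEBUG compile-time flag:

#include <cstdio>
#include <cstdlib>

// SKETCH_DCHECK is a hypothetical stand-in, not the real macro: it aborts
// with the failing expression text in debug builds and expands to nothing
// otherwise.
#ifdef DEBUG
#define SKETCH_DCHECK(condition)                              \
  do {                                                        \
    if (!(condition)) {                                       \
      std::fprintf(stderr, "Check failed: %s\n", #condition); \
      std::abort();                                           \
    }                                                         \
  } while (false)
#else
#define SKETCH_DCHECK(condition) ((void)0)
#endif
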
diff --git a/deps/v8/src/arm64/lithium-arm64.h b/deps/v8/src/arm64/lithium-arm64.h
index 3abc388fe..21a5f7414 100644
--- a/deps/v8/src/arm64/lithium-arm64.h
+++ b/deps/v8/src/arm64/lithium-arm64.h
@@ -5,11 +5,11 @@
#ifndef V8_ARM64_LITHIUM_ARM64_H_
#define V8_ARM64_LITHIUM_ARM64_H_
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
+#include "src/hydrogen.h"
+#include "src/lithium.h"
+#include "src/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -17,159 +17,163 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddE) \
- V(AddI) \
- V(AddS) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(BitI) \
- V(BitS) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallFunction) \
- V(CallJSFunction) \
- V(CallNew) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CallStub) \
- V(CallWithDescriptor) \
- V(CheckInstanceType) \
- V(CheckMapValue) \
- V(CheckMaps) \
- V(CheckNonSmi) \
- V(CheckSmi) \
- V(CheckValue) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CmpHoleAndBranchD) \
- V(CmpHoleAndBranchT) \
- V(CmpMapAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpT) \
- V(CompareMinusZeroAndBranch) \
- V(CompareNumericAndBranch) \
- V(ConstantD) \
- V(ConstantE) \
- V(ConstantI) \
- V(ConstantS) \
- V(ConstantT) \
- V(ConstructDouble) \
- V(Context) \
- V(DateField) \
- V(DebugBreak) \
- V(DeclareGlobals) \
- V(Deoptimize) \
- V(DivByConstI) \
- V(DivByPowerOf2I) \
- V(DivI) \
- V(DoubleBits) \
- V(DoubleToIntOrSmi) \
- V(Drop) \
- V(Dummy) \
- V(DummyUse) \
- V(FlooringDivByConstI) \
- V(FlooringDivByPowerOf2I) \
- V(FlooringDivI) \
- V(ForInCacheArray) \
- V(ForInPrepareMap) \
- V(FunctionLiteral) \
- V(GetCachedArrayIndex) \
- V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(InnerAllocatedObject) \
- V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
- V(IsObjectAndBranch) \
- V(IsSmiAndBranch) \
- V(IsStringAndBranch) \
- V(IsUndetectableAndBranch) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadFieldByIndex) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyedExternal) \
- V(LoadKeyedFixed) \
- V(LoadKeyedFixedDouble) \
- V(LoadKeyedGeneric) \
- V(LoadNamedField) \
- V(LoadNamedGeneric) \
- V(LoadRoot) \
- V(MapEnumLength) \
- V(MathAbs) \
- V(MathAbsTagged) \
- V(MathClz32) \
- V(MathExp) \
- V(MathFloorD) \
- V(MathFloorI) \
- V(MathLog) \
- V(MathMinMax) \
- V(MathPowHalf) \
- V(MathRoundD) \
- V(MathRoundI) \
- V(MathSqrt) \
- V(ModByConstI) \
- V(ModByPowerOf2I) \
- V(ModI) \
- V(MulConstIS) \
- V(MulI) \
- V(MulS) \
- V(NumberTagD) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(OsrEntry) \
- V(Parameter) \
- V(Power) \
- V(PushArgument) \
- V(RegExpLiteral) \
- V(Return) \
- V(SeqStringGetChar) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(ShiftS) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreCodeEntry) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreKeyedExternal) \
- V(StoreKeyedFixed) \
- V(StoreKeyedFixedDouble) \
- V(StoreKeyedGeneric) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(SubI) \
- V(SubS) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(ToFastProperties) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(TruncateDoubleToIntOrSmi) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(Uint32ToDouble) \
- V(UnknownOSRValue) \
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddE) \
+ V(AddI) \
+ V(AddS) \
+ V(Allocate) \
+ V(AllocateBlockContext) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(BitI) \
+ V(BitS) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallFunction) \
+ V(CallJSFunction) \
+ V(CallNew) \
+ V(CallNewArray) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CallWithDescriptor) \
+ V(CheckInstanceType) \
+ V(CheckMapValue) \
+ V(CheckMaps) \
+ V(CheckNonSmi) \
+ V(CheckSmi) \
+ V(CheckValue) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
+ V(ClassOfTestAndBranch) \
+ V(CmpHoleAndBranchD) \
+ V(CmpHoleAndBranchT) \
+ V(CmpMapAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpT) \
+ V(CompareMinusZeroAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(ConstantD) \
+ V(ConstantE) \
+ V(ConstantI) \
+ V(ConstantS) \
+ V(ConstantT) \
+ V(ConstructDouble) \
+ V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
+ V(DeclareGlobals) \
+ V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
+ V(DivI) \
+ V(DoubleBits) \
+ V(DoubleToIntOrSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
+ V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
+ V(InstanceOf) \
+ V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCallAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsStringAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadFieldByIndex) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyedExternal) \
+ V(LoadKeyedFixed) \
+ V(LoadKeyedFixedDouble) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(LoadRoot) \
+ V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathAbsTagged) \
+ V(MathClz32) \
+ V(MathExp) \
+ V(MathFloorD) \
+ V(MathFloorI) \
+ V(MathFround) \
+ V(MathLog) \
+ V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRoundD) \
+ V(MathRoundI) \
+ V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
+ V(ModI) \
+ V(MulConstIS) \
+ V(MulI) \
+ V(MulS) \
+ V(NumberTagD) \
+ V(NumberTagU) \
+ V(NumberUntagD) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(PreparePushArguments) \
+ V(PushArguments) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(SeqStringGetChar) \
+ V(SeqStringSetChar) \
+ V(ShiftI) \
+ V(ShiftS) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreCodeEntry) \
+ V(StoreContextSlot) \
+ V(StoreFrameContext) \
+ V(StoreGlobalCell) \
+ V(StoreKeyedExternal) \
+ V(StoreKeyedFixed) \
+ V(StoreKeyedFixedDouble) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(SubI) \
+ V(SubS) \
+ V(TaggedToI) \
+ V(ThisFunction) \
+ V(ToFastProperties) \
+ V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
+ V(TruncateDoubleToIntOrSmi) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
+ V(UnknownOSRValue) \
V(WrapReceiver)
@@ -182,7 +186,7 @@ class LCodeGen;
return mnemonic; \
} \
static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
+ DCHECK(instr->Is##type()); \
return reinterpret_cast<L##type*>(instr); \
}
@@ -230,6 +234,9 @@ class LInstruction : public ZoneObject {
virtual bool IsControl() const { return false; }
+ // Try deleting this instruction if possible.
+ virtual bool TryDelete() { return false; }
+
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
bool HasEnvironment() const { return environment_ != NULL; }
@@ -384,7 +391,7 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
virtual bool IsGap() const V8_OVERRIDE { return true; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
- ASSERT(instr->IsGap());
+ DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
}
@@ -445,7 +452,7 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- explicit LDummy() { }
+ LDummy() {}
DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
};
@@ -936,7 +943,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
- LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
+ explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -1040,17 +1047,15 @@ class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LConstructDouble(LOperand* hi, LOperand* lo, LOperand* temp) {
+ LConstructDouble(LOperand* hi, LOperand* lo) {
inputs_[0] = hi;
inputs_[1] = lo;
- temps_[0] = temp;
}
LOperand* hi() { return inputs_[0]; }
LOperand* lo() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
};
@@ -1288,6 +1293,7 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@@ -1517,18 +1523,18 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
public:
- LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
- ZoneList<LOperand*>& operands,
+ LCallWithDescriptor(const InterfaceDescriptor* descriptor,
+ const ZoneList<LOperand*>& operands,
Zone* zone)
: descriptor_(descriptor),
- inputs_(descriptor->environment_length() + 1, zone) {
- ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_(descriptor->GetRegisterParameterCount() + 1, zone) {
+ DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length());
inputs_.AddAll(operands, zone);
}
LOperand* target() const { return inputs_[0]; }
- const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+ const InterfaceDescriptor* descriptor() { return descriptor_; }
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
@@ -1538,7 +1544,7 @@ class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
int arity() const { return hydrogen()->argument_count() - 1; }
- const CallInterfaceDescriptor* descriptor_;
+ const InterfaceDescriptor* descriptor_;
ZoneList<LOperand*> inputs_;
// Iterator support.
@@ -1718,15 +1724,18 @@ class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = global_object;
+ temps_[0] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* global_object() { return inputs_[1]; }
+ LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
@@ -1758,15 +1767,15 @@ class LLoadKeyed : public LTemplateInstruction<1, 2, T> {
bool is_typed_elements() const {
return is_external() || is_fixed_typed_array();
}
- uint32_t additional_index() const {
- return this->hydrogen()->index_offset();
+ uint32_t base_offset() const {
+ return this->hydrogen()->base_offset();
}
void PrintDataTo(StringStream* stream) V8_OVERRIDE {
this->elements()->PrintTo(stream);
stream->Add("[");
this->key()->PrintTo(stream);
- if (this->hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", this->additional_index());
+ if (this->base_offset() != 0) {
+ stream->Add(" + %d]", this->base_offset());
} else {
stream->Add("]");
}
@@ -1815,31 +1824,37 @@ class LLoadKeyedFixedDouble: public LLoadKeyed<1> {
};
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
public:
- LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = key;
+ temps_[0] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
+ LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
+ temps_[0] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
+ LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
@@ -2072,6 +2087,14 @@ class LMathRoundI V8_FINAL : public LUnaryMathOperation<1> {
};
+class LMathFround V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathFround(LOperand* value) : LUnaryMathOperation<0>(value) {}
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
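
LMathFround is new in this patch; it backs Math.fround, which rounds a double to the nearest single-precision value and widens it back. A minimal sketch of the semantics in portable C++ (the emitted arm64 code would use fcvt instructions rather than casts):

#include <cstdio>

double FroundSketch(double value) {
  // Narrow to the nearest representable float32, then widen back to double.
  // NaN and infinity pass through; precision beyond float32 is dropped.
  return static_cast<double>(static_cast<float>(value));
}

int main() {
  std::printf("%.17g\n", FroundSketch(1.1));  // prints 1.1000000238418579
  return 0;
}
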
+
+
class LMathSqrt V8_FINAL : public LUnaryMathOperation<0> {
public:
explicit LMathSqrt(LOperand* value) : LUnaryMathOperation<0>(value) { }
@@ -2250,15 +2273,50 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LPreparePushArguments V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
+ explicit LPreparePushArguments(int argc) : argc_(argc) {}
+
+ inline int argc() const { return argc_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(PreparePushArguments, "prepare-push-arguments")
+
+ protected:
+ int argc_;
+};
+
+
+class LPushArguments V8_FINAL : public LTemplateResultInstruction<0> {
+ public:
+ explicit LPushArguments(Zone* zone,
+ int capacity = kRecommendedMaxPushedArgs)
+ : zone_(zone), inputs_(capacity, zone) {}
+
+ LOperand* argument(int i) { return inputs_[i]; }
+ int ArgumentCount() const { return inputs_.length(); }
+
+ void AddArgument(LOperand* arg) { inputs_.Add(arg, zone_); }
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArguments, "push-arguments")
+
+ // It is better to limit the number of arguments pushed simultaneously to
+ // avoid pressure on the register allocator.
+ static const int kRecommendedMaxPushedArgs = 4;
+ bool ShouldSplitPush() const {
+ return inputs_.length() >= kRecommendedMaxPushedArgs;
}
- LOperand* value() { return inputs_[0]; }
+ protected:
+ Zone* zone_;
+ ZoneList<LOperand*> inputs_;
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+ private:
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
};
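
The comment inside LPushArguments explains the capacity limit: pushing too many operands in one instruction puts pressure on the register allocator, so kRecommendedMaxPushedArgs caps a single push and ShouldSplitPush signals when to start a new one. A sketch of how a caller might batch a long argument list under that limit; the EmitBatch callback is hypothetical:

#include <functional>
#include <vector>

static const int kMaxPerPush = 4;  // mirrors kRecommendedMaxPushedArgs above

template <typename Operand>
void SplitPushes(const std::vector<Operand>& args,
                 const std::function<void(const std::vector<Operand>&)>& EmitBatch) {
  std::vector<Operand> batch;
  for (const Operand& arg : args) {
    batch.push_back(arg);
    if (static_cast<int>(batch.size()) >= kMaxPerPush) {
      EmitBatch(batch);  // roughly one LPushArguments instruction
      batch.clear();
    }
  }
  if (!batch.empty()) EmitBatch(batch);  // remainder
}
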
@@ -2290,7 +2348,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
return parameter_count()->IsConstantOperand();
}
LConstantOperand* constant_parameter_count() {
- ASSERT(has_constant_parameter_count());
+ DCHECK(has_constant_parameter_count());
return LConstantOperand::cast(parameter_count());
}
@@ -2420,20 +2478,20 @@ class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
}
return this->hydrogen()->NeedsCanonicalization();
}
- uint32_t additional_index() const { return this->hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return this->hydrogen()->base_offset(); }
void PrintDataTo(StringStream* stream) V8_OVERRIDE {
this->elements()->PrintTo(stream);
stream->Add("[");
this->key()->PrintTo(stream);
- if (this->hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", this->additional_index());
+ if (this->base_offset() != 0) {
+ stream->Add(" + %d] <-", this->base_offset());
} else {
stream->Add("] <- ");
}
if (this->value() == NULL) {
- ASSERT(hydrogen()->IsConstantHoleStore() &&
+ DCHECK(hydrogen()->IsConstantHoleStore() &&
hydrogen()->value()->representation().IsDouble());
stream->Add("<the hole(nan)>");
} else {
@@ -2451,7 +2509,7 @@ class LStoreKeyedExternal V8_FINAL : public LStoreKeyed<1> {
LOperand* temp) :
LStoreKeyed<1>(elements, key, value) {
temps_[0] = temp;
- };
+ }
LOperand* temp() { return temps_[0]; }
@@ -2465,7 +2523,7 @@ class LStoreKeyedFixed V8_FINAL : public LStoreKeyed<1> {
LOperand* temp) :
LStoreKeyed<1>(elements, key, value) {
temps_[0] = temp;
- };
+ }
LOperand* temp() { return temps_[0]; }
@@ -2479,7 +2537,7 @@ class LStoreKeyedFixedDouble V8_FINAL : public LStoreKeyed<1> {
LOperand* temp) :
LStoreKeyed<1>(elements, key, value) {
temps_[0] = temp;
- };
+ }
LOperand* temp() { return temps_[0]; }
@@ -2962,6 +3020,35 @@ class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
+class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStoreFrameContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAllocateBlockContext(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
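
For readers new to these declarations: the three template arguments to LTemplateInstruction appear to encode the number of results, inputs and temps, which is why LStoreFrameContext is <0, 1, 0> (no result, one input) and LAllocateBlockContext is <1, 2, 0>. A stripped-down, illustrative stand-in for the pattern, with hypothetical names:

struct LOperandSketch {};  // stand-in for LOperand

template <int kResults, int kInputs, int kTemps>
class LTemplateInstructionSketch {
 protected:
  LOperandSketch* inputs_[kInputs];
  LOperandSketch* temps_[kTemps];
  // The real base class also holds result slots and codegen/printing hooks.
};

// A hypothetical instruction with one result, two inputs and one temp,
// showing how accessors like those above index into inputs_ and temps_.
class LExampleOp : public LTemplateInstructionSketch<1, 2, 1> {
 public:
  LExampleOp(LOperandSketch* context, LOperandSketch* value,
             LOperandSketch* temp) {
    inputs_[0] = context;
    inputs_[1] = value;
    temps_[0] = temp;
  }
  LOperandSketch* context() { return inputs_[0]; }
  LOperandSketch* value() { return inputs_[1]; }
  LOperandSketch* temp() { return temps_[0]; }
};
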
+
+
class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
@@ -3003,8 +3090,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
// Build the sequence for the graph.
LPlatformChunk* Build();
- LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
-
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -3092,6 +3177,8 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
// Temporary operand that must be in a double register.
MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+ MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+
// Temporary operand that must be in a fixed double register.
MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
@@ -3123,6 +3210,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* AssignEnvironment(LInstruction* instr);
void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
void DoBasicBlock(HBasicBlock* block);
int JSShiftAmountFromHConstant(HValue* constant) {
@@ -3132,7 +3220,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
if (instr->IsAdd() || instr->IsSub()) {
return Assembler::IsImmAddSub(imm) || Assembler::IsImmAddSub(-imm);
} else {
- ASSERT(instr->IsBitwise());
+ DCHECK(instr->IsBitwise());
unsigned unused_n, unused_imm_s, unused_imm_r;
return Assembler::IsImmLogical(imm, kWRegSizeInBits,
&unused_n, &unused_imm_s, &unused_imm_r);
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.cc b/deps/v8/src/arm64/lithium-codegen-arm64.cc
index 610502a7f..53a1cfac4 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#include "arm64/lithium-codegen-arm64.h"
-#include "arm64/lithium-gap-resolver-arm64.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-#include "hydrogen-osr.h"
+#include "src/arm64/lithium-codegen-arm64.h"
+#include "src/arm64/lithium-gap-resolver-arm64.h"
+#include "src/code-stubs.h"
+#include "src/hydrogen-osr.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -56,7 +56,7 @@ class BranchOnCondition : public BranchGenerator {
virtual void EmitInverted(Label* label) const {
if (cond_ != al) {
- __ B(InvertCondition(cond_), label);
+ __ B(NegateCondition(cond_), label);
}
}
@@ -86,7 +86,7 @@ class CompareAndBranch : public BranchGenerator {
}
virtual void EmitInverted(Label* label) const {
- __ CompareAndBranch(lhs_, rhs_, InvertCondition(cond_), label);
+ __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);
}
private:
@@ -136,7 +136,7 @@ class TestAndBranch : public BranchGenerator {
break;
default:
__ Tst(value_, mask_);
- __ B(InvertCondition(cond_), label);
+ __ B(NegateCondition(cond_), label);
}
}
@@ -238,13 +238,13 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
case JS_GETTER:
- ASSERT(translation_size == 1);
- ASSERT(height == 0);
+ DCHECK(translation_size == 1);
+ DCHECK(height == 0);
translation->BeginGetterStubFrame(closure_id);
break;
case JS_SETTER:
- ASSERT(translation_size == 2);
- ASSERT(height == 0);
+ DCHECK(translation_size == 2);
+ DCHECK(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
case STUB:
@@ -386,7 +386,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode) {
- ASSERT(instr != NULL);
+ DCHECK(instr != NULL);
Assembler::BlockPoolsScope scope(masm_);
__ Call(code, mode);
@@ -402,9 +402,9 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->function()).Is(x1));
- ASSERT(ToRegister(instr->result()).Is(x0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->function()).Is(x1));
+ DCHECK(ToRegister(instr->result()).Is(x0));
int arity = instr->arity();
CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
@@ -414,9 +414,9 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(instr->IsMarkedAsCall());
- ASSERT(ToRegister(instr->constructor()).is(x1));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(instr->IsMarkedAsCall());
+ DCHECK(ToRegister(instr->constructor()).is(x1));
__ Mov(x0, instr->arity());
// No cell in x2 for construct type feedback in optimized code.
@@ -426,14 +426,14 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
after_push_argument_ = false;
- ASSERT(ToRegister(instr->result()).is(x0));
+ DCHECK(ToRegister(instr->result()).is(x0));
}
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- ASSERT(instr->IsMarkedAsCall());
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->constructor()).is(x1));
+ DCHECK(instr->IsMarkedAsCall());
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->constructor()).is(x1));
__ Mov(x0, Operand(instr->arity()));
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
@@ -474,7 +474,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
}
after_push_argument_ = false;
- ASSERT(ToRegister(instr->result()).is(x0));
+ DCHECK(ToRegister(instr->result()).is(x0));
}
@@ -482,7 +482,7 @@ void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr,
SaveFPRegsMode save_doubles) {
- ASSERT(instr != NULL);
+ DCHECK(instr != NULL);
__ CallRuntime(function, num_arguments, save_doubles);
@@ -529,7 +529,7 @@ void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
} else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kLazyDeopt);
}
@@ -540,7 +540,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode deopt_mode) {
- ASSERT(expected_safepoint_kind_ == kind);
+ DCHECK(expected_safepoint_kind_ == kind);
const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
Safepoint safepoint = safepoints_.DefineSafepoint(
@@ -580,16 +580,9 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordSafepointWithRegistersAndDoubles(
- LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(
- pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
-}
-
-
bool LCodeGen::GenerateCode() {
LPhase phase("Z_Code generation", chunk());
- ASSERT(is_unused());
+ DCHECK(is_unused());
status_ = GENERATING;
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -606,8 +599,8 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::SaveCallerDoubles() {
- ASSERT(info()->saves_caller_doubles());
- ASSERT(NeedsEagerFrame());
+ DCHECK(info()->saves_caller_doubles());
+ DCHECK(NeedsEagerFrame());
Comment(";;; Save clobbered callee double registers");
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator iterator(doubles);
@@ -624,8 +617,8 @@ void LCodeGen::SaveCallerDoubles() {
void LCodeGen::RestoreCallerDoubles() {
- ASSERT(info()->saves_caller_doubles());
- ASSERT(NeedsEagerFrame());
+ DCHECK(info()->saves_caller_doubles());
+ DCHECK(NeedsEagerFrame());
Comment(";;; Restore clobbered callee double registers");
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator iterator(doubles);
@@ -642,7 +635,7 @@ void LCodeGen::RestoreCallerDoubles() {
bool LCodeGen::GeneratePrologue() {
- ASSERT(is_generating());
+ DCHECK(is_generating());
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
@@ -661,17 +654,21 @@ bool LCodeGen::GeneratePrologue() {
__ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
__ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
__ Poke(x10, receiver_offset);
__ Bind(&ok);
}
}
- ASSERT(__ StackPointer().Is(jssp));
+ DCHECK(__ StackPointer().Is(jssp));
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
- __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+ if (info()->IsStub()) {
+ __ StubPrologue();
+ } else {
+ __ Prologue(info()->IsCodePreAgingActive());
+ }
frame_is_built_ = true;
info_->AddNoFrameRange(0, masm_->pc_offset());
}
@@ -690,13 +687,16 @@ bool LCodeGen::GeneratePrologue() {
int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
+ bool need_write_barrier = true;
// Argument to NewContext is the function, which is in x1.
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
__ Push(x1);
- __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in x0. It replaces the context passed to us. It's
@@ -719,8 +719,15 @@ bool LCodeGen::GeneratePrologue() {
MemOperand target = ContextMemOperand(cp, var->index());
__ Str(value, target);
// Update the write barrier. This clobbers value and scratch.
- __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
- GetLinkRegisterState(), kSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
+ GetLinkRegisterState(), kSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
Comment(";;; End allocate local context");
@@ -747,7 +754,7 @@ void LCodeGen::GenerateOsrPrologue() {
// Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- ASSERT(slots >= 0);
+ DCHECK(slots >= 0);
__ Claim(slots);
}
@@ -763,7 +770,7 @@ void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
bool LCodeGen::GenerateDeferredCode() {
- ASSERT(is_generating());
+ DCHECK(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
LDeferredCode* code = deferred_[i];
@@ -783,8 +790,8 @@ bool LCodeGen::GenerateDeferredCode() {
if (NeedsDeferredFrame()) {
Comment(";;; Build frame");
- ASSERT(!frame_is_built_);
- ASSERT(info()->IsStub());
+ DCHECK(!frame_is_built_);
+ DCHECK(info()->IsStub());
frame_is_built_ = true;
__ Push(lr, fp, cp);
__ Mov(fp, Smi::FromInt(StackFrame::STUB));
@@ -798,7 +805,7 @@ bool LCodeGen::GenerateDeferredCode() {
if (NeedsDeferredFrame()) {
Comment(";;; Destroy frame");
- ASSERT(frame_is_built_);
+ DCHECK(frame_is_built_);
__ Pop(xzr, cp, fp, lr);
frame_is_built_ = false;
}
@@ -818,51 +825,82 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateDeoptJumpTable() {
+ Label needs_frame, restore_caller_doubles, call_deopt_entry;
+
if (deopt_jump_table_.length() > 0) {
Comment(";;; -------------------- Jump table --------------------");
- }
- Label table_start;
- __ bind(&table_start);
- Label needs_frame;
- for (int i = 0; i < deopt_jump_table_.length(); i++) {
- __ Bind(&deopt_jump_table_[i]->label);
- Address entry = deopt_jump_table_[i]->address;
- Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
- if (deopt_jump_table_[i]->needs_frame) {
- ASSERT(!info()->saves_caller_doubles());
+ Address base = deopt_jump_table_[0]->address;
- UseScratchRegisterScope temps(masm());
- Register stub_deopt_entry = temps.AcquireX();
- Register stub_marker = temps.AcquireX();
+ UseScratchRegisterScope temps(masm());
+ Register entry_offset = temps.AcquireX();
+
+ int length = deopt_jump_table_.length();
+ for (int i = 0; i < length; i++) {
+ __ Bind(&deopt_jump_table_[i]->label);
- __ Mov(stub_deopt_entry, ExternalReference::ForDeoptEntry(entry));
- if (needs_frame.is_bound()) {
- __ B(&needs_frame);
+ Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
+ Address entry = deopt_jump_table_[i]->address;
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
} else {
- __ Bind(&needs_frame);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
- __ Push(lr, fp, cp, stub_marker);
- __ Add(fp, __ StackPointer(), 2 * kPointerSize);
- __ Call(stub_deopt_entry);
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
}
- } else {
- if (info()->saves_caller_doubles()) {
- ASSERT(info()->IsStub());
- RestoreCallerDoubles();
+
+ // Second-level deopt table entries are contiguous and small, so instead
+ // of loading the full, absolute address of each one, load the base
+ // address and add an immediate offset.
+ __ Mov(entry_offset, entry - base);
+
+ // The last entry can fall through into `call_deopt_entry`, avoiding a
+ // branch.
+ bool last_entry = (i + 1) == length;
+
+ if (deopt_jump_table_[i]->needs_frame) {
+ DCHECK(!info()->saves_caller_doubles());
+ if (!needs_frame.is_bound()) {
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ DCHECK(info()->IsStub());
+
+ UseScratchRegisterScope temps(masm());
+ Register stub_marker = temps.AcquireX();
+ __ Bind(&needs_frame);
+ __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
+ __ Push(lr, fp, cp, stub_marker);
+ __ Add(fp, __ StackPointer(), 2 * kPointerSize);
+ if (!last_entry) __ B(&call_deopt_entry);
+ } else {
+ // Reuse the existing needs_frame code.
+ __ B(&needs_frame);
+ }
+ } else if (info()->saves_caller_doubles()) {
+ DCHECK(info()->IsStub());
+ if (!restore_caller_doubles.is_bound()) {
+ __ Bind(&restore_caller_doubles);
+ RestoreCallerDoubles();
+ if (!last_entry) __ B(&call_deopt_entry);
+ } else {
+ // Reuse the existing restore_caller_doubles code.
+ __ B(&restore_caller_doubles);
+ }
+ } else {
+ // There is nothing special to do, so just continue to the second-level
+ // table.
+ if (!last_entry) __ B(&call_deopt_entry);
}
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+
+ masm()->CheckConstPool(false, last_entry);
}
- masm()->CheckConstPool(false, false);
+
+ // Generate common code for calling the second-level deopt table.
+ Register deopt_entry = temps.AcquireX();
+ __ Bind(&call_deopt_entry);
+ __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
+ RelocInfo::RUNTIME_ENTRY));
+ __ Add(deopt_entry, deopt_entry, entry_offset);
+ __ Call(deopt_entry);
}
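
The rewritten jump table above exploits the fact that second-level deopt entries are laid out contiguously: each table entry now materializes only a small offset from the first entry, and a single shared tail adds it to the base address and makes the call, instead of loading a full 64-bit address per entry. The addressing idea, as a minimal sketch:

#include <cstdint>
#include <vector>

// `addresses` stands in for deopt_jump_table_[i]->address; the first entry is
// used as the base, so every other entry only needs a cheap immediate delta.
std::vector<int64_t> OffsetsFromBase(const std::vector<uint64_t>& addresses) {
  std::vector<int64_t> offsets;
  const uint64_t base = addresses.empty() ? 0 : addresses[0];
  for (uint64_t address : addresses) {
    offsets.push_back(static_cast<int64_t>(address - base));
  }
  return offsets;  // target = base + offsets[i], computed once in the tail
}
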
// Force constant pool emission at the end of the deopt jump table to make
@@ -877,7 +915,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
bool LCodeGen::GenerateSafepointTable() {
- ASSERT(is_done());
+ DCHECK(is_done());
// We do not know how much data will be emitted for the safepoint table, so
// force emission of the veneer pool.
masm()->CheckVeneerPool(true, true);
@@ -887,7 +925,7 @@ bool LCodeGen::GenerateSafepointTable() {
void LCodeGen::FinishCode(Handle<Code> code) {
- ASSERT(is_done());
+ DCHECK(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
@@ -900,7 +938,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
+ DeoptimizationInputData::New(isolate(), length, 0, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
@@ -942,7 +980,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- ASSERT(deoptimization_literals_.length() == 0);
+ DCHECK(deoptimization_literals_.length() == 0);
const ZoneList<Handle<JSFunction> >* inlined_closures =
chunk()->inlined_closures();
@@ -967,8 +1005,8 @@ void LCodeGen::DeoptimizeBranch(
bailout_type = *override_bailout_type;
}
- ASSERT(environment->HasBeenRegistered());
- ASSERT(info()->IsOptimizing() || info()->IsStub());
+ DCHECK(environment->HasBeenRegistered());
+ DCHECK(info()->IsOptimizing() || info()->IsStub());
int id = environment->deoptimization_index();
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
@@ -990,7 +1028,7 @@ void LCodeGen::DeoptimizeBranch(
__ Mov(w1, FLAG_deopt_every_n_times);
__ Str(w1, MemOperand(x0));
__ Pop(x2, x1, x0);
- ASSERT(frame_is_built_);
+ DCHECK(frame_is_built_);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
__ Unreachable();
@@ -1007,7 +1045,7 @@ void LCodeGen::DeoptimizeBranch(
__ Bind(&dont_trap);
}
- ASSERT(info()->IsStub() || frame_is_built_);
+ DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to build frame, or restore caller doubles.
if (branch_type == always &&
frame_is_built_ && !info()->saves_caller_doubles()) {
@@ -1114,7 +1152,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- ASSERT((padding_size % kInstructionSize) == 0);
+ DCHECK((padding_size % kInstructionSize) == 0);
InstructionAccurateScope instruction_accurate(
masm(), padding_size / kInstructionSize);
@@ -1130,16 +1168,16 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
Register LCodeGen::ToRegister(LOperand* op) const {
// TODO(all): support zero register results, as ToRegister32.
- ASSERT((op != NULL) && op->IsRegister());
+ DCHECK((op != NULL) && op->IsRegister());
return Register::FromAllocationIndex(op->index());
}
Register LCodeGen::ToRegister32(LOperand* op) const {
- ASSERT(op != NULL);
+ DCHECK(op != NULL);
if (op->IsConstantOperand()) {
// If this is a constant operand, the result must be the zero register.
- ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
+ DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
return wzr;
} else {
return ToRegister(op).W();
@@ -1154,27 +1192,27 @@ Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- ASSERT((op != NULL) && op->IsDoubleRegister());
+ DCHECK((op != NULL) && op->IsDoubleRegister());
return DoubleRegister::FromAllocationIndex(op->index());
}
Operand LCodeGen::ToOperand(LOperand* op) {
- ASSERT(op != NULL);
+ DCHECK(op != NULL);
if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsSmi()) {
- ASSERT(constant->HasSmiValue());
+ DCHECK(constant->HasSmiValue());
return Operand(Smi::FromInt(constant->Integer32Value()));
} else if (r.IsInteger32()) {
- ASSERT(constant->HasInteger32Value());
+ DCHECK(constant->HasInteger32Value());
return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
Abort(kToOperandUnsupportedDoubleImmediate);
}
- ASSERT(r.IsTagged());
+ DCHECK(r.IsTagged());
return Operand(constant->handle(isolate()));
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
@@ -1199,7 +1237,7 @@ Operand LCodeGen::ToOperand32U(LOperand* op) {
Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
- ASSERT(op != NULL);
+ DCHECK(op != NULL);
if (op->IsRegister()) {
return Operand(ToRegister32(op));
} else if (op->IsConstantOperand()) {
@@ -1207,7 +1245,7 @@ Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
- ASSERT(constant->HasInteger32Value());
+ DCHECK(constant->HasInteger32Value());
return (signedness == SIGNED_INT32)
? Operand(constant->Integer32Value())
: Operand(static_cast<uint32_t>(constant->Integer32Value()));
@@ -1223,16 +1261,16 @@ Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
- ASSERT(index < 0);
+ DCHECK(index < 0);
return -(index + 1) * kPointerSize;
}
MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
- ASSERT(op != NULL);
- ASSERT(!op->IsRegister());
- ASSERT(!op->IsDoubleRegister());
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op != NULL);
+ DCHECK(!op->IsRegister());
+ DCHECK(!op->IsDoubleRegister());
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
int fp_offset = StackSlotOffset(op->index());
if (op->index() >= 0) {
@@ -1271,7 +1309,7 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
return constant->handle(isolate());
}
@@ -1309,7 +1347,7 @@ int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
double LCodeGen::ToDouble(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(constant->HasDoubleValue());
+ DCHECK(constant->HasDoubleValue());
return constant->DoubleValue();
}
@@ -1369,7 +1407,7 @@ void LCodeGen::EmitBranchGeneric(InstrType instr,
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
- ASSERT((condition != al) && (condition != nv));
+ DCHECK((condition != al) && (condition != nv));
BranchOnCondition branch(this, condition);
EmitBranchGeneric(instr, branch);
}
@@ -1380,7 +1418,7 @@ void LCodeGen::EmitCompareAndBranch(InstrType instr,
Condition condition,
const Register& lhs,
const Operand& rhs) {
- ASSERT((condition != al) && (condition != nv));
+ DCHECK((condition != al) && (condition != nv));
CompareAndBranch branch(this, condition, lhs, rhs);
EmitBranchGeneric(instr, branch);
}
@@ -1391,7 +1429,7 @@ void LCodeGen::EmitTestAndBranch(InstrType instr,
Condition condition,
const Register& value,
uint64_t mask) {
- ASSERT((condition != al) && (condition != nv));
+ DCHECK((condition != al) && (condition != nv));
TestAndBranch branch(this, condition, value, mask);
EmitBranchGeneric(instr, branch);
}
@@ -1478,7 +1516,7 @@ void LCodeGen::DoAddE(LAddE* instr) {
? ToInteger32(LConstantOperand::cast(instr->right()))
: Operand(ToRegister32(instr->right()), SXTW);
- ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
+ DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
__ Add(result, left, right);
}
@@ -1536,11 +1574,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
- ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
- ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
- ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
}
@@ -1575,7 +1613,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
__ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
__ FillFields(untagged_result, filler_count, filler);
} else {
- ASSERT(instr->temp3() == NULL);
+ DCHECK(instr->temp3() == NULL);
}
}
@@ -1586,7 +1624,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
// contained in the register pointer map.
__ Mov(ToRegister(instr->result()), Smi::FromInt(0));
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
// We're in a SafepointRegistersScope so we can use any scratch registers.
Register size = x0;
if (instr->size()->IsConstantOperand()) {
@@ -1597,11 +1635,11 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
int flags = AllocateDoubleAlignFlag::encode(
instr->hydrogen()->MustAllocateDoubleAligned());
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
- ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
- ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
- ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
} else {
flags = AllocateTargetSpace::update(flags, NEW_SPACE);
@@ -1610,7 +1648,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(size, x10);
CallRuntimeFromDeferred(
- Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
}
@@ -1622,10 +1660,10 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
Register elements = ToRegister(instr->elements());
Register scratch = x5;
- ASSERT(receiver.Is(x0)); // Used for parameter count.
- ASSERT(function.Is(x1)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).Is(x0));
- ASSERT(instr->IsMarkedAsCall());
+ DCHECK(receiver.Is(x0)); // Used for parameter count.
+ DCHECK(function.Is(x1)); // Required by InvokeFunction.
+ DCHECK(ToRegister(instr->result()).Is(x0));
+ DCHECK(instr->IsMarkedAsCall());
// Copy the arguments to this function possibly from the
// adaptor frame below it.
@@ -1654,7 +1692,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ B(ne, &loop);
__ Bind(&invoke);
- ASSERT(instr->HasPointerMap());
+ DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in argc (receiver) which is x0, as
@@ -1680,10 +1718,10 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
// LAccessArgumentsAt implementation take that into account.
// In the inlined case we need to subtract the size of 2 words to jssp to
// get a pointer which will work well with LAccessArgumentsAt.
- ASSERT(masm()->StackPointer().Is(jssp));
+ DCHECK(masm()->StackPointer().Is(jssp));
__ Sub(result, jssp, 2 * kPointerSize);
} else {
- ASSERT(instr->temp() != NULL);
+ DCHECK(instr->temp() != NULL);
Register previous_fp = ToRegister(instr->temp());
__ Ldr(previous_fp,
@@ -1737,12 +1775,12 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
// precision), it should be possible. However, we would need support for
// fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
// support that yet.
- ASSERT(left.Is(d0));
- ASSERT(right.Is(d1));
+ DCHECK(left.Is(d0));
+ DCHECK(right.Is(d1));
__ CallCFunction(
ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
- ASSERT(result.Is(d0));
+ DCHECK(result.Is(d0));
break;
}
default:
@@ -1753,10 +1791,10 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->left()).is(x1));
- ASSERT(ToRegister(instr->right()).is(x0));
- ASSERT(ToRegister(instr->result()).is(x0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(x1));
+ DCHECK(ToRegister(instr->right()).is(x0));
+ DCHECK(ToRegister(instr->result()).is(x0));
BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -1797,20 +1835,20 @@ void LCodeGen::DoBitS(LBitS* instr) {
void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
- ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
- ASSERT(instr->hydrogen()->length()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
if (instr->index()->IsConstantOperand()) {
Operand index = ToOperand32I(instr->index());
Register length = ToRegister32(instr->length());
__ Cmp(length, index);
- cond = ReverseConditionForCmp(cond);
+ cond = CommuteCondition(cond);
} else {
Register index = ToRegister32(instr->index());
Operand length = ToOperand32I(instr->length());
__ Cmp(index, length);
}
if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
- __ Assert(InvertCondition(cond), kEliminatedBoundsCheckFailed);
+ __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
} else {
DeoptimizeIf(cond, instr->environment());
}
@@ -1823,10 +1861,10 @@ void LCodeGen::DoBranch(LBranch* instr) {
Label* false_label = instr->FalseLabel(chunk_);
if (r.IsInteger32()) {
- ASSERT(!info()->IsStub());
+ DCHECK(!info()->IsStub());
EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
} else if (r.IsSmi()) {
- ASSERT(!info()->IsStub());
+ DCHECK(!info()->IsStub());
STATIC_ASSERT(kSmiTag == 0);
EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
} else if (r.IsDouble()) {
@@ -1834,28 +1872,28 @@ void LCodeGen::DoBranch(LBranch* instr) {
// Test the double value. Zero and NaN are false.
EmitBranchIfNonZeroNumber(instr, value, double_scratch());
} else {
- ASSERT(r.IsTagged());
+ DCHECK(r.IsTagged());
Register value = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
- ASSERT(!info()->IsStub());
+ DCHECK(!info()->IsStub());
__ CompareRoot(value, Heap::kTrueValueRootIndex);
EmitBranch(instr, eq);
} else if (type.IsSmi()) {
- ASSERT(!info()->IsStub());
+ DCHECK(!info()->IsStub());
EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
} else if (type.IsJSArray()) {
- ASSERT(!info()->IsStub());
+ DCHECK(!info()->IsStub());
EmitGoto(instr->TrueDestination(chunk()));
} else if (type.IsHeapNumber()) {
- ASSERT(!info()->IsStub());
+ DCHECK(!info()->IsStub());
__ Ldr(double_scratch(), FieldMemOperand(value,
HeapNumber::kValueOffset));
// Test the double value. Zero and NaN are false.
EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
} else if (type.IsString()) {
- ASSERT(!info()->IsStub());
+ DCHECK(!info()->IsStub());
Register temp = ToRegister(instr->temp1());
__ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
EmitCompareAndBranch(instr, ne, temp, 0);
@@ -1886,7 +1924,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.Contains(ToBooleanStub::SMI)) {
// Smis: 0 -> false, all other -> true.
- ASSERT(Smi::FromInt(0) == 0);
+ DCHECK(Smi::FromInt(0) == 0);
__ Cbz(value, false_label);
__ JumpIfSmi(value, true_label);
} else if (expected.NeedsMap()) {
@@ -1898,7 +1936,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Register scratch = NoReg;
if (expected.NeedsMap()) {
- ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
map = ToRegister(instr->temp1());
scratch = ToRegister(instr->temp2());
@@ -1970,7 +2008,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
dont_adapt_arguments || formal_parameter_count == arity;
// The function interface relies on the following register assignments.
- ASSERT(function_reg.Is(x1) || function_reg.IsNone());
+ DCHECK(function_reg.Is(x1) || function_reg.IsNone());
Register arity_reg = x0;
LPointerMap* pointers = instr->pointer_map();
@@ -2015,8 +2053,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
- ASSERT(instr->IsMarkedAsCall());
- ASSERT(ToRegister(instr->result()).Is(x0));
+ DCHECK(instr->IsMarkedAsCall());
+ DCHECK(ToRegister(instr->result()).Is(x0));
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
@@ -2030,7 +2068,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
// this understanding is correct.
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
} else {
- ASSERT(instr->target()->IsRegister());
+ DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
@@ -2042,8 +2080,8 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- ASSERT(instr->IsMarkedAsCall());
- ASSERT(ToRegister(instr->function()).is(x1));
+ DCHECK(instr->IsMarkedAsCall());
+ DCHECK(ToRegister(instr->function()).is(x1));
if (instr->hydrogen()->pass_argument_count()) {
__ Mov(x0, Operand(instr->arity()));
@@ -2068,8 +2106,8 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
void LCodeGen::DoCallStub(LCallStub* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->result()).is(x0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->result()).is(x0));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpExec: {
RegExpExecStub stub(isolate());
@@ -2101,7 +2139,7 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
Register temp = ToRegister(instr->temp());
{
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
__ Push(object);
__ Mov(cp, 0);
__ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
@@ -2172,7 +2210,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
}
}
@@ -2180,7 +2218,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
Register value = ToRegister(instr->value());
- ASSERT(!instr->result() || ToRegister(instr->result()).Is(value));
+ DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
DeoptimizeIfNotSmi(value, instr->environment());
}
@@ -2215,7 +2253,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
if (IsPowerOf2(mask)) {
- ASSERT((tag == 0) || (tag == mask));
+ DCHECK((tag == 0) || (tag == mask));
if (tag == 0) {
DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
} else {
@@ -2290,7 +2328,7 @@ void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
Register result_reg = ToRegister(instr->result());
if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
__ Fmov(result_reg, value_reg);
- __ Mov(result_reg, Operand(result_reg, LSR, 32));
+ __ Lsr(result_reg, result_reg, 32);
} else {
__ Fmov(result_reg.W(), value_reg.S());
}
@@ -2300,12 +2338,12 @@ void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
Register hi_reg = ToRegister(instr->hi());
Register lo_reg = ToRegister(instr->lo());
- Register temp = ToRegister(instr->temp());
DoubleRegister result_reg = ToDoubleRegister(instr->result());
- __ And(temp, lo_reg, Operand(0xffffffff));
- __ Orr(temp, temp, Operand(hi_reg, LSL, 32));
- __ Fmov(result_reg, temp);
+ // Insert the least significant 32 bits of hi_reg into the most significant
+ // 32 bits of lo_reg, and move to a floating point register.
+ __ Bfi(lo_reg, hi_reg, 32, 32);
+ __ Fmov(result_reg, lo_reg);
}
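
The Bfi form above packs the low 32 bits of hi_reg into bits 63:32 of lo_reg before the Fmov, replacing the three-instruction And/Orr/Fmov sequence and dropping the temp register. A host-side sketch of the same bit manipulation in standard C++ only (ConstructDouble is an illustrative helper, not a V8 function):

#include <cassert>
#include <cstdint>
#include <cstring>

// Combine two 32-bit halves into the bit pattern of a double, the way
// Bfi(lo, hi, 32, 32) arranges the register before the Fmov.
static double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double result;
  std::memcpy(&result, &bits, sizeof(result));  // Reinterpret, don't convert.
  return result;
}

int main() {
  // 0x3FF0000000000000 is the IEEE-754 encoding of 1.0.
  assert(ConstructDouble(0x3FF00000u, 0x00000000u) == 1.0);
  // The removed And/Orr path computed the same 64-bit value.
  uint64_t hi = 0x3FF00000u, lo = 0u;
  assert(((lo & 0xffffffffu) | (hi << 32)) == 0x3FF0000000000000u);
  return 0;
}
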
@@ -2371,7 +2409,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
- ASSERT(instr->hydrogen()->representation().IsDouble());
+ DCHECK(instr->hydrogen()->representation().IsDouble());
FPRegister object = ToDoubleRegister(instr->object());
Register temp = ToRegister(instr->temp());
@@ -2387,7 +2425,7 @@ void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
- ASSERT(instr->hydrogen()->representation().IsTagged());
+ DCHECK(instr->hydrogen()->representation().IsTagged());
Register object = ToRegister(instr->object());
EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
@@ -2405,7 +2443,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
Representation rep = instr->hydrogen()->value()->representation();
- ASSERT(!rep.IsInteger32());
+ DCHECK(!rep.IsInteger32());
Register scratch = ToRegister(instr->temp());
if (rep.IsDouble()) {
@@ -2415,8 +2453,8 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
Register value = ToRegister(instr->value());
__ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
instr->FalseLabel(chunk()), DO_SMI_CHECK);
- __ Ldr(double_scratch(), FieldMemOperand(value, HeapNumber::kValueOffset));
- __ JumpIfMinusZero(double_scratch(), instr->TrueLabel(chunk()));
+ __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
+ __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
}
EmitGoto(instr->FalseDestination(chunk()));
}
@@ -2425,7 +2463,10 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- Condition cond = TokenToCondition(instr->op(), false);
+ bool is_unsigned =
+ instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+ instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+ Condition cond = TokenToCondition(instr->op(), is_unsigned);
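
Folding the kUint32 flag into TokenToCondition matters because the same 32-bit pattern orders differently under signed and unsigned interpretation, so uint32 operands need the unsigned condition codes. A short host-side illustration:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t a = 0xFFFFFFFFu;  // -1 as int32, 4294967295 as uint32.
  uint32_t b = 1u;
  // The same bit patterns order differently under the two interpretations,
  // hence the switch to unsigned condition codes for kUint32 operands.
  assert(static_cast<int32_t>(a) < static_cast<int32_t>(b));  // signed view
  assert(a > b);                                              // unsigned view
  return 0;
}
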
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
@@ -2436,17 +2477,7 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
- if (right->IsConstantOperand()) {
- __ Fcmp(ToDoubleRegister(left),
- ToDouble(LConstantOperand::cast(right)));
- } else if (left->IsConstantOperand()) {
- // Transpose the operands and reverse the condition.
- __ Fcmp(ToDoubleRegister(right),
- ToDouble(LConstantOperand::cast(left)));
- cond = ReverseConditionForCmp(cond);
- } else {
- __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
- }
+ __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to false block label.
@@ -2460,14 +2491,14 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
ToRegister32(left),
ToOperand32I(right));
} else {
- // Transpose the operands and reverse the condition.
+ // Commute the operands and the condition.
EmitCompareAndBranch(instr,
- ReverseConditionForCmp(cond),
+ CommuteCondition(cond),
ToRegister32(right),
ToOperand32I(left));
}
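
CommuteCondition is a more accurate name than ReverseConditionForCmp for what happens here: when the operands of a compare are swapped, the ordering conditions must be mirrored (lt becomes gt, and so on), not logically negated. A standalone sketch of that distinction; the enum and helpers are stand-ins for illustration, not V8's Condition type:

#include <cassert>

// Stand-in condition codes for illustration only.
enum Cond { kLt, kLe, kGt, kGe, kEq, kNe };

// Swapping the operands of "a cond b" mirrors the ordering conditions.
static Cond Commute(Cond c) {
  switch (c) {
    case kLt: return kGt;
    case kGt: return kLt;
    case kLe: return kGe;
    case kGe: return kLe;
    default:  return c;  // eq and ne are symmetric.
  }
}

// Negation flips the truth value instead, which is not what a swap needs.
static Cond Negate(Cond c) {
  switch (c) {
    case kLt: return kGe;
    case kGe: return kLt;
    case kGt: return kLe;
    case kLe: return kGt;
    case kEq: return kNe;
    default:  return kEq;
  }
}

int main() {
  // 2 < 5 is the same predicate as 5 > 2 (commuted), not 5 >= 2 (negated).
  assert(Commute(kLt) == kGt);
  assert(Negate(kLt) == kGe);
  return 0;
}
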
} else {
- ASSERT(instr->hydrogen_value()->representation().IsSmi());
+ DCHECK(instr->hydrogen_value()->representation().IsSmi());
if (right->IsConstantOperand()) {
int32_t value = ToInteger32(LConstantOperand::cast(right));
EmitCompareAndBranch(instr,
@@ -2475,10 +2506,10 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
ToRegister(left),
Operand(Smi::FromInt(value)));
} else if (left->IsConstantOperand()) {
- // Transpose the operands and reverse the condition.
+ // Commute the operands and the condition.
int32_t value = ToInteger32(LConstantOperand::cast(left));
EmitCompareAndBranch(instr,
- ReverseConditionForCmp(cond),
+ CommuteCondition(cond),
ToRegister(right),
Operand(Smi::FromInt(value)));
} else {
@@ -2501,12 +2532,12 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
void LCodeGen::DoCmpT(LCmpT* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Condition cond = TokenToCondition(op, false);
- ASSERT(ToRegister(instr->left()).Is(x1));
- ASSERT(ToRegister(instr->right()).Is(x0));
+ DCHECK(ToRegister(instr->left()).Is(x1));
+ DCHECK(ToRegister(instr->right()).Is(x0));
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Signal that we don't inline smi code before this stub.
@@ -2514,7 +2545,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
// Return true or false depending on CompareIC result.
// This instruction is marked as call. We can clobber any register.
- ASSERT(instr->IsMarkedAsCall());
+ DCHECK(instr->IsMarkedAsCall());
__ LoadTrueFalseRoots(x1, x2);
__ Cmp(x0, 0);
__ Csel(ToRegister(instr->result()), x1, x2, cond);
@@ -2522,9 +2553,17 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
+ DCHECK(instr->result()->IsDoubleRegister());
DoubleRegister result = ToDoubleRegister(instr->result());
- __ Fmov(result, instr->value());
+ if (instr->value() == 0) {
+ if (copysign(1.0, instr->value()) == 1.0) {
+ __ Fmov(result, fp_zero);
+ } else {
+ __ Fneg(result, fp_zero);
+ }
+ } else {
+ __ Fmov(result, instr->value());
+ }
}
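
The zero special case exists because +0.0 and -0.0 compare equal, so the sign of a zero-valued constant has to be recovered with copysign before choosing between fp_zero and its negation. A quick host-side check of that property:

#include <cassert>
#include <cmath>

int main() {
  double pz = 0.0, nz = -0.0;
  assert(pz == nz);                       // Comparison cannot tell them apart.
  assert(std::copysign(1.0, pz) == 1.0);  // +0.0 carries a positive sign bit,
  assert(std::copysign(1.0, nz) == -1.0); // -0.0 a negative one.
  return 0;
}
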
@@ -2534,7 +2573,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(is_int32(instr->value()));
+ DCHECK(is_int32(instr->value()));
// Cast the value here to ensure that the value isn't sign extended by the
// implicit Operand constructor.
__ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
@@ -2549,13 +2588,6 @@ void LCodeGen::DoConstantS(LConstantS* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> object = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
- if (instr->hydrogen()->HasObjectMap()) {
- Handle<Map> object_map = instr->hydrogen()->ObjectMap().handle();
- ASSERT(object->IsHeapObject());
- ASSERT(!object_map->is_stable() ||
- *object_map == Handle<HeapObject>::cast(object)->map());
- USE(object_map);
- }
__ LoadObject(ToRegister(instr->result()), object);
}
@@ -2567,7 +2599,7 @@ void LCodeGen::DoContext(LContext* instr) {
__ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
// If there is no frame, the context must be in cp.
- ASSERT(result.is(cp));
+ DCHECK(result.is(cp));
}
}
@@ -2592,7 +2624,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
last_lazy_deopt_pc_ = masm()->pc_offset();
- ASSERT(instr->HasEnvironment());
+ DCHECK(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -2607,8 +2639,8 @@ void LCodeGen::DoDateField(LDateField* instr) {
Smi* index = instr->index();
Label runtime, done;
- ASSERT(object.is(result) && object.Is(x0));
- ASSERT(instr->IsMarkedAsCall());
+ DCHECK(object.is(result) && object.Is(x0));
+ DCHECK(instr->IsMarkedAsCall());
DeoptimizeIfSmi(object, instr->environment());
__ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
@@ -2657,19 +2689,20 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
Register dividend = ToRegister32(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister32(instr->result());
- ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
- ASSERT(!result.is(dividend));
+ DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ DCHECK(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ Cmp(dividend, 0);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIfZero(dividend, instr->environment());
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- __ Cmp(dividend, kMinInt);
- DeoptimizeIf(eq, instr->environment());
+ // Test dividend for kMinInt by subtracting one (cmp) and checking for
+ // overflow.
+ __ Cmp(dividend, 1);
+ DeoptimizeIf(vs, instr->environment());
}
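
The replacement relies on the fact that subtracting 1 from a 32-bit value overflows exactly when the value is kMinInt, so a Cmp against 1 plus a vs branch stands in for the explicit comparison with kMinInt. A minimal host-side sketch of that equivalence, using a GCC/Clang builtin only to model the V flag (this is not the generated code):

#include <cassert>
#include <cstdint>

// Models "Cmp(dividend, 1); B(vs, ...)": x - 1 overflows for exactly one
// 32-bit value, kMinInt.
static bool SubtractOneOverflows(int32_t x) {
  int32_t unused;
  return __builtin_sub_overflow(x, 1, &unused);  // GCC/Clang builtin.
}

int main() {
  assert(SubtractOneOverflows(INT32_MIN));
  assert(!SubtractOneOverflows(INT32_MIN + 1));
  assert(!SubtractOneOverflows(0));
  assert(!SubtractOneOverflows(INT32_MAX));
  return 0;
}
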
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
@@ -2701,7 +2734,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
Register dividend = ToRegister32(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister32(instr->result());
- ASSERT(!AreAliased(dividend, result));
+ DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
Deoptimize(instr->environment());
@@ -2719,7 +2752,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
Register temp = ToRegister32(instr->temp());
- ASSERT(!AreAliased(dividend, result, temp));
+ DCHECK(!AreAliased(dividend, result, temp));
__ Sxtw(dividend.X(), dividend);
__ Mov(temp, divisor);
__ Smsubl(temp.X(), result, temp, dividend.X());
@@ -2740,7 +2773,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ Sdiv(result, dividend, divisor);
if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- ASSERT_EQ(NULL, instr->temp());
+ DCHECK_EQ(NULL, instr->temp());
return;
}
@@ -2813,9 +2846,9 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->context()).is(cp));
// FunctionLiteral instruction is marked as call, we can trash any register.
- ASSERT(instr->IsMarkedAsCall());
+ DCHECK(instr->IsMarkedAsCall());
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
@@ -2831,7 +2864,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
__ Mov(x1, Operand(pretenure ? factory()->true_value()
: factory()->false_value()));
__ Push(cp, x2, x1);
- CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
+ CallRuntime(Runtime::kNewClosure, 3, instr);
}
}
@@ -2861,8 +2894,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register object = ToRegister(instr->object());
Register null_value = x5;
- ASSERT(instr->IsMarkedAsCall());
- ASSERT(object.Is(x0));
+ DCHECK(instr->IsMarkedAsCall());
+ DCHECK(object.Is(x0));
DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex,
instr->environment());
@@ -2902,7 +2935,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
__ AssertString(input);
// Assert that we can use a W register load to get the hash.
- ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
+ DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
__ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
__ IndexFromHash(result, result);
}
@@ -2927,7 +2960,7 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
Register temp = ToRegister32(instr->temp());
// Assert that the cache status bits fit in a W register.
- ASSERT(is_uint32(String::kContainsCachedArrayIndexMask));
+ DCHECK(is_uint32(String::kContainsCachedArrayIndexMask));
__ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
__ Tst(temp, String::kContainsCachedArrayIndexMask);
EmitBranch(instr, eq);
@@ -2951,7 +2984,7 @@ static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
- ASSERT((from == to) || (to == LAST_TYPE));
+ DCHECK((from == to) || (to == LAST_TYPE));
return from;
}
@@ -2972,7 +3005,7 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->value());
Register scratch = ToRegister(instr->temp());
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
__ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
@@ -2992,10 +3025,10 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->context()).is(cp));
// Assert that the arguments are in the registers expected by InstanceofStub.
- ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
- ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));
+ DCHECK(ToRegister(instr->left()).Is(InstanceofStub::left()));
+ DCHECK(ToRegister(instr->right()).Is(InstanceofStub::right()));
InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -3034,10 +3067,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register map = x5;
// This instruction is marked as call. We can clobber any register.
- ASSERT(instr->IsMarkedAsCall());
+ DCHECK(instr->IsMarkedAsCall());
// We must take into account that object is in x11.
- ASSERT(object.Is(x11));
+ DCHECK(object.Is(x11));
Register scratch = x10;
// A Smi is not instance of anything.
@@ -3055,15 +3088,15 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ bind(&map_check);
// Will be patched with the cached map.
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
+ __ ldr(scratch, Immediate(Handle<Object>(cell)));
__ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ cmp(map, scratch);
__ b(&cache_miss, ne);
// The address of this instruction is computed relative to the map check
// above, so check the size of the code generated.
- ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
+ DCHECK(masm()->InstructionsGeneratedSince(&map_check) == 4);
// Will be patched with the cached result.
- __ LoadRelocated(result, Operand(factory()->the_hole_value()));
+ __ ldr(result, Immediate(factory()->the_hole_value()));
}
__ B(&done);
@@ -3096,7 +3129,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register result = ToRegister(instr->result());
- ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0.
+ DCHECK(result.Is(x0)); // InstanceofStub returns its result in x0.
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kArgsInRegisters);
@@ -3105,11 +3138,11 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kCallSiteInlineCheck);
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
LoadContextFromDeferred(instr->context());
// Prepare InstanceofStub arguments.
- ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
+ DCHECK(ToRegister(instr->value()).Is(InstanceofStub::left()));
__ LoadObject(InstanceofStub::right(), instr->function());
InstanceofStub stub(isolate(), flags);
@@ -3138,10 +3171,10 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->context()).is(cp));
// The function is required to be in x1.
- ASSERT(ToRegister(instr->function()).is(x1));
- ASSERT(instr->HasPointerMap());
+ DCHECK(ToRegister(instr->function()).is(x1));
+ DCHECK(instr->HasPointerMap());
Handle<JSFunction> known_function = instr->hydrogen()->known_function();
if (known_function.is_null()) {
@@ -3226,7 +3259,7 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register scratch = ToRegister(instr->temp());
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Condition true_cond =
EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
@@ -3246,7 +3279,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
__ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -3299,16 +3332,6 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());
- // Check that the function really is a function. Leaves map in the result
- // register.
- __ CompareObjectType(function, result, temp, JS_FUNCTION_TYPE);
- DeoptimizeIf(ne, instr->environment());
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset));
- __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance);
-
// Get the prototype or initial map from the function.
__ Ldr(result, FieldMemOperand(function,
JSFunction::kPrototypeOrInitialMapOffset));
@@ -3324,12 +3347,6 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Get the prototype from the initial map.
__ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- __ B(&done);
-
- // Non-instance prototype: fetch prototype from constructor field in initial
- // map.
- __ Bind(&non_instance);
- __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
// All done.
__ Bind(&done);
@@ -3348,10 +3365,19 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->global_object()).Is(x0));
- ASSERT(ToRegister(instr->result()).Is(x0));
- __ Mov(x2, Operand(instr->name()));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->result()).Is(x0));
+ __ Mov(LoadIC::NameRegister(), Operand(instr->name()));
+ if (FLAG_vector_ics) {
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(LoadIC::VectorRegister()));
+ __ Mov(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(LoadIC::SlotRegister().is(x0));
+ __ Mov(LoadIC::SlotRegister(),
+ Smi::FromInt(instr->hydrogen()->slot()));
+ }
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3366,29 +3392,25 @@ MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
bool key_is_constant,
int constant_key,
ElementsKind elements_kind,
- int additional_index) {
+ int base_offset) {
int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int additional_offset = additional_index << element_size_shift;
- if (IsFixedTypedArrayElementsKind(elements_kind)) {
- additional_offset += FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
- }
if (key_is_constant) {
int key_offset = constant_key << element_size_shift;
- return MemOperand(base, key_offset + additional_offset);
+ return MemOperand(base, key_offset + base_offset);
}
if (key_is_smi) {
__ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
- return MemOperand(scratch, additional_offset);
+ return MemOperand(scratch, base_offset);
}
- if (additional_offset == 0) {
+ if (base_offset == 0) {
return MemOperand(base, key, SXTW, element_size_shift);
}
- ASSERT(!AreAliased(scratch, key));
- __ Add(scratch, base, additional_offset);
+ DCHECK(!AreAliased(scratch, key));
+ __ Add(scratch, base, base_offset);
return MemOperand(scratch, key, SXTW, element_size_shift);
}
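
With the rename, base_offset is a byte offset the lithium layer has already folded together, and the three MemOperand cases differ only in how the key contributes to the address: constant keys become an immediate offset, smi keys are untagged and then scaled, and plain int32 keys are sign-extended and scaled. A sketch of that address arithmetic in plain C++, assuming the 32-bit-shifted smi layout asserted elsewhere in this file (ElementAddress is an illustrative helper, not a V8 function):

#include <cassert>
#include <cstdint>

// Illustrative address computation for a keyed external/typed-array access.
// base is the untagged backing-store pointer, base_offset the byte offset the
// compiler has folded in, element_size_shift the log2 of the element size.
static uint64_t ElementAddress(uint64_t base, int base_offset, int64_t key,
                               bool key_is_smi, int element_size_shift) {
  const int kSmiShift = 32;  // Smi payload lives in the upper 32 bits here.
  if (key_is_smi) key >>= kSmiShift;  // Untag first, then scale.
  return base + base_offset +
         (static_cast<uint64_t>(key) << element_size_shift);
}

int main() {
  // Element 3 of a double-typed array at a made-up base of 0x1000.
  assert(ElementAddress(0x1000, 0, 3, false, 3) == 0x1000 + 3 * 8);
  // The same element addressed with a tagged (smi) key.
  assert(ElementAddress(0x1000, 0, int64_t{3} << 32, true, 3) == 0x1000 + 3 * 8);
  return 0;
}
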
@@ -3403,7 +3425,7 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
Register key = no_reg;
int constant_key = 0;
if (key_is_constant) {
- ASSERT(instr->temp() == NULL);
+ DCHECK(instr->temp() == NULL);
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xf0000000) {
Abort(kArrayIndexConstantValueTooBig);
@@ -3417,7 +3439,7 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
key_is_constant, constant_key,
elements_kind,
- instr->additional_index());
+ instr->base_offset());
if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
(elements_kind == FLOAT32_ELEMENTS)) {
@@ -3488,8 +3510,9 @@ MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
bool key_is_tagged,
ElementsKind elements_kind,
Representation representation,
- int additional_index) {
- STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
+ int base_offset) {
+ STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+ STATIC_ASSERT(kSmiTag == 0);
int element_size_shift = ElementsKindToShiftSize(elements_kind);
// Even though the HLoad/StoreKeyed instructions force the input
@@ -3499,25 +3522,23 @@ MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
if (key_is_tagged) {
__ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
if (representation.IsInteger32()) {
- ASSERT(elements_kind == FAST_SMI_ELEMENTS);
- // Read or write only the most-significant 32 bits in the case of fast smi
- // arrays.
- return UntagSmiFieldMemOperand(base, additional_index);
+ DCHECK(elements_kind == FAST_SMI_ELEMENTS);
+ // Read or write only the smi payload in the case of fast smi arrays.
+ return UntagSmiMemOperand(base, base_offset);
} else {
- return FieldMemOperand(base, additional_index);
+ return MemOperand(base, base_offset);
}
} else {
// Sign extend key because it could be a 32-bit negative value or contain
// garbage in the top 32-bits. The address computation happens in 64-bit.
- ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
+ DCHECK((element_size_shift >= 0) && (element_size_shift <= 4));
if (representation.IsInteger32()) {
- ASSERT(elements_kind == FAST_SMI_ELEMENTS);
- // Read or write only the most-significant 32 bits in the case of fast smi
- // arrays.
+ DCHECK(elements_kind == FAST_SMI_ELEMENTS);
+ // Read or write only the smi payload in the case of fast smi arrays.
__ Add(base, elements, Operand(key, SXTW, element_size_shift));
- return UntagSmiFieldMemOperand(base, additional_index);
+ return UntagSmiMemOperand(base, base_offset);
} else {
- __ Add(base, elements, additional_index - kHeapObjectTag);
+ __ Add(base, elements, base_offset);
return MemOperand(base, key, SXTW, element_size_shift);
}
}
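
UntagSmiMemOperand works because, with kSmiTag == 0 and the 32-bit payload held in the upper word, the untagged integer can be read with a single 32-bit load of the high half of the 64-bit slot. A sketch of the two equivalent reads on a little-endian host (matching arm64 as used here):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // A smi holds its 32-bit payload in the upper half of a 64-bit word.
  const int kSmiShift = 32;
  int64_t smi = int64_t{42} << kSmiShift;

  // Option 1: load 64 bits and shift, as SmiUntag does.
  assert((smi >> kSmiShift) == 42);

  // Option 2: a 32-bit load of the high half, i.e. byte offset +4 on a
  // little-endian target, which is what UntagSmiMemOperand expresses.
  int32_t payload;
  std::memcpy(&payload, reinterpret_cast<const char*>(&smi) + 4,
              sizeof(payload));
  assert(payload == 42);
  return 0;
}
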
@@ -3530,25 +3551,23 @@ void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
MemOperand mem_op;
if (instr->key()->IsConstantOperand()) {
- ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
+ DCHECK(instr->hydrogen()->RequiresHoleCheck() ||
(instr->temp() == NULL));
int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xf0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
- int offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
- instr->additional_index());
- mem_op = FieldMemOperand(elements, offset);
+ int offset = instr->base_offset() + constant_key * kDoubleSize;
+ mem_op = MemOperand(elements, offset);
} else {
Register load_base = ToRegister(instr->temp());
Register key = ToRegister(instr->key());
bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- int offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
instr->hydrogen()->elements_kind(),
instr->hydrogen()->representation(),
- offset);
+ instr->base_offset());
}
__ Ldr(result, mem_op);
@@ -3572,27 +3591,26 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
Representation representation = instr->hydrogen()->representation();
if (instr->key()->IsConstantOperand()) {
- ASSERT(instr->temp() == NULL);
+ DCHECK(instr->temp() == NULL);
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- int offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
+ int offset = instr->base_offset() +
+ ToInteger32(const_operand) * kPointerSize;
if (representation.IsInteger32()) {
- ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
- STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
- (kSmiTag == 0));
- mem_op = UntagSmiFieldMemOperand(elements, offset);
+ DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+ STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+ STATIC_ASSERT(kSmiTag == 0);
+ mem_op = UntagSmiMemOperand(elements, offset);
} else {
- mem_op = FieldMemOperand(elements, offset);
+ mem_op = MemOperand(elements, offset);
}
} else {
Register load_base = ToRegister(instr->temp());
Register key = ToRegister(instr->key());
bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- int offset = FixedArray::OffsetOfElementAt(instr->additional_index());
mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
instr->hydrogen()->elements_kind(),
- representation, offset);
+ representation, instr->base_offset());
}
__ Load(result, mem_op, representation);
@@ -3609,14 +3627,23 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->object()).Is(x1));
- ASSERT(ToRegister(instr->key()).Is(x0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
+ if (FLAG_vector_ics) {
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(LoadIC::VectorRegister()));
+ __ Mov(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(LoadIC::SlotRegister().is(x0));
+ __ Mov(LoadIC::SlotRegister(),
+ Smi::FromInt(instr->hydrogen()->slot()));
+ }
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
- ASSERT(ToRegister(instr->result()).Is(x0));
+ DCHECK(ToRegister(instr->result()).Is(x0));
}
@@ -3650,7 +3677,8 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (access.representation().IsSmi() &&
instr->hydrogen()->representation().IsInteger32()) {
// Read int value directly from upper half of the smi.
- STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+ STATIC_ASSERT(kSmiTag == 0);
__ Load(result, UntagSmiFieldMemOperand(source, offset),
Representation::Integer32());
} else {
@@ -3660,15 +3688,24 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
- ASSERT(ToRegister(instr->object()).is(x0));
- __ Mov(x2, Operand(instr->name()));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ // LoadIC expects name and receiver in registers.
+ DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
+ __ Mov(LoadIC::NameRegister(), Operand(instr->name()));
+ if (FLAG_vector_ics) {
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(LoadIC::VectorRegister()));
+ __ Mov(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(LoadIC::SlotRegister().is(x0));
+ __ Mov(LoadIC::SlotRegister(),
+ Smi::FromInt(instr->hydrogen()->slot()));
+ }
Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
- ASSERT(ToRegister(instr->result()).is(x0));
+ DCHECK(ToRegister(instr->result()).is(x0));
}
@@ -3714,8 +3751,8 @@ void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
// - The (smi) input -0x80000000, produces +0x80000000, which does not fit
// a smi. In this case, the inline code sets the result and jumps directly
// to the allocation_entry label.
- ASSERT(instr->context() != NULL);
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(instr->context() != NULL);
+ DCHECK(ToRegister(instr->context()).is(cp));
Register input = ToRegister(instr->value());
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
@@ -3761,8 +3798,8 @@ void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
__ Bind(&result_ok);
}
- { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
+ { PushSafepointRegistersScope scope(this);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
instr->context());
__ StoreToSafepointRegisterSlot(x0, result);
}
@@ -3789,12 +3826,12 @@ void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
// TODO(jbramley): The early-exit mechanism would skip the new frame handling
// in GenerateDeferredCode. Tidy this up.
- ASSERT(!NeedsDeferredFrame());
+ DCHECK(!NeedsDeferredFrame());
DeferredMathAbsTagged* deferred =
new(zone()) DeferredMathAbsTagged(this, instr);
- ASSERT(instr->hydrogen()->value()->representation().IsTagged() ||
+ DCHECK(instr->hydrogen()->value()->representation().IsTagged() ||
instr->hydrogen()->value()->representation().IsSmi());
Register input = ToRegister(instr->value());
Register result_bits = ToRegister(instr->temp3());
@@ -3870,9 +3907,14 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
Register result = ToRegister32(instr->result());
int32_t divisor = instr->divisor();
+ // If the divisor is 1, return the dividend.
+ if (divisor == 1) {
+ __ Mov(result, dividend, kDiscardForSameWReg);
+ return;
+ }
+
// If the divisor is positive, things are easy: There can be no deopts and we
// can simply do an arithmetic right shift.
- if (divisor == 1) return;
int32_t shift = WhichPowerOf2Abs(divisor);
if (divisor > 1) {
__ Mov(result, Operand(dividend, ASR, shift));
@@ -3885,26 +3927,22 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
DeoptimizeIf(eq, instr->environment());
}
- // If the negation could not overflow, simply shifting is OK.
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- __ Mov(result, Operand(dividend, ASR, shift));
+ // Dividing by -1 is basically negation, unless we overflow.
+ if (divisor == -1) {
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ DeoptimizeIf(vs, instr->environment());
+ }
return;
}
- // Dividing by -1 is basically negation, unless we overflow.
- if (divisor == -1) {
- DeoptimizeIf(vs, instr->environment());
+ // If the negation could not overflow, simply shifting is OK.
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ Mov(result, Operand(dividend, ASR, shift));
return;
}
- // Using a conditional data processing instruction would need 1 more register.
- Label not_kmin_int, done;
- __ B(vc, &not_kmin_int);
- __ Mov(result, kMinInt / divisor);
- __ B(&done);
- __ bind(&not_kmin_int);
- __ Mov(result, Operand(dividend, ASR, shift));
- __ bind(&done);
+ __ Asr(result, result, shift);
+ __ Csel(result, result, kMinInt / divisor, vc);
}
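
For a negative power-of-two divisor, the value computed above is a flooring (round-toward-minus-infinity) division: negate, arithmetic-shift right, and use Csel to substitute the precomputed kMinInt / divisor when the negation overflowed. A host-side reference of that semantics (FlooringDivByNegPowerOf2 is illustrative only, and assumes shift >= 1 since the divisor == -1 case is handled separately above):

#include <cassert>
#include <cstdint>

// divisor = -(1 << shift), shift >= 1. Mirrors the Neg/Asr/Csel sequence:
// negate the dividend, arithmetic-shift right, and patch up the one case
// where the negation overflows.
static int32_t FlooringDivByNegPowerOf2(int32_t dividend, int shift) {
  int32_t divisor = -(1 << shift);
  if (dividend == INT32_MIN) {
    // The V-set case: Csel selects the precomputed constant kMinInt / divisor.
    return INT32_MIN / divisor;
  }
  // Arithmetic right shift of the negated dividend is a floor division by
  // 2^shift (shifts of negative values are arithmetic on the targets in use
  // here, and guaranteed so since C++20).
  int64_t neg = -static_cast<int64_t>(dividend);
  return static_cast<int32_t>(neg >> shift);
}

int main() {
  assert(FlooringDivByNegPowerOf2(5, 2) == -2);              // floor(5 / -4)
  assert(FlooringDivByNegPowerOf2(-5, 2) == 1);              // floor(-5 / -4)
  assert(FlooringDivByNegPowerOf2(INT32_MIN, 2) == (1 << 29));
  return 0;
}
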
@@ -3912,7 +3950,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
Register dividend = ToRegister32(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister32(instr->result());
- ASSERT(!AreAliased(dividend, result));
+ DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
Deoptimize(instr->environment());
@@ -3922,8 +3960,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ Cmp(dividend, 0);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIfZero(dividend, instr->environment());
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -3938,19 +3975,19 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
// In the general case we may need to adjust before and after the truncating
// division to get a flooring division.
Register temp = ToRegister32(instr->temp());
- ASSERT(!AreAliased(temp, dividend, result));
+ DCHECK(!AreAliased(temp, dividend, result));
Label needs_adjustment, done;
__ Cmp(dividend, 0);
__ B(divisor > 0 ? lt : gt, &needs_adjustment);
__ TruncatingDiv(result, dividend, Abs(divisor));
if (divisor < 0) __ Neg(result, result);
__ B(&done);
- __ bind(&needs_adjustment);
+ __ Bind(&needs_adjustment);
__ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
__ TruncatingDiv(result, temp, Abs(divisor));
if (divisor < 0) __ Neg(result, result);
__ Sub(result, result, Operand(1));
- __ bind(&done);
+ __ Bind(&done);
}
@@ -4001,11 +4038,11 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
- ASSERT(instr->IsMarkedAsCall());
- ASSERT(ToDoubleRegister(instr->value()).is(d0));
+ DCHECK(instr->IsMarkedAsCall());
+ DCHECK(ToDoubleRegister(instr->value()).is(d0));
__ CallCFunction(ExternalReference::math_log_double_function(isolate()),
0, 1);
- ASSERT(ToDoubleRegister(instr->result()).Is(d0));
+ DCHECK(ToDoubleRegister(instr->result()).Is(d0));
}
@@ -4044,13 +4081,13 @@ void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
- ASSERT(!instr->right()->IsDoubleRegister() ||
+ DCHECK(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(d1));
- ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
+ DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
ToRegister(instr->right()).is(x11));
- ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
- ASSERT(ToDoubleRegister(instr->left()).is(d0));
- ASSERT(ToDoubleRegister(instr->result()).is(d0));
+ DCHECK(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
+ DCHECK(ToDoubleRegister(instr->left()).is(d0));
+ DCHECK(ToDoubleRegister(instr->result()).is(d0));
if (exponent_type.IsSmi()) {
MathPowStub stub(isolate(), MathPowStub::TAGGED);
@@ -4072,7 +4109,7 @@ void LCodeGen::DoPower(LPower* instr) {
MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
- ASSERT(exponent_type.IsDouble());
+ DCHECK(exponent_type.IsDouble());
MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
}
@@ -4084,7 +4121,7 @@ void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
DoubleRegister result = ToDoubleRegister(instr->result());
DoubleRegister scratch_d = double_scratch();
- ASSERT(!AreAliased(input, result, scratch_d));
+ DCHECK(!AreAliased(input, result, scratch_d));
Label done;
@@ -4111,9 +4148,9 @@ void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister temp1 = ToDoubleRegister(instr->temp1());
+ DoubleRegister temp = ToDoubleRegister(instr->temp1());
+ DoubleRegister dot_five = double_scratch();
Register result = ToRegister(instr->result());
- Label try_rounding;
Label done;
// Math.round() rounds to the nearest integer, with ties going towards
@@ -4124,46 +4161,53 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
// that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
// result of -0.0.
- DoubleRegister dot_five = double_scratch();
+ // Add 0.5 and round towards -infinity.
__ Fmov(dot_five, 0.5);
- __ Fabs(temp1, input);
- __ Fcmp(temp1, dot_five);
- // If input is in [-0.5, -0], the result is -0.
- // If input is in [+0, +0.5[, the result is +0.
- // If the input is +0.5, the result is 1.
- __ B(hi, &try_rounding); // hi so NaN will also branch.
+ __ Fadd(temp, input, dot_five);
+ __ Fcvtms(result, temp);
+
+ // The result is correct if:
+ // result is not 0, as the input could be NaN or [-0.5, -0.0].
+ // result is not 1, as 0.499...94 will wrongly map to 1.
+ // result fits in 32 bits.
+ __ Cmp(result, Operand(result.W(), SXTW));
+ __ Ccmp(result, 1, ZFlag, eq);
+ __ B(hi, &done);
+
+ // At this point, we have to handle possible inputs of NaN or numbers in the
+ // range [-0.5, 1.5[, or numbers larger than 32 bits.
+
+ // Deoptimize if the result > 1, as it must be larger than 32 bits.
+ __ Cmp(result, 1);
+ DeoptimizeIf(hi, instr->environment());
+ // Deoptimize for negative inputs, which at this point are only numbers in
+  // the range [-0.5, -0.0].
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Fmov(result, input);
- DeoptimizeIfNegative(result, instr->environment()); // [-0.5, -0.0].
+ DeoptimizeIfNegative(result, instr->environment());
}
- __ Fcmp(input, dot_five);
- __ Mov(result, 1); // +0.5.
- // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
- // flag kBailoutOnMinusZero, will return 0 (xzr).
- __ Csel(result, result, xzr, eq);
- __ B(&done);
- __ Bind(&try_rounding);
- // Since we're providing a 32-bit result, we can implement ties-to-infinity by
- // adding 0.5 to the input, then taking the floor of the result. This does not
- // work for very large positive doubles because adding 0.5 would cause an
- // intermediate rounding stage, so a different approach is necessary when a
- // double result is needed.
- __ Fadd(temp1, input, dot_five);
- __ Fcvtms(result, temp1);
-
- // Deopt if
- // * the input was NaN
- // * the result is not representable using a 32-bit integer.
- __ Fcmp(input, 0.0);
- __ Ccmp(result, Operand(result.W(), SXTW), NoFlag, vc);
- DeoptimizeIf(ne, instr->environment());
+ // Deoptimize if the input was NaN.
+ __ Fcmp(input, dot_five);
+ DeoptimizeIf(vs, instr->environment());
+ // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
+ // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
+ // else 0; we avoid dealing with 0.499...94 directly.
+ __ Cset(result, ge);
__ Bind(&done);
}
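
The rewritten sequence computes Fcvtms(input + 0.5), that is floor after adding 0.5, and the follow-up checks exist for the inputs where that formula misbehaves: results that do not fit in 32 bits, NaN, and 0.49999999999999994, the largest double below 0.5, for which the addition itself rounds up to 1.0. A quick host demonstration of that last pitfall:

#include <cassert>
#include <cmath>

int main() {
  // The largest double strictly below 0.5.
  double tricky = std::nextafter(0.5, 0.0);  // 0.49999999999999994
  // Naive round-half-up gives the wrong answer because the addition itself
  // rounds to nearest: tricky + 0.5 is exactly 1.0.
  assert(std::floor(tricky + 0.5) == 1.0);
  // Math.round semantics require 0 here, which is why results equal to 1 are
  // re-checked against the original input.
  assert(std::floor(tricky) == 0.0);
  return 0;
}
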
+void LCodeGen::DoMathFround(LMathFround* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fcvt(result.S(), input);
+ __ Fcvt(result, result.S());
+}
+
+
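
DoMathFround lowers Math.fround to a double-to-single-to-double conversion pair, which is exactly "round to the nearest float32, returned as a double". The same effect expressed in plain C++:

#include <cassert>

int main() {
  // Math.fround(x): round x to the nearest float32, returned as a double.
  double x = 0.1;  // Not exactly representable as a float32.
  double fround = static_cast<double>(static_cast<float>(x));
  assert(fround != x);                                          // Precision dropped,
  assert(static_cast<float>(fround) == static_cast<float>(x));  // but only once.
  return 0;
}
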
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
@@ -4188,7 +4232,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ Cmp(left, right);
__ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
} else {
- ASSERT(instr->hydrogen()->representation().IsDouble());
+ DCHECK(instr->hydrogen()->representation().IsDouble());
DoubleRegister result = ToDoubleRegister(instr->result());
DoubleRegister left = ToDoubleRegister(instr->left());
DoubleRegister right = ToDoubleRegister(instr->right());
@@ -4196,7 +4240,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
if (op == HMathMinMax::kMathMax) {
__ Fmax(result, left, right);
} else {
- ASSERT(op == HMathMinMax::kMathMin);
+ DCHECK(op == HMathMinMax::kMathMin);
__ Fmin(result, left, right);
}
}
@@ -4206,7 +4250,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
Register dividend = ToRegister32(instr->dividend());
int32_t divisor = instr->divisor();
- ASSERT(dividend.is(ToRegister32(instr->result())));
+ DCHECK(dividend.is(ToRegister32(instr->result())));
// Theoretically, a variation of the branch-free code for integer division by
// a power of 2 (calculating the remainder via an additional multiplication
@@ -4218,8 +4262,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
Label dividend_is_not_negative, done;
if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
- __ Cmp(dividend, 0);
- __ B(pl, &dividend_is_not_negative);
+ __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
// Note that this is correct even for kMinInt operands.
__ Neg(dividend, dividend);
__ And(dividend, dividend, mask);
@@ -4241,7 +4284,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
int32_t divisor = instr->divisor();
Register result = ToRegister32(instr->result());
Register temp = ToRegister32(instr->temp());
- ASSERT(!AreAliased(dividend, result, temp));
+ DCHECK(!AreAliased(dividend, result, temp));
if (divisor == 0) {
Deoptimize(instr->environment());
@@ -4285,14 +4328,14 @@ void LCodeGen::DoModI(LModI* instr) {
void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
- ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32());
+ DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
bool is_smi = instr->hydrogen()->representation().IsSmi();
Register result =
is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
Register left =
      is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
int32_t right = ToInteger32(instr->right());
- ASSERT((right > -kMaxInt) || (right < kMaxInt));
+ DCHECK((right > -kMaxInt) || (right < kMaxInt));
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero =
@@ -4346,7 +4389,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (can_overflow) {
Register scratch = result;
- ASSERT(!AreAliased(scratch, left));
+ DCHECK(!AreAliased(scratch, left));
__ Cls(scratch, left);
__ Cmp(scratch, right_log2);
DeoptimizeIf(lt, instr->environment());
@@ -4371,7 +4414,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
// For the following cases, we could perform a conservative overflow check
// with CLS as above. However the few cycles saved are likely not worth
// the risk of deoptimizing more often than required.
- ASSERT(!can_overflow);
+ DCHECK(!can_overflow);
if (right >= 0) {
if (IsPowerOf2(right - 1)) {
@@ -4469,7 +4512,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
__ SmiUntag(result, left);
__ Mul(result, result, right);
} else {
- ASSERT(!left.Is(result));
+ DCHECK(!left.Is(result));
// Registers result and right alias, left is distinct, or all registers
// are distinct: untag right into result, and then multiply by left,
// giving a tagged result.
@@ -4487,14 +4530,14 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
Register result = ToRegister(instr->result());
__ Mov(result, 0);
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
// NumberTagU and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kHiddenAllocateHeapNumber.
+ // They only call Runtime::kAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(x0, result);
@@ -4552,15 +4595,15 @@ void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
__ Mov(dst, 0);
{
// Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
// NumberTagU and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kHiddenAllocateHeapNumber.
+ // They only call Runtime::kAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(x0, dst);
@@ -4649,7 +4692,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
}
} else {
- ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+ DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
// Fall through to load_smi.
}
@@ -4669,7 +4712,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
- ASSERT(!environment->HasBeenRegistered());
+ DCHECK(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
GenerateOsrPrologue();
@@ -4681,14 +4724,27 @@ void LCodeGen::DoParameter(LParameter* instr) {
}
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort(kDoPushArgumentNotImplementedForDoubleType);
- } else {
- __ Push(ToRegister(argument));
- after_push_argument_ = true;
+void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
+ __ PushPreamble(instr->argc(), kPointerSize);
+}
+
+
+void LCodeGen::DoPushArguments(LPushArguments* instr) {
+ MacroAssembler::PushPopQueue args(masm());
+
+ for (int i = 0; i < instr->ArgumentCount(); ++i) {
+ LOperand* arg = instr->argument(i);
+ if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
+ return;
+ }
+ args.Queue(ToRegister(arg));
}
+
+ // The preamble was done by LPreparePushArguments.
+ args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
+
+ after_push_argument_ = true;
}
@@ -4795,7 +4851,7 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
Register temp = ToRegister(instr->temp());
if (FLAG_debug_code) {
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->context()).is(cp));
Register index = ToRegister(instr->index());
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
@@ -4865,7 +4921,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
default: UNREACHABLE();
}
} else {
- ASSERT(right_op->IsConstantOperand());
+ DCHECK(right_op->IsConstantOperand());
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
@@ -4891,7 +4947,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
Register result = ToRegister(instr->result());
// Only ROR by register needs a temp.
- ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
+ DCHECK(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
(instr->temp() == NULL));
if (right_op->IsRegister()) {
@@ -4928,7 +4984,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
default: UNREACHABLE();
}
} else {
- ASSERT(right_op->IsConstantOperand());
+ DCHECK(right_op->IsConstantOperand());
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
@@ -4966,10 +5022,10 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->context()).is(cp));
Register scratch1 = x5;
Register scratch2 = x6;
- ASSERT(instr->IsMarkedAsCall());
+ DCHECK(instr->IsMarkedAsCall());
ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
// TODO(all): if Mov could handle object in new space then it could be used
@@ -4977,17 +5033,17 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
__ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
__ Push(cp, scratch1, scratch2); // The context is the first argument.
- CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- ASSERT(instr->HasEnvironment());
+ DCHECK(instr->HasEnvironment());
LEnvironment* env = instr->environment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
@@ -5004,7 +5060,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
LStackCheck* instr_;
};
- ASSERT(instr->HasEnvironment());
+ DCHECK(instr->HasEnvironment());
LEnvironment* env = instr->environment();
// There is no LLazyBailout instruction for stack-checks. We have to
// prepare for lazy deoptimization explicitly here.
@@ -5016,14 +5072,14 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
PredictableCodeSizeScope predictable(masm_,
Assembler::kCallSizeWithRelocation);
- ASSERT(instr->context()->IsRegister());
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(instr->context()->IsRegister());
+ DCHECK(ToRegister(instr->context()).is(cp));
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
__ Bind(&done);
} else {
- ASSERT(instr->hydrogen()->is_backwards_branch());
+ DCHECK(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
new(zone()) DeferredStackCheck(this, instr);
@@ -5071,7 +5127,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ Str(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteContextSlot(context,
target.offset(),
@@ -5120,7 +5176,7 @@ void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
bool key_is_constant = instr->key()->IsConstantOperand();
int constant_key = 0;
if (key_is_constant) {
- ASSERT(instr->temp() == NULL);
+ DCHECK(instr->temp() == NULL);
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xf0000000) {
Abort(kArrayIndexConstantValueTooBig);
@@ -5134,7 +5190,7 @@ void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
key_is_constant, constant_key,
elements_kind,
- instr->additional_index());
+ instr->base_offset());
if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
(elements_kind == FLOAT32_ELEMENTS)) {
@@ -5199,18 +5255,16 @@ void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
if (constant_key & 0xf0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
- int offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
- instr->additional_index());
- mem_op = FieldMemOperand(elements, offset);
+ int offset = instr->base_offset() + constant_key * kDoubleSize;
+ mem_op = MemOperand(elements, offset);
} else {
Register store_base = ToRegister(instr->temp());
Register key = ToRegister(instr->key());
bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- int offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
instr->hydrogen()->elements_kind(),
instr->hydrogen()->representation(),
- offset);
+ instr->base_offset());
}
if (instr->NeedsCanonicalization()) {
@@ -5238,51 +5292,51 @@ void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
Representation representation = instr->hydrogen()->value()->representation();
if (instr->key()->IsConstantOperand()) {
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- int offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
+ int offset = instr->base_offset() +
+ ToInteger32(const_operand) * kPointerSize;
store_base = elements;
if (representation.IsInteger32()) {
- ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
- STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
- (kSmiTag == 0));
- mem_op = UntagSmiFieldMemOperand(store_base, offset);
+ DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+ STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+ STATIC_ASSERT(kSmiTag == 0);
+ mem_op = UntagSmiMemOperand(store_base, offset);
} else {
- mem_op = FieldMemOperand(store_base, offset);
+ mem_op = MemOperand(store_base, offset);
}
} else {
store_base = scratch;
key = ToRegister(instr->key());
bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- int offset = FixedArray::OffsetOfElementAt(instr->additional_index());
mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
instr->hydrogen()->elements_kind(),
- representation, offset);
+ representation, instr->base_offset());
}
__ Store(value, mem_op, representation);
if (instr->hydrogen()->NeedsWriteBarrier()) {
- ASSERT(representation.IsTagged());
+ DCHECK(representation.IsTagged());
// This assignment may cause element_addr to alias store_base.
Register element_addr = scratch;
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
__ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
- kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed);
+ kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
+ instr->hydrogen()->PointersToHereCheckForValue());
}
}
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->object()).Is(x2));
- ASSERT(ToRegister(instr->key()).Is(x1));
- ASSERT(ToRegister(instr->value()).Is(x0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
Handle<Code> ic = instr->strict_mode() == STRICT
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
@@ -5299,8 +5353,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
int offset = access.offset();
if (access.IsExternalMemory()) {
- ASSERT(!instr->hydrogen()->has_transition());
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ DCHECK(!instr->hydrogen()->has_transition());
+ DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
Register value = ToRegister(instr->value());
__ Store(value, MemOperand(object, offset), representation);
return;
@@ -5309,9 +5363,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ AssertNotSmi(object);
if (representation.IsDouble()) {
- ASSERT(access.IsInobject());
- ASSERT(!instr->hydrogen()->has_transition());
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ DCHECK(access.IsInobject());
+ DCHECK(!instr->hydrogen()->has_transition());
+ DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
FPRegister value = ToDoubleRegister(instr->value());
__ Str(value, FieldMemOperand(object, offset));
return;
@@ -5319,7 +5373,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
- ASSERT(!representation.IsSmi() ||
+ DCHECK(!representation.IsSmi() ||
!instr->value()->IsConstantOperand() ||
IsInteger32Constant(LConstantOperand::cast(instr->value())));
@@ -5332,14 +5386,11 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
// Update the write barrier for the map field.
- __ RecordWriteField(object,
- HeapObject::kMapOffset,
- new_map_value,
- ToRegister(instr->temp1()),
- GetLinkRegisterState(),
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteForMap(object,
+ new_map_value,
+ ToRegister(instr->temp1()),
+ GetLinkRegisterState(),
+ kSaveFPRegs);
}
}
@@ -5355,7 +5406,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (representation.IsSmi() &&
instr->hydrogen()->value()->representation().IsInteger32()) {
- ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
#ifdef DEBUG
Register temp0 = ToRegister(instr->temp0());
__ Ldr(temp0, FieldMemOperand(destination, offset));
@@ -5363,11 +5414,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
// If destination aliased temp0, restore it to the address calculated
// earlier.
if (destination.Is(temp0)) {
- ASSERT(!access.IsInobject());
+ DCHECK(!access.IsInobject());
__ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
}
#endif
- STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+ STATIC_ASSERT(kSmiTag == 0);
__ Store(value, UntagSmiFieldMemOperand(destination, offset),
Representation::Integer32());
} else {
@@ -5381,27 +5433,27 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- instr->hydrogen()->SmiCheckForWriteBarrier());
+ instr->hydrogen()->SmiCheckForWriteBarrier(),
+ instr->hydrogen()->PointersToHereCheckForValue());
}
}
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->value()).is(x0));
- ASSERT(ToRegister(instr->object()).is(x1));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
- // Name must be in x2.
- __ Mov(x2, Operand(instr->name()));
+ __ Mov(StoreIC::NameRegister(), Operand(instr->name()));
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoStringAdd(LStringAdd* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->left()).Is(x1));
- ASSERT(ToRegister(instr->right()).Is(x0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).Is(x1));
+ DCHECK(ToRegister(instr->right()).Is(x0));
StringAddStub stub(isolate(),
instr->hydrogen()->flags(),
instr->hydrogen()->pretenure_flag());
@@ -5441,15 +5493,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
// contained in the register pointer map.
__ Mov(result, 0);
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
__ Push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ Push(index);
+ __ SmiTagAndPush(index);
- CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
instr->context());
__ AssertSmi(x0);
__ SmiUntag(x0);
@@ -5471,7 +5522,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode* deferred =
new(zone()) DeferredStringCharFromCode(this, instr);
- ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister32(instr->char_code());
Register result = ToRegister(instr->result());
@@ -5495,16 +5546,15 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
// contained in the register pointer map.
__ Mov(result, 0);
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ SmiTag(char_code);
- __ Push(char_code);
+ PushSafepointRegistersScope scope(this);
+ __ SmiTagAndPush(char_code);
CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(x0, result);
}
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -5647,15 +5697,15 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->value()).Is(x0));
- ASSERT(ToRegister(instr->result()).Is(x0));
+ DCHECK(ToRegister(instr->value()).Is(x0));
+ DCHECK(ToRegister(instr->result()).Is(x0));
__ Push(x0);
CallRuntime(Runtime::kToFastProperties, 1, instr);
}
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->context()).is(cp));
Label materialized;
// Registers will be used as follows:
// x7 = literals array.
@@ -5674,7 +5724,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ Mov(x11, Operand(instr->hydrogen()->pattern()));
__ Mov(x10, Operand(instr->hydrogen()->flags()));
__ Push(x7, x12, x11, x10);
- CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
__ Mov(x1, x0);
__ Bind(&materialized);
@@ -5687,7 +5737,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ Bind(&runtime_allocate);
__ Mov(x0, Smi::FromInt(size));
__ Push(x1, x0);
- CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
__ Pop(x1);
__ Bind(&allocated);
@@ -5713,8 +5763,8 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ Mov(new_map, Operand(to_map));
__ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
// Write barrier.
- __ RecordWriteField(object, HeapObject::kMapOffset, new_map, temp1,
- GetLinkRegisterState(), kDontSaveFPRegs);
+ __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
+ kDontSaveFPRegs);
} else {
{
UseScratchRegisterScope temps(masm());
@@ -5723,15 +5773,14 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
DONT_DO_SMI_CHECK);
}
- ASSERT(object.is(x0));
- ASSERT(ToRegister(instr->context()).is(cp));
- PushSafepointRegistersScope scope(
- this, Safepoint::kWithRegistersAndDoubles);
+ DCHECK(object.is(x0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ PushSafepointRegistersScope scope(this);
__ Mov(x1, Operand(to_map));
bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
__ CallStub(&stub);
- RecordSafepointWithRegistersAndDoubles(
+ RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kLazyDeopt);
}
__ Bind(&not_applicable);
@@ -5775,7 +5824,7 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Factory* factory = isolate()->factory();
if (String::Equals(type_name, factory->number_string())) {
- ASSERT(instr->temp1() != NULL);
+ DCHECK(instr->temp1() != NULL);
Register map = ToRegister(instr->temp1());
__ JumpIfSmi(value, true_label);
@@ -5784,7 +5833,7 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
EmitBranch(instr, eq);
} else if (String::Equals(type_name, factory->string_string())) {
- ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
Register map = ToRegister(instr->temp1());
Register scratch = ToRegister(instr->temp2());
@@ -5795,7 +5844,7 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
} else if (String::Equals(type_name, factory->symbol_string())) {
- ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
Register map = ToRegister(instr->temp1());
Register scratch = ToRegister(instr->temp2());
@@ -5808,13 +5857,8 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
__ CompareRoot(value, Heap::kFalseValueRootIndex);
EmitBranch(instr, eq);
- } else if (FLAG_harmony_typeof &&
- String::Equals(type_name, factory->null_string())) {
- __ CompareRoot(value, Heap::kNullValueRootIndex);
- EmitBranch(instr, eq);
-
} else if (String::Equals(type_name, factory->undefined_string())) {
- ASSERT(instr->temp1() != NULL);
+ DCHECK(instr->temp1() != NULL);
Register scratch = ToRegister(instr->temp1());
__ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
@@ -5826,7 +5870,7 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
} else if (String::Equals(type_name, factory->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- ASSERT(instr->temp1() != NULL);
+ DCHECK(instr->temp1() != NULL);
Register type = ToRegister(instr->temp1());
__ JumpIfSmi(value, false_label);
@@ -5835,20 +5879,18 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
} else if (String::Equals(type_name, factory->object_string())) {
- ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
Register map = ToRegister(instr->temp1());
Register scratch = ToRegister(instr->temp2());
__ JumpIfSmi(value, false_label);
- if (!FLAG_harmony_typeof) {
- __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
- }
+ __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
__ JumpIfObjectType(value, map, scratch,
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
__ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ B(gt, false_label);
// Check for undetectable objects => false.
- __ Ldrb(scratch, FieldMemOperand(value, Map::kBitFieldOffset));
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
} else {
@@ -5910,7 +5952,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
__ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+ __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
__ B(&done);
__ Bind(&copy_receiver);
@@ -5923,7 +5965,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register result,
Register object,
Register index) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
__ Push(object);
__ Push(index);
__ Mov(cp, 0);
@@ -5993,4 +6035,21 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ Bind(&done);
}
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+ Register context = ToRegister(instr->context());
+ __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+ Handle<ScopeInfo> scope_info = instr->scope_info();
+ __ Push(scope_info);
+ __ Push(ToRegister(instr->function()));
+ CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.h b/deps/v8/src/arm64/lithium-codegen-arm64.h
index 8c25e6340..bb06f483a 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.h
@@ -5,14 +5,14 @@
#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
-#include "arm64/lithium-arm64.h"
+#include "src/arm64/lithium-arm64.h"
-#include "arm64/lithium-gap-resolver-arm64.h"
-#include "deoptimizer.h"
-#include "lithium-codegen.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-#include "utils.h"
+#include "src/arm64/lithium-gap-resolver-arm64.h"
+#include "src/deoptimizer.h"
+#include "src/lithium-codegen.h"
+#include "src/safepoint-table.h"
+#include "src/scopes.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -44,7 +44,7 @@ class LCodeGen: public LCodeGenBase {
}
~LCodeGen() {
- ASSERT(!after_push_argument_ || inlined_arguments_);
+ DCHECK(!after_push_argument_ || inlined_arguments_);
}
// Simple accessors.
@@ -255,14 +255,14 @@ class LCodeGen: public LCodeGenBase {
bool key_is_constant,
int constant_key,
ElementsKind elements_kind,
- int additional_index);
+ int base_offset);
MemOperand PrepareKeyedArrayOperand(Register base,
Register elements,
Register key,
bool key_is_tagged,
ElementsKind elements_kind,
Representation representation,
- int additional_index);
+ int base_offset);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
@@ -348,9 +348,6 @@ class LCodeGen: public LCodeGenBase {
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -388,12 +385,11 @@ class LCodeGen: public LCodeGenBase {
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
- PushSafepointRegistersScope(LCodeGen* codegen,
- Safepoint::Kind kind)
+ explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
- ASSERT(codegen_->info()->is_calling());
- ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = kind;
+ DCHECK(codegen_->info()->is_calling());
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
UseScratchRegisterScope temps(codegen_->masm_);
// Preserve the value of lr which must be saved on the stack (the call to
@@ -401,39 +397,14 @@ class LCodeGen: public LCodeGenBase {
Register to_be_pushed_lr =
temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
codegen_->masm_->Mov(to_be_pushed_lr, lr);
- switch (codegen_->expected_safepoint_kind_) {
- case Safepoint::kWithRegisters: {
- StoreRegistersStateStub stub(codegen_->isolate(), kDontSaveFPRegs);
- codegen_->masm_->CallStub(&stub);
- break;
- }
- case Safepoint::kWithRegistersAndDoubles: {
- StoreRegistersStateStub stub(codegen_->isolate(), kSaveFPRegs);
- codegen_->masm_->CallStub(&stub);
- break;
- }
- default:
- UNREACHABLE();
- }
+ StoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->CallStub(&stub);
}
~PushSafepointRegistersScope() {
- Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
- ASSERT((kind & Safepoint::kWithRegisters) != 0);
- switch (kind) {
- case Safepoint::kWithRegisters: {
- RestoreRegistersStateStub stub(codegen_->isolate(), kDontSaveFPRegs);
- codegen_->masm_->CallStub(&stub);
- break;
- }
- case Safepoint::kWithRegistersAndDoubles: {
- RestoreRegistersStateStub stub(codegen_->isolate(), kSaveFPRegs);
- codegen_->masm_->CallStub(&stub);
- break;
- }
- default:
- UNREACHABLE();
- }
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ RestoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->CallStub(&stub);
codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}
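
The reworked PushSafepointRegistersScope above drops the Safepoint::Kind parameter: the constructor now always calls the single StoreRegistersStateStub and the destructor the matching RestoreRegistersStateStub, with the switch over register kinds gone. A stripped-down sketch of that RAII shape, purely illustrative and using none of the real V8 types:

#include <iostream>

struct PushSafepointRegistersScopeSketch {
  PushSafepointRegistersScopeSketch() {
    std::cout << "StoreRegistersStateStub::CallStub\n";    // constructor spills
  }
  ~PushSafepointRegistersScopeSketch() {
    std::cout << "RestoreRegistersStateStub::CallStub\n";  // destructor restores
  }
};

int main() {
  PushSafepointRegistersScopeSketch scope;
  std::cout << "deferred code runs between the two stub calls\n";
  return 0;  // restore happens automatically when the scope is left
}
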
diff --git a/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc b/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
index c721cb48a..d06a37bc4 100644
--- a/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
+++ b/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
@@ -2,38 +2,38 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#include "arm64/lithium-gap-resolver-arm64.h"
-#include "arm64/lithium-codegen-arm64.h"
+#include "src/arm64/delayed-masm-arm64-inl.h"
+#include "src/arm64/lithium-codegen-arm64.h"
+#include "src/arm64/lithium-gap-resolver-arm64.h"
namespace v8 {
namespace internal {
-// We use the root register to spill a value while breaking a cycle in parallel
-// moves. We don't need access to roots while resolving the move list and using
-// the root register has two advantages:
-// - It is not in crankshaft allocatable registers list, so it can't interfere
-// with any of the moves we are resolving.
-// - We don't need to push it on the stack, as we can reload it with its value
-// once we have resolved a cycle.
-#define kSavedValue root
+#define __ ACCESS_MASM((&masm_))
-// We use the MacroAssembler floating-point scratch register to break a cycle
-// involving double values as the MacroAssembler will not need it for the
-// operations performed by the gap resolver.
-#define kSavedDoubleValue fp_scratch
+void DelayedGapMasm::EndDelayedUse() {
+ DelayedMasm::EndDelayedUse();
+ if (scratch_register_used()) {
+ DCHECK(ScratchRegister().Is(root));
+ DCHECK(!pending());
+ InitializeRootRegister();
+ reset_scratch_register_used();
+ }
+}
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
- saved_destination_(NULL), need_to_restore_root_(false) { }
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner), masm_(owner, owner->masm()), moves_(32, owner->zone()),
+ root_index_(0), in_cycle_(false), saved_destination_(NULL) {
+}
-#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::Resolve(LParallelMove* parallel_move) {
- ASSERT(moves_.is_empty());
+ DCHECK(moves_.is_empty());
+ DCHECK(!masm_.pending());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
@@ -56,16 +56,12 @@ void LGapResolver::Resolve(LParallelMove* parallel_move) {
LMoveOperands move = moves_[i];
if (!move.IsEliminated()) {
- ASSERT(move.source()->IsConstantOperand());
+ DCHECK(move.source()->IsConstantOperand());
EmitMove(i);
}
}
- if (need_to_restore_root_) {
- ASSERT(kSavedValue.Is(root));
- __ InitializeRootRegister();
- need_to_restore_root_ = false;
- }
+ __ EndDelayedUse();
moves_.Rewind(0);
}
@@ -92,13 +88,13 @@ void LGapResolver::PerformMove(int index) {
// cycles in the move graph.
LMoveOperands& current_move = moves_[index];
- ASSERT(!current_move.IsPending());
- ASSERT(!current_move.IsRedundant());
+ DCHECK(!current_move.IsPending());
+ DCHECK(!current_move.IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
- ASSERT(current_move.source() != NULL); // Otherwise it will look eliminated.
+ DCHECK(current_move.source() != NULL); // Otherwise it will look eliminated.
LOperand* destination = current_move.destination();
current_move.set_destination(NULL);
@@ -125,7 +121,7 @@ void LGapResolver::PerformMove(int index) {
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
- ASSERT(other_move.IsPending());
+ DCHECK(other_move.IsPending());
BreakCycle(index);
return;
}
@@ -136,12 +132,12 @@ void LGapResolver::PerformMove(int index) {
void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
}
}
#endif
@@ -149,13 +145,8 @@ void LGapResolver::Verify() {
void LGapResolver::BreakCycle(int index) {
- ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
- ASSERT(!in_cycle_);
-
- // We use registers which are not allocatable by crankshaft to break the cycle
- // to be sure they don't interfere with the moves we are resolving.
- ASSERT(!kSavedValue.IsAllocatable());
- ASSERT(!kSavedDoubleValue.IsAllocatable());
+ DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ DCHECK(!in_cycle_);
// We save in a register the source of that move and we remember its
// destination. Then we mark this move as resolved so the cycle is
@@ -165,19 +156,15 @@ void LGapResolver::BreakCycle(int index) {
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
- need_to_restore_root_ = true;
- __ Mov(kSavedValue, cgen_->ToRegister(source));
+ AcquireSavedValueRegister();
+ __ Mov(SavedValueRegister(), cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
- need_to_restore_root_ = true;
- __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
+ AcquireSavedValueRegister();
+ __ Load(SavedValueRegister(), cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
- ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue));
- cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue);
- __ Fmov(kSavedDoubleValue, cgen_->ToDoubleRegister(source));
+ __ Fmov(SavedFPValueRegister(), cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
- ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue));
- cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue);
- __ Ldr(kSavedDoubleValue, cgen_->ToMemOperand(source));
+ __ Load(SavedFPValueRegister(), cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
@@ -190,19 +177,20 @@ void LGapResolver::BreakCycle(int index) {
void LGapResolver::RestoreValue() {
- ASSERT(in_cycle_);
- ASSERT(saved_destination_ != NULL);
+ DCHECK(in_cycle_);
+ DCHECK(saved_destination_ != NULL);
if (saved_destination_->IsRegister()) {
- __ Mov(cgen_->ToRegister(saved_destination_), kSavedValue);
+ __ Mov(cgen_->ToRegister(saved_destination_), SavedValueRegister());
+ ReleaseSavedValueRegister();
} else if (saved_destination_->IsStackSlot()) {
- __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
+ __ Store(SavedValueRegister(), cgen_->ToMemOperand(saved_destination_));
+ ReleaseSavedValueRegister();
} else if (saved_destination_->IsDoubleRegister()) {
- __ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedDoubleValue);
- cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue);
+ __ Fmov(cgen_->ToDoubleRegister(saved_destination_),
+ SavedFPValueRegister());
} else if (saved_destination_->IsDoubleStackSlot()) {
- __ Str(kSavedDoubleValue, cgen_->ToMemOperand(saved_destination_));
- cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue);
+ __ Store(SavedFPValueRegister(), cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
@@ -224,16 +212,16 @@ void LGapResolver::EmitMove(int index) {
if (destination->IsRegister()) {
__ Mov(cgen_->ToRegister(destination), source_register);
} else {
- ASSERT(destination->IsStackSlot());
- __ Str(source_register, cgen_->ToMemOperand(destination));
+ DCHECK(destination->IsStackSlot());
+ __ Store(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
- __ Ldr(cgen_->ToRegister(destination), source_operand);
+ __ Load(cgen_->ToRegister(destination), source_operand);
} else {
- ASSERT(destination->IsStackSlot());
+ DCHECK(destination->IsStackSlot());
EmitStackSlotMove(index);
}
@@ -252,17 +240,30 @@ void LGapResolver::EmitMove(int index) {
DoubleRegister result = cgen_->ToDoubleRegister(destination);
__ Fmov(result, cgen_->ToDouble(constant_source));
} else {
- ASSERT(destination->IsStackSlot());
- ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
- need_to_restore_root_ = true;
+ DCHECK(destination->IsStackSlot());
+ DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
if (cgen_->IsSmi(constant_source)) {
- __ Mov(kSavedValue, cgen_->ToSmi(constant_source));
+ Smi* smi = cgen_->ToSmi(constant_source);
+ __ StoreConstant(reinterpret_cast<intptr_t>(smi),
+ cgen_->ToMemOperand(destination));
} else if (cgen_->IsInteger32Constant(constant_source)) {
- __ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
+ __ StoreConstant(cgen_->ToInteger32(constant_source),
+ cgen_->ToMemOperand(destination));
} else {
- __ LoadObject(kSavedValue, cgen_->ToHandle(constant_source));
+ Handle<Object> handle = cgen_->ToHandle(constant_source);
+ AllowDeferredHandleDereference smi_object_check;
+ if (handle->IsSmi()) {
+ Object* obj = *handle;
+ DCHECK(!obj->IsHeapObject());
+ __ StoreConstant(reinterpret_cast<intptr_t>(obj),
+ cgen_->ToMemOperand(destination));
+ } else {
+ AcquireSavedValueRegister();
+ __ LoadObject(SavedValueRegister(), handle);
+ __ Store(SavedValueRegister(), cgen_->ToMemOperand(destination));
+ ReleaseSavedValueRegister();
+ }
}
- __ Str(kSavedValue, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleRegister()) {
@@ -270,16 +271,16 @@ void LGapResolver::EmitMove(int index) {
if (destination->IsDoubleRegister()) {
__ Fmov(cgen_->ToDoubleRegister(destination), src);
} else {
- ASSERT(destination->IsDoubleStackSlot());
- __ Str(src, cgen_->ToMemOperand(destination));
+ DCHECK(destination->IsDoubleStackSlot());
+ __ Store(src, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleStackSlot()) {
MemOperand src = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
- __ Ldr(cgen_->ToDoubleRegister(destination), src);
+ __ Load(cgen_->ToDoubleRegister(destination), src);
} else {
- ASSERT(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsDoubleStackSlot());
EmitStackSlotMove(index);
}
@@ -291,21 +292,4 @@ void LGapResolver::EmitMove(int index) {
moves_[index].Eliminate();
}
-
-void LGapResolver::EmitStackSlotMove(int index) {
- // We need a temp register to perform a stack slot to stack slot move, and
- // the register must not be involved in breaking cycles.
-
- // Use the Crankshaft double scratch register as the temporary.
- DoubleRegister temp = crankshaft_fp_scratch;
-
- LOperand* src = moves_[index].source();
- LOperand* dst = moves_[index].destination();
-
- ASSERT(src->IsStackSlot());
- ASSERT(dst->IsStackSlot());
- __ Ldr(temp, cgen_->ToMemOperand(src));
- __ Str(temp, cgen_->ToMemOperand(dst));
-}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/arm64/lithium-gap-resolver-arm64.h b/deps/v8/src/arm64/lithium-gap-resolver-arm64.h
index ae6719073..2eb651b92 100644
--- a/deps/v8/src/arm64/lithium-gap-resolver-arm64.h
+++ b/deps/v8/src/arm64/lithium-gap-resolver-arm64.h
@@ -5,9 +5,10 @@
#ifndef V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
#define V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "lithium.h"
+#include "src/arm64/delayed-masm-arm64.h"
+#include "src/lithium.h"
namespace v8 {
namespace internal {
@@ -15,6 +16,21 @@ namespace internal {
class LCodeGen;
class LGapResolver;
+class DelayedGapMasm : public DelayedMasm {
+ public:
+ DelayedGapMasm(LCodeGen* owner, MacroAssembler* masm)
+ : DelayedMasm(owner, masm, root) {
+ // We use the root register as an extra scratch register.
+ // The root register has two advantages:
+ // - It is not in the crankshaft allocatable registers list, so it can't
+ // interfere with the allocatable registers.
+ // - We don't need to push it on the stack, as we can reload it with its
+ // value once we have finished.
+ }
+ void EndDelayedUse();
+};
+
+
class LGapResolver BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
@@ -43,12 +59,32 @@ class LGapResolver BASE_EMBEDDED {
void EmitMove(int index);
// Emit a move from one stack slot to another.
- void EmitStackSlotMove(int index);
+ void EmitStackSlotMove(int index) {
+ masm_.StackSlotMove(moves_[index].source(), moves_[index].destination());
+ }
// Verify the move list before performing moves.
void Verify();
+ // Registers used to solve cycles.
+ const Register& SavedValueRegister() {
+ DCHECK(!masm_.ScratchRegister().IsAllocatable());
+ return masm_.ScratchRegister();
+ }
+ // The scratch register is used to break cycles and to store constants.
+ // These two methods switch from one mode to the other.
+ void AcquireSavedValueRegister() { masm_.AcquireScratchRegister(); }
+ void ReleaseSavedValueRegister() { masm_.ReleaseScratchRegister(); }
+ const FPRegister& SavedFPValueRegister() {
+ // We use the Crankshaft floating-point scratch register to break a cycle
+ // involving double values as the MacroAssembler will not need it for the
+ // operations performed by the gap resolver.
+ DCHECK(!crankshaft_fp_scratch.IsAllocatable());
+ return crankshaft_fp_scratch;
+ }
+
LCodeGen* cgen_;
+ DelayedGapMasm masm_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
@@ -56,10 +92,6 @@ class LGapResolver BASE_EMBEDDED {
int root_index_;
bool in_cycle_;
LOperand* saved_destination_;
-
- // We use the root register as a scratch in a few places. When that happens,
- // this flag is set to indicate that it needs to be restored.
- bool need_to_restore_root_;
};
} } // namespace v8::internal
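
Both gap-resolver files above turn on one idea: a cyclic parallel move needs exactly one scratch location (the root register, or crankshaft_fp_scratch for doubles) to break the cycle; BreakCycle saves the root move's source into it and RestoreValue drains it into the saved destination. A standalone sketch of the technique with plain ints, not V8 code:

#include <cassert>

int main() {
  int a = 1, b = 2, c = 3;
  // The parallel move a <- b, b <- c, c <- a forms a cycle.
  int saved = a;  // BreakCycle: stash the root move's source in the scratch
  a = b;          // the remaining moves of the cycle can now run in order
  b = c;
  c = saved;      // RestoreValue: the saved value lands in the last destination
  assert(a == 2 && b == 3 && c == 1);
  return 0;
}
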
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index 7c9258a9c..f7c724842 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -7,13 +7,12 @@
#include <ctype.h>
-#include "v8globals.h"
-#include "globals.h"
+#include "src/globals.h"
-#include "arm64/assembler-arm64.h"
-#include "arm64/assembler-arm64-inl.h"
-#include "arm64/macro-assembler-arm64.h"
-#include "arm64/instrument-arm64.h"
+#include "src/arm64/assembler-arm64-inl.h"
+#include "src/arm64/assembler-arm64.h"
+#include "src/arm64/instrument-arm64.h"
+#include "src/arm64/macro-assembler-arm64.h"
namespace v8 {
@@ -38,7 +37,7 @@ MemOperand UntagSmiMemOperand(Register object, int offset) {
Handle<Object> MacroAssembler::CodeObject() {
- ASSERT(!code_object_.is_null());
+ DCHECK(!code_object_.is_null());
return code_object_;
}
@@ -46,8 +45,8 @@ Handle<Object> MacroAssembler::CodeObject() {
void MacroAssembler::And(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, AND);
}
@@ -55,15 +54,15 @@ void MacroAssembler::And(const Register& rd,
void MacroAssembler::Ands(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, ANDS);
}
void MacroAssembler::Tst(const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
}
@@ -71,8 +70,8 @@ void MacroAssembler::Tst(const Register& rn,
void MacroAssembler::Bic(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, BIC);
}
@@ -80,8 +79,8 @@ void MacroAssembler::Bic(const Register& rd,
void MacroAssembler::Bics(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, BICS);
}
@@ -89,8 +88,8 @@ void MacroAssembler::Bics(const Register& rd,
void MacroAssembler::Orr(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, ORR);
}
@@ -98,8 +97,8 @@ void MacroAssembler::Orr(const Register& rd,
void MacroAssembler::Orn(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, ORN);
}
@@ -107,8 +106,8 @@ void MacroAssembler::Orn(const Register& rd,
void MacroAssembler::Eor(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, EOR);
}
@@ -116,8 +115,8 @@ void MacroAssembler::Eor(const Register& rd,
void MacroAssembler::Eon(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, EON);
}
@@ -126,9 +125,9 @@ void MacroAssembler::Ccmp(const Register& rn,
const Operand& operand,
StatusFlags nzcv,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
+ DCHECK(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMN);
} else {
ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
}
@@ -139,9 +138,9 @@ void MacroAssembler::Ccmn(const Register& rn,
const Operand& operand,
StatusFlags nzcv,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
+ DCHECK(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMP);
} else {
ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
}
@@ -151,9 +150,10 @@ void MacroAssembler::Ccmn(const Register& rn,
void MacroAssembler::Add(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
+ DCHECK(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
+ IsImmAddSub(-operand.ImmediateValue())) {
+ AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, SUB);
} else {
AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
}
@@ -162,9 +162,10 @@ void MacroAssembler::Add(const Register& rd,
void MacroAssembler::Adds(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
+ DCHECK(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
+ IsImmAddSub(-operand.ImmediateValue())) {
+ AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, SUB);
} else {
AddSubMacro(rd, rn, operand, SetFlags, ADD);
}
@@ -174,9 +175,10 @@ void MacroAssembler::Adds(const Register& rd,
void MacroAssembler::Sub(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
+ DCHECK(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
+ IsImmAddSub(-operand.ImmediateValue())) {
+ AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, ADD);
} else {
AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
}
@@ -186,9 +188,10 @@ void MacroAssembler::Sub(const Register& rd,
void MacroAssembler::Subs(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
+ DCHECK(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
+ IsImmAddSub(-operand.ImmediateValue())) {
+ AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, ADD);
} else {
AddSubMacro(rd, rn, operand, SetFlags, SUB);
}
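
The Add/Adds/Sub/Subs changes above only fold a negative immediate into the opposite operation when the negated value also passes IsImmAddSub; otherwise the generic macro path materialises the operand in a scratch register. A hedged sketch of that encodability check, assuming it matches the AArch64 ADD/SUB immediate form (12 bits, optionally shifted left by 12); the helper name below is invented for illustration:

#include <cassert>
#include <cstdint>

// Assumed shape of the check: a non-negative 12-bit immediate, or a 12-bit
// immediate shifted left by 12 with the low 12 bits clear.
bool IsImmAddSubSketch(int64_t imm) {
  return (imm >= 0) &&
         (((imm & ~int64_t{0xFFF}) == 0) ||
          ((imm & ~(int64_t{0xFFF} << 12)) == 0));
}

int main() {
  // Add(rd, rn, Operand(-16)) may be rewritten as Sub(rd, rn, 16) ...
  assert(IsImmAddSubSketch(16));
  assert(IsImmAddSubSketch(0x456000));  // shifted form also encodes
  // ... but a wider negated constant fails the check, so the generic ADD
  // path is taken instead of the SUB rewrite.
  assert(!IsImmAddSubSketch(0x123456));
  return 0;
}
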
@@ -196,23 +199,23 @@ void MacroAssembler::Subs(const Register& rd,
void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Adds(AppropriateZeroRegFor(rn), rn, operand);
}
void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Subs(AppropriateZeroRegFor(rn), rn, operand);
}
void MacroAssembler::Neg(const Register& rd,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
if (operand.IsImmediate()) {
- Mov(rd, -operand.immediate());
+ Mov(rd, -operand.ImmediateValue());
} else {
Sub(rd, AppropriateZeroRegFor(rd), operand);
}
@@ -221,7 +224,7 @@ void MacroAssembler::Neg(const Register& rd,
void MacroAssembler::Negs(const Register& rd,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Subs(rd, AppropriateZeroRegFor(rd), operand);
}
@@ -229,8 +232,8 @@ void MacroAssembler::Negs(const Register& rd,
void MacroAssembler::Adc(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
}
@@ -238,8 +241,8 @@ void MacroAssembler::Adc(const Register& rd,
void MacroAssembler::Adcs(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
}
@@ -247,8 +250,8 @@ void MacroAssembler::Adcs(const Register& rd,
void MacroAssembler::Sbc(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
}
@@ -256,16 +259,16 @@ void MacroAssembler::Sbc(const Register& rd,
void MacroAssembler::Sbcs(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
}
void MacroAssembler::Ngc(const Register& rd,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
Register zr = AppropriateZeroRegFor(rd);
Sbc(rd, zr, operand);
}
@@ -273,34 +276,44 @@ void MacroAssembler::Ngc(const Register& rd,
void MacroAssembler::Ngcs(const Register& rd,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
Register zr = AppropriateZeroRegFor(rd);
Sbcs(rd, zr, operand);
}
void MacroAssembler::Mvn(const Register& rd, uint64_t imm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
Mov(rd, ~imm);
}
#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
- ASSERT(allow_macro_instructions_); \
+ DCHECK(allow_macro_instructions_); \
LoadStoreMacro(REG, addr, OP); \
}
LS_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION
+#define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
+ void MacroAssembler::FN(const REGTYPE REG, const REGTYPE REG2, \
+ const MemOperand& addr) { \
+ DCHECK(allow_macro_instructions_); \
+ LoadStorePairMacro(REG, REG2, addr, OP); \
+ }
+LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
+
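
The new LSPAIR_MACRO_LIST expansion above follows the same X-macro pattern as the existing LS_MACRO_LIST block: a single list of tuples stamps out one MacroAssembler wrapper per load/store-pair operation. A toy standalone example of the pattern, unrelated to the real list contents:

#include <iostream>

#define PAIR_MACRO_LIST(V) \
  V(Ldp, "load pair")      \
  V(Stp, "store pair")

#define DEFINE_FUNCTION(NAME, DESC) \
  void NAME() { std::cout << #NAME << ": " << DESC << '\n'; }
PAIR_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION

int main() {
  Ldp();  // both wrappers were generated from the single list above
  Stp();
  return 0;
}
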
void MacroAssembler::Asr(const Register& rd,
const Register& rn,
unsigned shift) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
asr(rd, rn, shift);
}
@@ -308,8 +321,8 @@ void MacroAssembler::Asr(const Register& rd,
void MacroAssembler::Asr(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
asrv(rd, rn, rm);
}
@@ -321,7 +334,7 @@ void MacroAssembler::B(Label* label) {
void MacroAssembler::B(Condition cond, Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
B(label, cond);
}
@@ -330,8 +343,8 @@ void MacroAssembler::Bfi(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
bfi(rd, rn, lsb, width);
}
@@ -340,40 +353,40 @@ void MacroAssembler::Bfxil(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
bfxil(rd, rn, lsb, width);
}
void MacroAssembler::Bind(Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
bind(label);
}
void MacroAssembler::Bl(Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
bl(label);
}
void MacroAssembler::Blr(const Register& xn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!xn.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!xn.IsZero());
blr(xn);
}
void MacroAssembler::Br(const Register& xn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!xn.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!xn.IsZero());
br(xn);
}
void MacroAssembler::Brk(int code) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
brk(code);
}
@@ -381,9 +394,9 @@ void MacroAssembler::Brk(int code) {
void MacroAssembler::Cinc(const Register& rd,
const Register& rn,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
+ DCHECK((cond != al) && (cond != nv));
cinc(rd, rn, cond);
}
@@ -391,23 +404,23 @@ void MacroAssembler::Cinc(const Register& rd,
void MacroAssembler::Cinv(const Register& rd,
const Register& rn,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
+ DCHECK((cond != al) && (cond != nv));
cinv(rd, rn, cond);
}
void MacroAssembler::Cls(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
cls(rd, rn);
}
void MacroAssembler::Clz(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
clz(rd, rn);
}
@@ -415,9 +428,9 @@ void MacroAssembler::Clz(const Register& rd, const Register& rn) {
void MacroAssembler::Cneg(const Register& rd,
const Register& rn,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
+ DCHECK((cond != al) && (cond != nv));
cneg(rd, rn, cond);
}
@@ -426,9 +439,9 @@ void MacroAssembler::Cneg(const Register& rd,
// due to the truncation side-effect when used on W registers.
void MacroAssembler::CzeroX(const Register& rd,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsSP() && rd.Is64Bits());
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsSP() && rd.Is64Bits());
+ DCHECK((cond != al) && (cond != nv));
csel(rd, xzr, rd, cond);
}
@@ -438,10 +451,10 @@ void MacroAssembler::CzeroX(const Register& rd,
void MacroAssembler::CmovX(const Register& rd,
const Register& rn,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsSP());
- ASSERT(rd.Is64Bits() && rn.Is64Bits());
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsSP());
+ DCHECK(rd.Is64Bits() && rn.Is64Bits());
+ DCHECK((cond != al) && (cond != nv));
if (!rd.is(rn)) {
csel(rd, rn, rd, cond);
}
@@ -449,17 +462,17 @@ void MacroAssembler::CmovX(const Register& rd,
void MacroAssembler::Cset(const Register& rd, Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
+ DCHECK((cond != al) && (cond != nv));
cset(rd, cond);
}
void MacroAssembler::Csetm(const Register& rd, Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
+ DCHECK((cond != al) && (cond != nv));
csetm(rd, cond);
}
@@ -468,9 +481,9 @@ void MacroAssembler::Csinc(const Register& rd,
const Register& rn,
const Register& rm,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
+ DCHECK((cond != al) && (cond != nv));
csinc(rd, rn, rm, cond);
}
@@ -479,9 +492,9 @@ void MacroAssembler::Csinv(const Register& rd,
const Register& rn,
const Register& rm,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
+ DCHECK((cond != al) && (cond != nv));
csinv(rd, rn, rm, cond);
}
@@ -490,27 +503,27 @@ void MacroAssembler::Csneg(const Register& rd,
const Register& rn,
const Register& rm,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
+ DCHECK((cond != al) && (cond != nv));
csneg(rd, rn, rm, cond);
}
void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
dmb(domain, type);
}
void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
dsb(domain, type);
}
void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
debug(message, code, params);
}
@@ -519,14 +532,14 @@ void MacroAssembler::Extr(const Register& rd,
const Register& rn,
const Register& rm,
unsigned lsb) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
extr(rd, rn, rm, lsb);
}
void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fabs(fd, fn);
}
@@ -534,7 +547,7 @@ void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
void MacroAssembler::Fadd(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fadd(fd, fn, fm);
}
@@ -543,20 +556,20 @@ void MacroAssembler::Fccmp(const FPRegister& fn,
const FPRegister& fm,
StatusFlags nzcv,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK((cond != al) && (cond != nv));
fccmp(fn, fm, nzcv, cond);
}
void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fcmp(fn, fm);
}
void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
if (value != 0.0) {
UseScratchRegisterScope temps(this);
FPRegister tmp = temps.AcquireSameSizeAs(fn);
@@ -572,68 +585,68 @@ void MacroAssembler::Fcsel(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK((cond != al) && (cond != nv));
fcsel(fd, fn, fm, cond);
}
void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fcvt(fd, fn);
}
void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
fcvtas(rd, fn);
}
void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
fcvtau(rd, fn);
}
void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
fcvtms(rd, fn);
}
void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
fcvtmu(rd, fn);
}
void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
fcvtns(rd, fn);
}
void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
fcvtnu(rd, fn);
}
void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
fcvtzs(rd, fn);
}
void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
fcvtzu(rd, fn);
}
@@ -641,7 +654,7 @@ void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
void MacroAssembler::Fdiv(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fdiv(fd, fn, fm);
}
@@ -650,7 +663,7 @@ void MacroAssembler::Fmadd(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
const FPRegister& fa) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fmadd(fd, fn, fm, fa);
}
@@ -658,7 +671,7 @@ void MacroAssembler::Fmadd(const FPRegister& fd,
void MacroAssembler::Fmax(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fmax(fd, fn, fm);
}
@@ -666,7 +679,7 @@ void MacroAssembler::Fmax(const FPRegister& fd,
void MacroAssembler::Fmaxnm(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fmaxnm(fd, fn, fm);
}
@@ -674,7 +687,7 @@ void MacroAssembler::Fmaxnm(const FPRegister& fd,
void MacroAssembler::Fmin(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fmin(fd, fn, fm);
}
@@ -682,13 +695,13 @@ void MacroAssembler::Fmin(const FPRegister& fd,
void MacroAssembler::Fminnm(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fminnm(fd, fn, fm);
}
void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
// Only emit an instruction if fd and fn are different, and they are both D
// registers. fmov(s0, s0) is not a no-op because it clears the top word of
// d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
@@ -700,41 +713,37 @@ void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
void MacroAssembler::Fmov(FPRegister fd, Register rn) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fmov(fd, rn);
}
void MacroAssembler::Fmov(FPRegister fd, double imm) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
if (fd.Is32Bits()) {
Fmov(fd, static_cast<float>(imm));
return;
}
- ASSERT(fd.Is64Bits());
+ DCHECK(fd.Is64Bits());
if (IsImmFP64(imm)) {
fmov(fd, imm);
} else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
fmov(fd, xzr);
} else {
- UseScratchRegisterScope temps(this);
- Register tmp = temps.AcquireX();
- // TODO(all): Use Assembler::ldr(const FPRegister& ft, double imm).
- Mov(tmp, double_to_rawbits(imm));
- Fmov(fd, tmp);
+ Ldr(fd, imm);
}
}
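
Fmov(fd, double) above now falls back to Ldr(fd, imm), a literal-pool load of the raw bit pattern produced by double_to_rawbits, instead of bouncing the value through a scratch X register. A small sketch of that conversion; the helper below is an assumed stand-in, not the V8 function:

#include <cassert>
#include <cstdint>
#include <cstring>

// Assumed equivalent of double_to_rawbits: a bit-exact reinterpretation of
// the double, which is what ends up in the literal pool entry.
uint64_t DoubleToRawbitsSketch(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}

int main() {
  assert(DoubleToRawbitsSketch(1.0) == 0x3FF0000000000000ULL);
  // +0.0 has an all-zero pattern, which is why the code above prefers
  // fmov from xzr over a literal load for that case.
  assert(DoubleToRawbitsSketch(0.0) == 0ULL);
  return 0;
}
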
void MacroAssembler::Fmov(FPRegister fd, float imm) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
if (fd.Is64Bits()) {
Fmov(fd, static_cast<double>(imm));
return;
}
- ASSERT(fd.Is32Bits());
+ DCHECK(fd.Is32Bits());
if (IsImmFP32(imm)) {
fmov(fd, imm);
} else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
@@ -750,8 +759,8 @@ void MacroAssembler::Fmov(FPRegister fd, float imm) {
void MacroAssembler::Fmov(Register rd, FPRegister fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
fmov(rd, fn);
}
@@ -760,7 +769,7 @@ void MacroAssembler::Fmsub(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
const FPRegister& fa) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fmsub(fd, fn, fm, fa);
}
@@ -768,13 +777,13 @@ void MacroAssembler::Fmsub(const FPRegister& fd,
void MacroAssembler::Fmul(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fmul(fd, fn, fm);
}
void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fneg(fd, fn);
}
@@ -783,7 +792,7 @@ void MacroAssembler::Fnmadd(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
const FPRegister& fa) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fnmadd(fd, fn, fm, fa);
}
@@ -792,37 +801,37 @@ void MacroAssembler::Fnmsub(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm,
const FPRegister& fa) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fnmsub(fd, fn, fm, fa);
}
void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
frinta(fd, fn);
}
void MacroAssembler::Frintm(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
frintm(fd, fn);
}
void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
frintn(fd, fn);
}
void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
frintz(fd, fn);
}
void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fsqrt(fd, fn);
}
@@ -830,25 +839,25 @@ void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
void MacroAssembler::Fsub(const FPRegister& fd,
const FPRegister& fn,
const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
fsub(fd, fn, fm);
}
void MacroAssembler::Hint(SystemHint code) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
hint(code);
}
void MacroAssembler::Hlt(int code) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
hlt(code);
}
void MacroAssembler::Isb() {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
isb();
}
@@ -856,49 +865,30 @@ void MacroAssembler::Isb() {
void MacroAssembler::Ldnp(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& src) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!AreAliased(rt, rt2));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!AreAliased(rt, rt2));
ldnp(rt, rt2, src);
}
-void MacroAssembler::Ldp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& src) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!AreAliased(rt, rt2));
- ldp(rt, rt2, src);
-}
-
-
-void MacroAssembler::Ldpsw(const Register& rt,
- const Register& rt2,
- const MemOperand& src) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rt.IsZero());
- ASSERT(!rt2.IsZero());
- ldpsw(rt, rt2, src);
-}
-
-
-void MacroAssembler::Ldr(const FPRegister& ft, double imm) {
- ASSERT(allow_macro_instructions_);
- ldr(ft, imm);
+void MacroAssembler::Ldr(const CPURegister& rt, const Immediate& imm) {
+ DCHECK(allow_macro_instructions_);
+ ldr(rt, imm);
}
-void MacroAssembler::Ldr(const Register& rt, uint64_t imm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rt.IsZero());
- ldr(rt, imm);
+void MacroAssembler::Ldr(const CPURegister& rt, double imm) {
+ DCHECK(allow_macro_instructions_);
+ DCHECK(rt.Is64Bits());
+ ldr(rt, Immediate(double_to_rawbits(imm)));
}
void MacroAssembler::Lsl(const Register& rd,
const Register& rn,
unsigned shift) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
lsl(rd, rn, shift);
}
@@ -906,8 +896,8 @@ void MacroAssembler::Lsl(const Register& rd,
void MacroAssembler::Lsl(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
lslv(rd, rn, rm);
}
@@ -915,8 +905,8 @@ void MacroAssembler::Lsl(const Register& rd,
void MacroAssembler::Lsr(const Register& rd,
const Register& rn,
unsigned shift) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
lsr(rd, rn, shift);
}
@@ -924,8 +914,8 @@ void MacroAssembler::Lsr(const Register& rd,
void MacroAssembler::Lsr(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
lsrv(rd, rn, rm);
}
@@ -934,8 +924,8 @@ void MacroAssembler::Madd(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
madd(rd, rn, rm, ra);
}
@@ -943,15 +933,15 @@ void MacroAssembler::Madd(const Register& rd,
void MacroAssembler::Mneg(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
mneg(rd, rn, rm);
}
void MacroAssembler::Mov(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
// Emit a register move only if the registers are distinct, or if they are
// not X registers. Note that mov(w0, w0) is not a no-op because it clears
// the top word of x0.
@@ -962,21 +952,21 @@ void MacroAssembler::Mov(const Register& rd, const Register& rn) {
void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
movk(rd, imm, shift);
}
void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rt.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rt.IsZero());
mrs(rt, sysreg);
}
void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
msr(sysreg, rt);
}
@@ -985,8 +975,8 @@ void MacroAssembler::Msub(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
msub(rd, rn, rm, ra);
}
@@ -994,44 +984,44 @@ void MacroAssembler::Msub(const Register& rd,
void MacroAssembler::Mul(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
mul(rd, rn, rm);
}
void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
rbit(rd, rn);
}
void MacroAssembler::Ret(const Register& xn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!xn.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!xn.IsZero());
ret(xn);
CheckVeneerPool(false, false);
}
void MacroAssembler::Rev(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
rev(rd, rn);
}
void MacroAssembler::Rev16(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
rev16(rd, rn);
}
void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
rev32(rd, rn);
}
@@ -1039,8 +1029,8 @@ void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
void MacroAssembler::Ror(const Register& rd,
const Register& rs,
unsigned shift) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
ror(rd, rs, shift);
}
@@ -1048,8 +1038,8 @@ void MacroAssembler::Ror(const Register& rd,
void MacroAssembler::Ror(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
rorv(rd, rn, rm);
}
@@ -1058,8 +1048,8 @@ void MacroAssembler::Sbfiz(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
sbfiz(rd, rn, lsb, width);
}
@@ -1068,8 +1058,8 @@ void MacroAssembler::Sbfx(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
sbfx(rd, rn, lsb, width);
}
@@ -1077,7 +1067,7 @@ void MacroAssembler::Sbfx(const Register& rd,
void MacroAssembler::Scvtf(const FPRegister& fd,
const Register& rn,
unsigned fbits) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
scvtf(fd, rn, fbits);
}
@@ -1085,8 +1075,8 @@ void MacroAssembler::Scvtf(const FPRegister& fd,
void MacroAssembler::Sdiv(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
sdiv(rd, rn, rm);
}
@@ -1095,8 +1085,8 @@ void MacroAssembler::Smaddl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
smaddl(rd, rn, rm, ra);
}
@@ -1105,8 +1095,8 @@ void MacroAssembler::Smsubl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
smsubl(rd, rn, rm, ra);
}
@@ -1114,8 +1104,8 @@ void MacroAssembler::Smsubl(const Register& rd,
void MacroAssembler::Smull(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
smull(rd, rn, rm);
}
@@ -1123,8 +1113,8 @@ void MacroAssembler::Smull(const Register& rd,
void MacroAssembler::Smulh(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
smulh(rd, rn, rm);
}
@@ -1132,36 +1122,28 @@ void MacroAssembler::Smulh(const Register& rd,
void MacroAssembler::Stnp(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& dst) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
stnp(rt, rt2, dst);
}
-void MacroAssembler::Stp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& dst) {
- ASSERT(allow_macro_instructions_);
- stp(rt, rt2, dst);
-}
-
-
void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
sxtb(rd, rn);
}
void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
sxth(rd, rn);
}
void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
sxtw(rd, rn);
}
@@ -1170,8 +1152,8 @@ void MacroAssembler::Ubfiz(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
ubfiz(rd, rn, lsb, width);
}
@@ -1180,8 +1162,8 @@ void MacroAssembler::Ubfx(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
ubfx(rd, rn, lsb, width);
}
@@ -1189,7 +1171,7 @@ void MacroAssembler::Ubfx(const Register& rd,
void MacroAssembler::Ucvtf(const FPRegister& fd,
const Register& rn,
unsigned fbits) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
ucvtf(fd, rn, fbits);
}
@@ -1197,8 +1179,8 @@ void MacroAssembler::Ucvtf(const FPRegister& fd,
void MacroAssembler::Udiv(const Register& rd,
const Register& rn,
const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
udiv(rd, rn, rm);
}
@@ -1207,8 +1189,8 @@ void MacroAssembler::Umaddl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
umaddl(rd, rn, rm, ra);
}
@@ -1217,58 +1199,87 @@ void MacroAssembler::Umsubl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
umsubl(rd, rn, rm, ra);
}
void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
uxtb(rd, rn);
}
void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
uxth(rd, rn);
}
void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
uxtw(rd, rn);
}
void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
- ASSERT(!csp.Is(sp_));
- // TODO(jbramley): Several callers rely on this not using scratch registers,
- // so we use the assembler directly here. However, this means that large
- // immediate values of 'space' cannot be handled cleanly. (Only 24-bits
- // immediates or values of 'space' that can be encoded in one instruction are
- // accepted.) Once we implement our flexible scratch register idea, we could
- // greatly simplify this function.
- InstructionAccurateScope scope(this);
- if ((space.IsImmediate()) && !is_uint12(space.immediate())) {
- // The subtract instruction supports a 12-bit immediate, shifted left by
- // zero or 12 bits. So, in two instructions, we can subtract any immediate
- // between zero and (1 << 24) - 1.
- int64_t imm = space.immediate();
- ASSERT(is_uint24(imm));
-
- int64_t imm_top_12_bits = imm >> 12;
- sub(csp, StackPointer(), imm_top_12_bits << 12);
- imm -= imm_top_12_bits << 12;
- if (imm > 0) {
- sub(csp, csp, imm);
+ DCHECK(!csp.Is(sp_));
+ if (!TmpList()->IsEmpty()) {
+ if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Sub(temp, StackPointer(), space);
+ Bic(csp, temp, 0xf);
+ } else {
+ Sub(csp, StackPointer(), space);
}
} else {
- sub(csp, StackPointer(), space);
+ // TODO(jbramley): Several callers rely on this not using scratch
+ // registers, so we use the assembler directly here. However, this means
+ // that large immediate values of 'space' cannot be handled cleanly. (Only
+ // 24-bits immediates or values of 'space' that can be encoded in one
+ // instruction are accepted.) Once we implement our flexible scratch
+ // register idea, we could greatly simplify this function.
+ InstructionAccurateScope scope(this);
+ DCHECK(space.IsImmediate());
+ // Align to 16 bytes.
+ uint64_t imm = RoundUp(space.ImmediateValue(), 0x10);
+ DCHECK(is_uint24(imm));
+
+ Register source = StackPointer();
+ if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+ bic(csp, source, 0xf);
+ source = csp;
+ }
+ if (!is_uint12(imm)) {
+ int64_t imm_top_12_bits = imm >> 12;
+ sub(csp, source, imm_top_12_bits << 12);
+ source = csp;
+ imm -= imm_top_12_bits << 12;
+ }
+ if (imm > 0) {
+ sub(csp, source, imm);
+ }
}
+ AssertStackConsistency();
+}
+
+
+void MacroAssembler::SyncSystemStackPointer() {
+ DCHECK(emit_debug_code());
+ DCHECK(!csp.Is(sp_));
+ { InstructionAccurateScope scope(this);
+ if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+ bic(csp, StackPointer(), 0xf);
+ } else {
+ mov(csp, StackPointer());
+ }
+ }
+ AssertStackConsistency();
}
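
// The immediate handling in BumpSystemStackPointer above relies on the
// AArch64 ADD/SUB encoding: a 12-bit unsigned immediate, optionally shifted
// left by 12 bits, so any 24-bit byte count can be subtracted from csp in at
// most two instructions once it has been rounded up to 16 bytes. The helper
// below is an illustrative host-side sketch of just that arithmetic (the
// ALWAYS_ALIGN_CSP bic path is omitted); it is not V8 code.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Returns how many SUB instructions are needed (1 or 2) and fills `parts`
// with their immediates, after aligning `space` up to a 16-byte boundary.
static int SplitStackBump(uint64_t space, uint64_t parts[2]) {
  uint64_t imm = (space + 15) & ~UINT64_C(15);  // Round up to 16 bytes.
  assert(imm < (UINT64_C(1) << 24));            // Must fit in 24 bits.
  if (imm < (UINT64_C(1) << 12)) {              // Encodable in a single SUB.
    parts[0] = imm;
    return 1;
  }
  parts[0] = (imm >> 12) << 12;  // sub csp, sp, #top12, LSL #12
  parts[1] = imm & 0xfff;        // sub csp, csp, #low12 (only if non-zero)
  return parts[1] != 0 ? 2 : 1;
}

int main() {
  uint64_t parts[2];
  int n = SplitStackBump(0x12345, parts);  // Aligns to 0x12350: two SUBs.
  for (int i = 0; i < n; i++) {
    printf("sub #%#llx\n", static_cast<unsigned long long>(parts[i]));
  }
  return 0;
}
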
@@ -1280,7 +1291,9 @@ void MacroAssembler::InitializeRootRegister() {
void MacroAssembler::SmiTag(Register dst, Register src) {
- ASSERT(dst.Is64Bits() && src.Is64Bits());
+ STATIC_ASSERT(kXRegSizeInBits ==
+ static_cast<unsigned>(kSmiShift + kSmiValueSize));
+ DCHECK(dst.Is64Bits() && src.Is64Bits());
Lsl(dst, src, kSmiShift);
}
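
// The STATIC_ASSERT added above pins down the arm64 smi layout: the 32-bit
// payload occupies the upper half of an X register and the lower half is all
// tag/padding bits, so tagging is a single left shift. Below is a host-side
// sketch of the same arithmetic (kSmiShiftExample stands in for V8's
// kSmiShift, 32 here; an arithmetic right shift is assumed for untagging, as
// Asr provides on arm64); this is not V8 code.
#include <cstdint>
#include <cstdio>

static const int kSmiShiftExample = 32;

static uint64_t SmiTagExample(int32_t value) {
  // Lsl(dst, src, kSmiShift): the payload moves to bits 63..32, tag bits are 0.
  return static_cast<uint64_t>(static_cast<int64_t>(value)) << kSmiShiftExample;
}

static int32_t SmiUntagExample(uint64_t smi) {
  // Asr(dst, src, kSmiShift): a sign-extending shift recovers the payload.
  return static_cast<int32_t>(static_cast<int64_t>(smi) >> kSmiShiftExample);
}

int main() {
  uint64_t tagged = SmiTagExample(-7);  // 0xfffffff900000000
  printf("tagged=%#llx untagged=%d\n",
         static_cast<unsigned long long>(tagged), SmiUntagExample(tagged));
  return 0;
}
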
@@ -1289,7 +1302,9 @@ void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
void MacroAssembler::SmiUntag(Register dst, Register src) {
- ASSERT(dst.Is64Bits() && src.Is64Bits());
+ STATIC_ASSERT(kXRegSizeInBits ==
+ static_cast<unsigned>(kSmiShift + kSmiValueSize));
+ DCHECK(dst.Is64Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts) {
AssertSmi(src);
}
@@ -1303,7 +1318,7 @@ void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
void MacroAssembler::SmiUntagToDouble(FPRegister dst,
Register src,
UntagMode mode) {
- ASSERT(dst.Is64Bits() && src.Is64Bits());
+ DCHECK(dst.Is64Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
AssertSmi(src);
}
@@ -1314,7 +1329,7 @@ void MacroAssembler::SmiUntagToDouble(FPRegister dst,
void MacroAssembler::SmiUntagToFloat(FPRegister dst,
Register src,
UntagMode mode) {
- ASSERT(dst.Is32Bits() && src.Is64Bits());
+ DCHECK(dst.Is32Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
AssertSmi(src);
}
@@ -1322,6 +1337,22 @@ void MacroAssembler::SmiUntagToFloat(FPRegister dst,
}
+void MacroAssembler::SmiTagAndPush(Register src) {
+ STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
+ (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
+ (kSmiTag == 0));
+ Push(src.W(), wzr);
+}
+
+
+void MacroAssembler::SmiTagAndPush(Register src1, Register src2) {
+ STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
+ (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
+ (kSmiTag == 0));
+ Push(src1.W(), wzr, src2.W(), wzr);
+}
+
+
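
// SmiTagAndPush above avoids an explicit shift: pushing the W payload paired
// with wzr stores <zero word at the lower address, payload word above it>,
// and on a little-endian stack that pair reads back as the 64-bit tagged smi
// (payload << 32 | 0). The snippet below just models that memory layout with
// a byte buffer on a little-endian host; it is illustrative only, not V8's
// stack handling.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint32_t payload = 0x1234;  // Stand-in for src.W().
  uint32_t zero = 0;          // Stand-in for wzr.
  unsigned char stack[8];
  // Push(src.W(), wzr): the first operand ends up at the higher address.
  std::memcpy(stack + 0, &zero, 4);     // Lower address: wzr.
  std::memcpy(stack + 4, &payload, 4);  // Higher address: payload.
  uint64_t tagged;
  std::memcpy(&tagged, stack, 8);       // Little-endian read of the pair.
  printf("%#llx\n", static_cast<unsigned long long>(tagged));  // 0x123400000000
  return 0;
}
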
void MacroAssembler::JumpIfSmi(Register value,
Label* smi_label,
Label* not_smi_label) {
@@ -1333,7 +1364,7 @@ void MacroAssembler::JumpIfSmi(Register value,
B(not_smi_label);
}
} else {
- ASSERT(not_smi_label);
+ DCHECK(not_smi_label);
Tbnz(value, 0, not_smi_label);
}
}
@@ -1450,7 +1481,7 @@ void MacroAssembler::IsObjectJSStringType(Register object,
Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset));
STATIC_ASSERT(kStringTag == 0);
- ASSERT((string != NULL) || (not_string != NULL));
+ DCHECK((string != NULL) || (not_string != NULL));
if (string == NULL) {
TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
} else if (not_string == NULL) {
@@ -1478,7 +1509,7 @@ void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
}
if (csp.Is(StackPointer())) {
- ASSERT(size % 16 == 0);
+ DCHECK(size % 16 == 0);
} else {
BumpSystemStackPointer(size);
}
@@ -1489,7 +1520,7 @@ void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
- ASSERT(IsPowerOf2(unit_size));
+ DCHECK(IsPowerOf2(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
const Operand size(count, LSL, shift);
@@ -1507,7 +1538,7 @@ void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
- ASSERT(unit_size == 0 || IsPowerOf2(unit_size));
+ DCHECK(unit_size == 0 || IsPowerOf2(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
const Operand size(count_smi,
(shift >= 0) ? (LSL) : (LSR),
@@ -1535,19 +1566,19 @@ void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
Add(StackPointer(), StackPointer(), size);
if (csp.Is(StackPointer())) {
- ASSERT(size % 16 == 0);
+ DCHECK(size % 16 == 0);
} else if (emit_debug_code()) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
- Mov(csp, StackPointer());
+ SyncSystemStackPointer();
}
}
void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
- ASSERT(IsPowerOf2(unit_size));
+ DCHECK(IsPowerOf2(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
const Operand size(count, LSL, shift);
@@ -1562,13 +1593,13 @@ void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
- Mov(csp, StackPointer());
+ SyncSystemStackPointer();
}
}
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
- ASSERT(unit_size == 0 || IsPowerOf2(unit_size));
+ DCHECK(unit_size == 0 || IsPowerOf2(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
const Operand size(count_smi,
(shift >= 0) ? (LSL) : (LSR),
@@ -1584,7 +1615,7 @@ void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
- Mov(csp, StackPointer());
+ SyncSystemStackPointer();
}
}
@@ -1593,7 +1624,7 @@ void MacroAssembler::CompareAndBranch(const Register& lhs,
const Operand& rhs,
Condition cond,
Label* label) {
- if (rhs.IsImmediate() && (rhs.immediate() == 0) &&
+ if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) &&
((cond == eq) || (cond == ne))) {
if (cond == eq) {
Cbz(lhs, label);
@@ -1611,7 +1642,7 @@ void MacroAssembler::TestAndBranchIfAnySet(const Register& reg,
const uint64_t bit_pattern,
Label* label) {
int bits = reg.SizeInBits();
- ASSERT(CountSetBits(bit_pattern, bits) > 0);
+ DCHECK(CountSetBits(bit_pattern, bits) > 0);
if (CountSetBits(bit_pattern, bits) == 1) {
Tbnz(reg, MaskToBit(bit_pattern), label);
} else {
@@ -1625,7 +1656,7 @@ void MacroAssembler::TestAndBranchIfAllClear(const Register& reg,
const uint64_t bit_pattern,
Label* label) {
int bits = reg.SizeInBits();
- ASSERT(CountSetBits(bit_pattern, bits) > 0);
+ DCHECK(CountSetBits(bit_pattern, bits) > 0);
if (CountSetBits(bit_pattern, bits) == 1) {
Tbz(reg, MaskToBit(bit_pattern), label);
} else {
@@ -1636,7 +1667,7 @@ void MacroAssembler::TestAndBranchIfAllClear(const Register& reg,
void MacroAssembler::InlineData(uint64_t data) {
- ASSERT(is_uint16(data));
+ DCHECK(is_uint16(data));
InstructionAccurateScope scope(this, 1);
movz(xzr, data);
}
@@ -1655,11 +1686,11 @@ void MacroAssembler::DisableInstrumentation() {
void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
- ASSERT(strlen(marker_name) == 2);
+ DCHECK(strlen(marker_name) == 2);
// We allow only printable characters in the marker names. Unprintable
// characters are reserved for controlling features of the instrumentation.
- ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
+ DCHECK(isprint(marker_name[0]) && isprint(marker_name[1]));
InstructionAccurateScope scope(this, 1);
movn(xzr, (marker_name[1] << 8) | marker_name[0]);
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 352f3c2ac..658497b9f 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "cpu-profiler.h"
-#include "debug.h"
-#include "isolate-inl.h"
-#include "runtime.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/isolate-inl.h"
+#include "src/runtime.h"
namespace v8 {
namespace internal {
@@ -56,25 +56,31 @@ void MacroAssembler::LogicalMacro(const Register& rd,
LogicalOp op) {
UseScratchRegisterScope temps(this);
- if (operand.NeedsRelocation(isolate())) {
+ if (operand.NeedsRelocation(this)) {
Register temp = temps.AcquireX();
- LoadRelocated(temp, operand);
+ Ldr(temp, operand.immediate());
Logical(rd, rn, temp, op);
} else if (operand.IsImmediate()) {
- int64_t immediate = operand.immediate();
+ int64_t immediate = operand.ImmediateValue();
unsigned reg_size = rd.SizeInBits();
- ASSERT(rd.Is64Bits() || is_uint32(immediate));
// If the operation is NOT, invert the operation and immediate.
if ((op & NOT) == NOT) {
op = static_cast<LogicalOp>(op & ~NOT);
immediate = ~immediate;
- if (rd.Is32Bits()) {
- immediate &= kWRegMask;
- }
}
+ // Ignore the top 32 bits of an immediate if we're moving to a W register.
+ if (rd.Is32Bits()) {
+ // Check that the top 32 bits are consistent.
+ DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
+ ((immediate >> kWRegSizeInBits) == -1));
+ immediate &= kWRegMask;
+ }
+
+ DCHECK(rd.Is64Bits() || is_uint32(immediate));
+
// Special cases for all set or all clear immediates.
if (immediate == 0) {
switch (op) {
@@ -118,23 +124,24 @@ void MacroAssembler::LogicalMacro(const Register& rd,
} else {
// Immediate can't be encoded: synthesize using move immediate.
Register temp = temps.AcquireSameSizeAs(rn);
- Mov(temp, immediate);
+ Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
if (rd.Is(csp)) {
// If rd is the stack pointer we cannot use it as the destination
// register so we use the temp register as an intermediate again.
- Logical(temp, rn, temp, op);
+ Logical(temp, rn, imm_operand, op);
Mov(csp, temp);
+ AssertStackConsistency();
} else {
- Logical(rd, rn, temp, op);
+ Logical(rd, rn, imm_operand, op);
}
}
} else if (operand.IsExtendedRegister()) {
- ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
// Add/sub extended supports shift <= 4. We want to support exactly the
// same modes here.
- ASSERT(operand.shift_amount() <= 4);
- ASSERT(operand.reg().Is64Bits() ||
+ DCHECK(operand.shift_amount() <= 4);
+ DCHECK(operand.reg().Is64Bits() ||
((operand.extend() != UXTX) && (operand.extend() != SXTX)));
Register temp = temps.AcquireSameSizeAs(rn);
EmitExtendShift(temp, operand.reg(), operand.extend(),
@@ -143,16 +150,16 @@ void MacroAssembler::LogicalMacro(const Register& rd,
} else {
// The operand can be encoded in the instruction.
- ASSERT(operand.IsShiftedRegister());
+ DCHECK(operand.IsShiftedRegister());
Logical(rd, rn, operand, op);
}
}
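
// LogicalMacro now insists that a 64-bit immediate aimed at a W register has
// consistent upper bits (all zero or all one, i.e. a plain zero- or
// sign-extension) before masking them off with kWRegMask. Spelled out as a
// free-standing check (an arithmetic right shift is assumed for negative
// values); this is an illustration, not V8 code.
#include <cassert>
#include <cstdint>

static bool Upper32Consistent(int64_t imm) {
  int64_t top = imm >> 32;       // Arithmetic shift keeps the sign bit.
  return top == 0 || top == -1;  // 0x00000000... or 0xffffffff...
}

static uint32_t TruncateToW(int64_t imm) {
  assert(Upper32Consistent(imm));                  // The DCHECK above.
  return static_cast<uint32_t>(imm & 0xffffffff);  // The kWRegMask step.
}

int main() {
  return (TruncateToW(-1) == 0xffffffffu && TruncateToW(42) == 42u) ? 0 : 1;
}
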
void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
+ DCHECK(!rd.IsZero());
// TODO(all) extend to support more immediates.
//
@@ -171,20 +178,11 @@ void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
// applying move-keep operations to move-zero and move-inverted initial
// values.
- unsigned reg_size = rd.SizeInBits();
- unsigned n, imm_s, imm_r;
- if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
- // Immediate can be represented in a move zero instruction. Movz can't
- // write to the stack pointer.
- movz(rd, imm);
- } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
- // Immediate can be represented in a move inverted instruction. Movn can't
- // write to the stack pointer.
- movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
- } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
- // Immediate can be represented in a logical orr instruction.
- LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
- } else {
+ // Try to move the immediate in one instruction, and if that fails, switch to
+ // using multiple instructions.
+ if (!TryOneInstrMoveImmediate(rd, imm)) {
+ unsigned reg_size = rd.SizeInBits();
+
// Generic immediate case. Imm will be represented by
// [imm3, imm2, imm1, imm0], where each imm is 16 bits.
// A move-zero or move-inverted is generated for the first non-zero or
@@ -207,7 +205,7 @@ void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
// Iterate through the halfwords. Use movn/movz for the first non-ignored
// halfword, and movk for subsequent halfwords.
- ASSERT((reg_size % 16) == 0);
+ DCHECK((reg_size % 16) == 0);
bool first_mov_done = false;
for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
@@ -225,12 +223,13 @@ void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
}
}
}
- ASSERT(first_mov_done);
+ DCHECK(first_mov_done);
// Move the temporary if the original destination register was the stack
// pointer.
if (rd.IsSP()) {
mov(rd, temp);
+ AssertStackConsistency();
}
}
}
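
// A host-side sketch of the multi-instruction fallback used by Mov above:
// the 64-bit immediate is split into four 16-bit halfwords, a MOVN (all-ones
// base) or MOVZ (all-zeroes base) is chosen depending on which base lets more
// halfwords be ignored, and every remaining halfword is patched in with MOVK.
// This mirrors the idea only; the one-instruction shortcuts and the csp
// special case handled by the real macro are omitted, and the output register
// name is arbitrary.
#include <cstdint>
#include <cstdio>

static void PrintMovSequence(uint64_t imm) {
  int zero_halves = 0, ones_halves = 0;
  for (int i = 0; i < 4; i++) {
    uint16_t half = static_cast<uint16_t>(imm >> (16 * i));
    if (half == 0x0000) zero_halves++;
    if (half == 0xffff) ones_halves++;
  }
  // Halfwords equal to the chosen base value need no instruction at all.
  const uint16_t ignored = (ones_halves > zero_halves) ? 0xffff : 0x0000;
  bool first_done = false;
  for (int i = 0; i < 4; i++) {
    uint16_t half = static_cast<uint16_t>(imm >> (16 * i));
    if (half == ignored) continue;
    if (!first_done) {
      if (ignored == 0xffff) {
        printf("movn x0, #%#x, lsl #%d\n",
               static_cast<unsigned>(static_cast<uint16_t>(~half)), 16 * i);
      } else {
        printf("movz x0, #%#x, lsl #%d\n", static_cast<unsigned>(half), 16 * i);
      }
      first_done = true;
    } else {
      printf("movk x0, #%#x, lsl #%d\n", static_cast<unsigned>(half), 16 * i);
    }
  }
  if (!first_done) {  // The immediate was all-zeroes or all-ones.
    puts(ignored == 0xffff ? "movn x0, #0" : "movz x0, #0");
  }
}

int main() {
  PrintMovSequence(UINT64_C(0x0000123400005678));  // movz + one movk
  PrintMovSequence(UINT64_C(0xffffffff0000ffff));  // single movn
  return 0;
}
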
@@ -239,20 +238,20 @@ void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
void MacroAssembler::Mov(const Register& rd,
const Operand& operand,
DiscardMoveMode discard_mode) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
// Provide a swap register for instructions that need to write into the
// system stack pointer (and can't do this inherently).
UseScratchRegisterScope temps(this);
Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
- if (operand.NeedsRelocation(isolate())) {
- LoadRelocated(dst, operand);
+ if (operand.NeedsRelocation(this)) {
+ Ldr(dst, operand.immediate());
} else if (operand.IsImmediate()) {
// Call the macro assembler for generic immediates.
- Mov(dst, operand.immediate());
+ Mov(dst, operand.ImmediateValue());
} else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
// Emit a shift instruction if moving a shifted register. This operation
@@ -286,22 +285,22 @@ void MacroAssembler::Mov(const Register& rd,
// Copy the result to the system stack pointer.
if (!dst.Is(rd)) {
- ASSERT(rd.IsSP());
+ DCHECK(rd.IsSP());
Assembler::mov(rd, dst);
}
}
void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
- if (operand.NeedsRelocation(isolate())) {
- LoadRelocated(rd, operand);
+ if (operand.NeedsRelocation(this)) {
+ Ldr(rd, operand.immediate());
mvn(rd, rd);
} else if (operand.IsImmediate()) {
// Call the macro assembler for generic immediates.
- Mov(rd, ~operand.immediate());
+ Mov(rd, ~operand.ImmediateValue());
} else if (operand.IsExtendedRegister()) {
// Emit two instructions for the extend case. This differs from Mov, as
@@ -317,7 +316,7 @@ void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
- ASSERT((reg_size % 8) == 0);
+ DCHECK((reg_size % 8) == 0);
int count = 0;
for (unsigned i = 0; i < (reg_size / 16); i++) {
if ((imm & 0xffff) == 0) {
@@ -332,7 +331,7 @@ unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
- ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
+ DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}
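
// The comment above states the MOVZ rule in prose; as a stand-alone predicate
// it is simply "at most one 16-bit halfword is non-zero". A 64-bit register
// size is assumed and the names are made up for the example; this is not
// V8's IsImmMovz.
#include <cstdint>

static unsigned CountClearHalfWordsExample(uint64_t imm) {
  unsigned count = 0;
  for (int i = 0; i < 4; i++) {
    if (((imm >> (16 * i)) & 0xffff) == 0) count++;
  }
  return count;
}

static bool IsImmMovzExample(uint64_t imm) {
  return CountClearHalfWordsExample(imm) >= 3;  // (64 / 16) - 1
}

int main() {
  // 0x0000123400000000 is MOVZ-encodable, 0x0000123400005678 is not.
  return (IsImmMovzExample(UINT64_C(0x0000123400000000)) &&
          !IsImmMovzExample(UINT64_C(0x0000123400005678))) ? 0 : 1;
}
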
@@ -349,15 +348,16 @@ void MacroAssembler::ConditionalCompareMacro(const Register& rn,
StatusFlags nzcv,
Condition cond,
ConditionalCompareOp op) {
- ASSERT((cond != al) && (cond != nv));
- if (operand.NeedsRelocation(isolate())) {
+ DCHECK((cond != al) && (cond != nv));
+ if (operand.NeedsRelocation(this)) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- LoadRelocated(temp, operand);
+ Ldr(temp, operand.immediate());
ConditionalCompareMacro(rn, temp, nzcv, cond, op);
} else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
- (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
+ (operand.IsImmediate() &&
+ IsImmConditionalCompare(operand.ImmediateValue()))) {
// The immediate can be encoded in the instruction, or the operand is an
// unshifted register: call the assembler.
ConditionalCompare(rn, operand, nzcv, cond, op);
@@ -377,13 +377,13 @@ void MacroAssembler::Csel(const Register& rd,
const Register& rn,
const Operand& operand,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
+ DCHECK((cond != al) && (cond != nv));
if (operand.IsImmediate()) {
// Immediate argument. Handle special cases of 0, 1 and -1 using zero
// register.
- int64_t imm = operand.immediate();
+ int64_t imm = operand.ImmediateValue();
Register zr = AppropriateZeroRegFor(rn);
if (imm == 0) {
csel(rd, rn, zr, cond);
@@ -394,7 +394,7 @@ void MacroAssembler::Csel(const Register& rd,
} else {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(rn);
- Mov(temp, operand.immediate());
+ Mov(temp, imm);
csel(rd, rn, temp, cond);
}
} else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
@@ -410,29 +410,96 @@ void MacroAssembler::Csel(const Register& rd,
}
+bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
+ int64_t imm) {
+ unsigned n, imm_s, imm_r;
+ int reg_size = dst.SizeInBits();
+ if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
+ // Immediate can be represented in a move zero instruction. Movz can't write
+ // to the stack pointer.
+ movz(dst, imm);
+ return true;
+ } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
+ // Immediate can be represented in a move not instruction. Movn can't write
+ // to the stack pointer.
+ movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
+ return true;
+ } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be represented in a logical orr instruction.
+ LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
+ return true;
+ }
+ return false;
+}
+
+
+Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
+ int64_t imm) {
+ int reg_size = dst.SizeInBits();
+
+ // Encode the immediate in a single move instruction, if possible.
+ if (TryOneInstrMoveImmediate(dst, imm)) {
+ // The move was successful; nothing to do here.
+ } else {
+ // Pre-shift the immediate to the least-significant bits of the register.
+ int shift_low = CountTrailingZeros(imm, reg_size);
+ int64_t imm_low = imm >> shift_low;
+
+ // Pre-shift the immediate to the most-significant bits of the register. We
+ // insert set bits in the least-significant bits, as this creates a
+ // different immediate that may be encodable using movn or orr-immediate.
+ // If this new immediate is encodable, the set bits will be eliminated by
+ // the post shift on the following instruction.
+ int shift_high = CountLeadingZeros(imm, reg_size);
+ int64_t imm_high = (imm << shift_high) | ((1 << shift_high) - 1);
+
+ if (TryOneInstrMoveImmediate(dst, imm_low)) {
+ // The new immediate has been moved into the destination's low bits:
+ // return a new leftward-shifting operand.
+ return Operand(dst, LSL, shift_low);
+ } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
+ // The new immediate has been moved into the destination's high bits:
+ // return a new rightward-shifting operand.
+ return Operand(dst, LSR, shift_high);
+ } else {
+ // Use the generic move operation to set up the immediate.
+ Mov(dst, imm);
+ }
+ }
+ return Operand(dst);
+}
+
+
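
// A reduced host-side illustration of the pre-shift trick in
// MoveImmediateForShiftedOp above: when an immediate is not encodable on its
// own, shifting its set bits down to bit 0 may leave a value that a single
// MOVZ can produce, and the shift is folded back into the consuming
// instruction as an LSL on the register operand. Only the "shift low / MOVZ"
// half of the idea is shown (the MOVN/ORR and shift-high cases are omitted);
// names are made up and this is not V8 code.
#include <cstdint>
#include <cstdio>

// Returns true if `imm` can be handled as "movz #base" plus "reg, LSL #shift".
static bool PreShiftForMovz(uint64_t imm, uint64_t* base, int* shift) {
  if (imm == 0) return false;
  int s = 0;
  while (((imm >> s) & 1) == 0) s++;  // CountTrailingZeros.
  uint64_t low = imm >> s;
  if (low > 0xffff) return false;     // Still not a single MOVZ payload.
  *base = low;
  *shift = s;
  return true;
}

int main() {
  uint64_t base;
  int shift;
  // 0xabcd000 would need movz + movk by itself, but 0xabcd LSL 12 does not,
  // so e.g. Add(x0, x1, 0xabcd000) can become movz + add (shifted register).
  if (PreShiftForMovz(0xabcd000, &base, &shift)) {
    printf("movz temp, #%#llx\n", static_cast<unsigned long long>(base));
    printf("add  x0, x1, temp, lsl #%d\n", shift);
  }
  return 0;
}
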
void MacroAssembler::AddSubMacro(const Register& rd,
const Register& rn,
const Operand& operand,
FlagsUpdate S,
AddSubOp op) {
if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
- !operand.NeedsRelocation(isolate()) && (S == LeaveFlags)) {
+ !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
// The instruction would be a nop. Avoid generating useless code.
return;
}
- if (operand.NeedsRelocation(isolate())) {
+ if (operand.NeedsRelocation(this)) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- LoadRelocated(temp, operand);
+ Ldr(temp, operand.immediate());
AddSubMacro(rd, rn, temp, S, op);
- } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
- (rn.IsZero() && !operand.IsShiftedRegister()) ||
+ } else if ((operand.IsImmediate() &&
+ !IsImmAddSub(operand.ImmediateValue())) ||
+ (rn.IsZero() && !operand.IsShiftedRegister()) ||
(operand.IsShiftedRegister() && (operand.shift() == ROR))) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(rn);
- Mov(temp, operand);
- AddSub(rd, rn, temp, S, op);
+ if (operand.IsImmediate()) {
+ Operand imm_operand =
+ MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
+ AddSub(rd, rn, imm_operand, S, op);
+ } else {
+ Mov(temp, operand);
+ AddSub(rd, rn, temp, S, op);
+ }
} else {
AddSub(rd, rn, operand, S, op);
}
@@ -444,12 +511,12 @@ void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
const Operand& operand,
FlagsUpdate S,
AddSubWithCarryOp op) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
UseScratchRegisterScope temps(this);
- if (operand.NeedsRelocation(isolate())) {
+ if (operand.NeedsRelocation(this)) {
Register temp = temps.AcquireX();
- LoadRelocated(temp, operand);
+ Ldr(temp, operand.immediate());
AddSubWithCarryMacro(rd, rn, temp, S, op);
} else if (operand.IsImmediate() ||
@@ -461,9 +528,9 @@ void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
} else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
// Add/sub with carry (shifted register).
- ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
- ASSERT(operand.shift() != ROR);
- ASSERT(is_uintn(operand.shift_amount(),
+ DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
+ DCHECK(operand.shift() != ROR);
+ DCHECK(is_uintn(operand.shift_amount(),
rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
: kWRegSizeInBitsLog2));
Register temp = temps.AcquireSameSizeAs(rn);
@@ -472,11 +539,11 @@ void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
} else if (operand.IsExtendedRegister()) {
// Add/sub with carry (extended register).
- ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
// Add/sub extended supports a shift <= 4. We want to support exactly the
// same modes.
- ASSERT(operand.shift_amount() <= 4);
- ASSERT(operand.reg().Is64Bits() ||
+ DCHECK(operand.shift_amount() <= 4);
+ DCHECK(operand.reg().Is64Bits() ||
((operand.extend() != UXTX) && (operand.extend() != SXTX)));
Register temp = temps.AcquireSameSizeAs(rn);
EmitExtendShift(temp, operand.reg(), operand.extend(),
@@ -521,11 +588,44 @@ void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
}
}
+void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op) {
+ // TODO(all): Should we support register offset for load-store-pair?
+ DCHECK(!addr.IsRegisterOffset());
+
+ int64_t offset = addr.offset();
+ LSDataSize size = CalcLSPairDataSize(op);
+
+ // Check if the offset fits in the immediate field of the appropriate
+ // instruction. If not, emit two instructions to perform the operation.
+ if (IsImmLSPair(offset, size)) {
+ // Encodable in one load/store pair instruction.
+ LoadStorePair(rt, rt2, addr, op);
+ } else {
+ Register base = addr.base();
+ if (addr.IsImmediateOffset()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(base);
+ Add(temp, base, offset);
+ LoadStorePair(rt, rt2, MemOperand(temp), op);
+ } else if (addr.IsPostIndex()) {
+ LoadStorePair(rt, rt2, MemOperand(base), op);
+ Add(base, base, offset);
+ } else {
+ DCHECK(addr.IsPreIndex());
+ Add(base, base, offset);
+ LoadStorePair(rt, rt2, MemOperand(base), op);
+ }
+ }
+}
+
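
// The "fits in the immediate field" test above corresponds to the LDP/STP
// signed-offset form: the offset must be a multiple of the access size and,
// once scaled, must fit in a signed 7-bit field. A free-standing version of
// that check (log2_size is 2, 3 or 4 for W, X or Q pairs) might look like
// this; it is an illustration, not V8's IsImmLSPair.
#include <cstdint>

static bool FitsLoadStorePairImm(int64_t offset, int log2_size) {
  if ((offset & ((INT64_C(1) << log2_size) - 1)) != 0) return false;  // Scaled?
  int64_t scaled = offset >> log2_size;
  return scaled >= -64 && scaled <= 63;  // Signed 7-bit range.
}

int main() {
  // X-register pairs (8-byte units): +504 is the largest encodable offset.
  return (FitsLoadStorePairImm(504, 3) && !FitsLoadStorePairImm(512, 3) &&
          FitsLoadStorePairImm(-512, 3)) ? 0 : 1;
}
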
void MacroAssembler::Load(const Register& rt,
const MemOperand& addr,
Representation r) {
- ASSERT(!r.IsDouble());
+ DCHECK(!r.IsDouble());
if (r.IsInteger8()) {
Ldrsb(rt, addr);
@@ -538,7 +638,7 @@ void MacroAssembler::Load(const Register& rt,
} else if (r.IsInteger32()) {
Ldr(rt.W(), addr);
} else {
- ASSERT(rt.Is64Bits());
+ DCHECK(rt.Is64Bits());
Ldr(rt, addr);
}
}
@@ -547,7 +647,7 @@ void MacroAssembler::Load(const Register& rt,
void MacroAssembler::Store(const Register& rt,
const MemOperand& addr,
Representation r) {
- ASSERT(!r.IsDouble());
+ DCHECK(!r.IsDouble());
if (r.IsInteger8() || r.IsUInteger8()) {
Strb(rt, addr);
@@ -556,7 +656,7 @@ void MacroAssembler::Store(const Register& rt,
} else if (r.IsInteger32()) {
Str(rt.W(), addr);
} else {
- ASSERT(rt.Is64Bits());
+ DCHECK(rt.Is64Bits());
if (r.IsHeapObject()) {
AssertNotSmi(rt);
} else if (r.IsSmi()) {
@@ -594,30 +694,29 @@ bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
if (hint == kAdrNear) {
adr(rd, label);
return;
}
- ASSERT(hint == kAdrFar);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
- ASSERT(!AreAliased(rd, scratch));
-
+ DCHECK(hint == kAdrFar);
if (label->is_bound()) {
int label_offset = label->pos() - pc_offset();
if (Instruction::IsValidPCRelOffset(label_offset)) {
adr(rd, label);
} else {
- ASSERT(label_offset <= 0);
+ DCHECK(label_offset <= 0);
int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
adr(rd, min_adr_offset);
Add(rd, rd, label_offset - min_adr_offset);
}
} else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+
InstructionAccurateScope scope(
this, PatchingAssembler::kAdrFarPatchableNInstrs);
adr(rd, label);
@@ -625,13 +724,12 @@ void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
nop(ADR_FAR_NOP);
}
movz(scratch, 0);
- add(rd, rd, scratch);
}
}
void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
- ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
+ DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
(bit == -1 || type >= kBranchTypeFirstUsingBit));
if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
B(static_cast<Condition>(type), label);
@@ -651,15 +749,15 @@ void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
void MacroAssembler::B(Label* label, Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK((cond != al) && (cond != nv));
Label done;
bool need_extra_instructions =
NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
if (need_extra_instructions) {
- b(&done, InvertCondition(cond));
+ b(&done, NegateCondition(cond));
B(label);
} else {
b(label, cond);
@@ -669,7 +767,7 @@ void MacroAssembler::B(Label* label, Condition cond) {
void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Label done;
bool need_extra_instructions =
@@ -686,7 +784,7 @@ void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Label done;
bool need_extra_instructions =
@@ -703,7 +801,7 @@ void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
void MacroAssembler::Cbnz(const Register& rt, Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Label done;
bool need_extra_instructions =
@@ -720,7 +818,7 @@ void MacroAssembler::Cbnz(const Register& rt, Label* label) {
void MacroAssembler::Cbz(const Register& rt, Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Label done;
bool need_extra_instructions =
@@ -742,8 +840,8 @@ void MacroAssembler::Cbz(const Register& rt, Label* label) {
void MacroAssembler::Abs(const Register& rd, const Register& rm,
Label* is_not_representable,
Label* is_representable) {
- ASSERT(allow_macro_instructions_);
- ASSERT(AreSameSizeAndType(rd, rm));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(AreSameSizeAndType(rd, rm));
Cmp(rm, 1);
Cneg(rd, rm, lt);
@@ -767,12 +865,12 @@ void MacroAssembler::Abs(const Register& rd, const Register& rm,
void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3) {
- ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
int size = src0.SizeInBytes();
- PrepareForPush(count, size);
+ PushPreamble(count, size);
PushHelper(count, size, src0, src1, src2, src3);
}
@@ -781,12 +879,12 @@ void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3,
const CPURegister& src4, const CPURegister& src5,
const CPURegister& src6, const CPURegister& src7) {
- ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
+ DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
int count = 5 + src5.IsValid() + src6.IsValid() + src6.IsValid();
int size = src0.SizeInBytes();
- PrepareForPush(count, size);
+ PushPreamble(count, size);
PushHelper(4, size, src0, src1, src2, src3);
PushHelper(count - 4, size, src4, src5, src6, src7);
}
@@ -796,29 +894,36 @@ void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3) {
// It is not valid to pop into the same register more than once in one
// instruction, not even into the zero register.
- ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
- ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
- ASSERT(dst0.IsValid());
+ DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
+ DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ DCHECK(dst0.IsValid());
int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
int size = dst0.SizeInBytes();
- PrepareForPop(count, size);
PopHelper(count, size, dst0, dst1, dst2, dst3);
+ PopPostamble(count, size);
+}
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- Mov(csp, StackPointer());
- }
+
+void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
+ int size = src0.SizeInBytes() + src1.SizeInBytes();
+
+ PushPreamble(size);
+ // Reserve room for src0 and push src1.
+ str(src1, MemOperand(StackPointer(), -size, PreIndex));
+ // Fill the gap with src0.
+ str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
}
-void MacroAssembler::PushPopQueue::PushQueued() {
+void MacroAssembler::PushPopQueue::PushQueued(
+ PreambleDirective preamble_directive) {
if (queued_.empty()) return;
- masm_->PrepareForPush(size_);
+ if (preamble_directive == WITH_PREAMBLE) {
+ masm_->PushPreamble(size_);
+ }
int count = queued_.size();
int index = 0;
@@ -843,8 +948,6 @@ void MacroAssembler::PushPopQueue::PushQueued() {
void MacroAssembler::PushPopQueue::PopQueued() {
if (queued_.empty()) return;
- masm_->PrepareForPop(size_);
-
int count = queued_.size();
int index = 0;
while (index < count) {
@@ -861,6 +964,7 @@ void MacroAssembler::PushPopQueue::PopQueued() {
batch[0], batch[1], batch[2], batch[3]);
}
+ masm_->PopPostamble(size_);
queued_.clear();
}
@@ -868,7 +972,7 @@ void MacroAssembler::PushPopQueue::PopQueued() {
void MacroAssembler::PushCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
- PrepareForPush(registers.Count(), size);
+ PushPreamble(registers.Count(), size);
// Push up to four registers at a time because if the current stack pointer is
// csp and reg_size is 32, registers must be pushed in blocks of four in order
// to maintain the 16-byte alignment for csp.
@@ -887,7 +991,6 @@ void MacroAssembler::PushCPURegList(CPURegList registers) {
void MacroAssembler::PopCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
- PrepareForPop(registers.Count(), size);
// Pop up to four registers at a time because if the current stack pointer is
// csp and reg_size is 32, registers must be pushed in blocks of four in
// order to maintain the 16-byte alignment for csp.
@@ -900,20 +1003,14 @@ void MacroAssembler::PopCPURegList(CPURegList registers) {
int count = count_before - registers.Count();
PopHelper(count, size, dst0, dst1, dst2, dst3);
}
-
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- Mov(csp, StackPointer());
- }
+ PopPostamble(registers.Count(), size);
}
void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
int size = src.SizeInBytes();
- PrepareForPush(count, size);
+ PushPreamble(count, size);
if (FLAG_optimize_for_size && count > 8) {
UseScratchRegisterScope temps(this);
@@ -944,12 +1041,12 @@ void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
PushHelper(1, size, src, NoReg, NoReg, NoReg);
count -= 1;
}
- ASSERT(count == 0);
+ DCHECK(count == 0);
}
void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
- PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
+ PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(count);
@@ -1002,22 +1099,22 @@ void MacroAssembler::PushHelper(int count, int size,
  // Ensure that we don't unintentionally modify scratch or debug registers.
InstructionAccurateScope scope(this);
- ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
- ASSERT(size == src0.SizeInBytes());
+ DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
+ DCHECK(size == src0.SizeInBytes());
// When pushing multiple registers, the store order is chosen such that
// Push(a, b) is equivalent to Push(a) followed by Push(b).
switch (count) {
case 1:
- ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
+ DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
break;
case 2:
- ASSERT(src2.IsNone() && src3.IsNone());
+ DCHECK(src2.IsNone() && src3.IsNone());
stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
break;
case 3:
- ASSERT(src3.IsNone());
+ DCHECK(src3.IsNone());
stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
str(src0, MemOperand(StackPointer(), 2 * size));
break;
@@ -1042,22 +1139,22 @@ void MacroAssembler::PopHelper(int count, int size,
  // Ensure that we don't unintentionally modify scratch or debug registers.
InstructionAccurateScope scope(this);
- ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
- ASSERT(size == dst0.SizeInBytes());
+ DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ DCHECK(size == dst0.SizeInBytes());
// When popping multiple registers, the load order is chosen such that
// Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
switch (count) {
case 1:
- ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
+ DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
break;
case 2:
- ASSERT(dst2.IsNone() && dst3.IsNone());
+ DCHECK(dst2.IsNone() && dst3.IsNone());
ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
break;
case 3:
- ASSERT(dst3.IsNone());
+ DCHECK(dst3.IsNone());
ldr(dst2, MemOperand(StackPointer(), 2 * size));
ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
break;
@@ -1075,15 +1172,13 @@ void MacroAssembler::PopHelper(int count, int size,
}
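
// A tiny host-side model of the ordering rule enforced by PushHelper and
// PopHelper above: Push(a, b) must behave like Push(a); Push(b), so the
// two-register case emits stp(b, a, [sp, #-2*size]!), leaving `a` at the
// higher address and `b` on top; Pop(c, d) uses ldp(c, d, [sp], #+2*size),
// so `c` receives the top slot. The array below models a descending stack of
// 64-bit slots and is illustrative only, not V8 code.
#include <cassert>
#include <cstdint>

int main() {
  uint64_t mem[8] = {0};
  int sp = 8;  // Slot index; the stack grows towards lower indices.

  uint64_t a = 0x1111, b = 0x2222;
  // Push(a, b)  ==>  stp(b, a, [sp, #-2]!)  (in slot units).
  sp -= 2;
  mem[sp] = b;      // rt  (b) -> lower address, i.e. top of stack.
  mem[sp + 1] = a;  // rt2 (a) -> higher address.

  // Pop(c, d)  ==>  ldp(c, d, [sp], #+2): c takes the top slot.
  uint64_t c = mem[sp];
  uint64_t d = mem[sp + 1];
  sp += 2;

  // Same outcome as Push(a); Push(b); Pop(c); Pop(d).
  assert(c == b && d == a && sp == 8);
  return 0;
}
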
-void MacroAssembler::PrepareForPush(Operand total_size) {
- // TODO(jbramley): This assertion generates too much code in some debug tests.
- // AssertStackConsistency();
+void MacroAssembler::PushPreamble(Operand total_size) {
if (csp.Is(StackPointer())) {
// If the current stack pointer is csp, then it must be aligned to 16 bytes
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
- ASSERT((total_size.immediate() % 16) == 0);
+ DCHECK((total_size.ImmediateValue() % 16) == 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
@@ -1097,25 +1192,29 @@ void MacroAssembler::PrepareForPush(Operand total_size) {
}
-void MacroAssembler::PrepareForPop(Operand total_size) {
- AssertStackConsistency();
+void MacroAssembler::PopPostamble(Operand total_size) {
if (csp.Is(StackPointer())) {
// If the current stack pointer is csp, then it must be aligned to 16 bytes
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
- ASSERT((total_size.immediate() % 16) == 0);
+ DCHECK((total_size.ImmediateValue() % 16) == 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
// well, and it will be caught by hardware (or the simulator) anyway.
+ } else if (emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ SyncSystemStackPointer();
}
}
void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) {
- ASSERT(offset.immediate() >= 0);
+ DCHECK(offset.ImmediateValue() >= 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
Check(le, kStackAccessBelowStackPointer);
@@ -1127,7 +1226,7 @@ void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
if (offset.IsImmediate()) {
- ASSERT(offset.immediate() >= 0);
+ DCHECK(offset.ImmediateValue() >= 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
Check(le, kStackAccessBelowStackPointer);
@@ -1140,8 +1239,8 @@ void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
void MacroAssembler::PokePair(const CPURegister& src1,
const CPURegister& src2,
int offset) {
- ASSERT(AreSameSizeAndType(src1, src2));
- ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
+ DCHECK(AreSameSizeAndType(src1, src2));
+ DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
Stp(src1, src2, MemOperand(StackPointer(), offset));
}
@@ -1149,8 +1248,8 @@ void MacroAssembler::PokePair(const CPURegister& src1,
void MacroAssembler::PeekPair(const CPURegister& dst1,
const CPURegister& dst2,
int offset) {
- ASSERT(AreSameSizeAndType(dst1, dst2));
- ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
+ DCHECK(AreSameSizeAndType(dst1, dst2));
+ DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
}
@@ -1161,7 +1260,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
// This method must not be called unless the current stack pointer is the
// system stack pointer (csp).
- ASSERT(csp.Is(StackPointer()));
+ DCHECK(csp.Is(StackPointer()));
MemOperand tos(csp, -2 * kXRegSize, PreIndex);
@@ -1185,7 +1284,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
// This method must not be called unless the current stack pointer is the
// system stack pointer (csp).
- ASSERT(csp.Is(StackPointer()));
+ DCHECK(csp.Is(StackPointer()));
MemOperand tos(csp, 2 * kXRegSize, PostIndex);
@@ -1204,20 +1303,27 @@ void MacroAssembler::PopCalleeSavedRegisters() {
void MacroAssembler::AssertStackConsistency() {
- if (emit_debug_code()) {
- if (csp.Is(StackPointer())) {
- // We can't check the alignment of csp without using a scratch register
- // (or clobbering the flags), but the processor (or simulator) will abort
- // if it is not properly aligned during a load.
+ // Avoid emitting code when !use_real_abort() since non-real aborts cause too
+ // much code to be generated.
+ if (emit_debug_code() && use_real_aborts()) {
+ if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+ // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
+ // can't check the alignment of csp without using a scratch register (or
+ // clobbering the flags), but the processor (or simulator) will abort if
+ // it is not properly aligned during a load.
ldr(xzr, MemOperand(csp, 0));
- } else if (FLAG_enable_slow_asserts) {
+ }
+ if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
Label ok;
// Check that csp <= StackPointer(), preserving all registers and NZCV.
sub(StackPointer(), csp, StackPointer());
cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
- Abort(kTheCurrentStackPointerIsBelowCsp);
+ // Avoid generating AssertStackConsistency checks for the Push in Abort.
+ { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
+ Abort(kTheCurrentStackPointerIsBelowCsp);
+ }
bind(&ok);
// Restore StackPointer().
@@ -1334,15 +1440,14 @@ void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset));
+ Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
And(dst, dst, Map::EnumLengthBits::kMask);
}
void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
- STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
- And(dst, dst, Smi::FromInt(Map::EnumLengthBits::kMask));
+ EnumLengthUntagged(dst, map);
+ SmiTag(dst, dst);
}
@@ -1353,7 +1458,7 @@ void MacroAssembler::CheckEnumCache(Register object,
Register scratch2,
Register scratch3,
Label* call_runtime) {
- ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
+ DCHECK(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
scratch3));
Register empty_fixed_array_value = scratch0;
@@ -1435,7 +1540,7 @@ void MacroAssembler::JumpToHandlerEntry(Register exception,
Register scratch1,
Register scratch2) {
// Handler expects argument in x0.
- ASSERT(exception.Is(x0));
+ DCHECK(exception.Is(x0));
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
@@ -1453,7 +1558,7 @@ void MacroAssembler::JumpToHandlerEntry(Register exception,
void MacroAssembler::InNewSpace(Register object,
Condition cond,
Label* branch) {
- ASSERT(cond == eq || cond == ne);
+ DCHECK(cond == eq || cond == ne);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
And(temp, object, ExternalReference::new_space_mask(isolate()));
@@ -1476,10 +1581,10 @@ void MacroAssembler::Throw(Register value,
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The handler expects the exception in x0.
- ASSERT(value.Is(x0));
+ DCHECK(value.Is(x0));
// Drop the stack pointer to the top of the top handler.
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
isolate())));
Ldr(jssp, MemOperand(scratch1));
@@ -1518,10 +1623,10 @@ void MacroAssembler::ThrowUncatchable(Register value,
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The handler expects the exception in x0.
- ASSERT(value.Is(x0));
+ DCHECK(value.Is(x0));
// Drop the stack pointer to the top of the top stack handler.
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
isolate())));
Ldr(jssp, MemOperand(scratch1));
@@ -1551,50 +1656,8 @@ void MacroAssembler::ThrowUncatchable(Register value,
}
-void MacroAssembler::Throw(BailoutReason reason) {
- Label throw_start;
- Bind(&throw_start);
-#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
- RecordComment("Throw message: ");
- RecordComment((msg != NULL) ? msg : "UNKNOWN");
-#endif
-
- Mov(x0, Smi::FromInt(reason));
- Push(x0);
-
- // Disable stub call restrictions to always allow calls to throw.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kHiddenThrowMessage, 1);
- } else {
- CallRuntime(Runtime::kHiddenThrowMessage, 1);
- }
- // ThrowMessage should not return here.
- Unreachable();
-}
-
-
-void MacroAssembler::ThrowIf(Condition cond, BailoutReason reason) {
- Label ok;
- B(InvertCondition(cond), &ok);
- Throw(reason);
- Bind(&ok);
-}
-
-
-void MacroAssembler::ThrowIfSmi(const Register& value, BailoutReason reason) {
- Label ok;
- JumpIfNotSmi(value, &ok);
- Throw(reason);
- Bind(&ok);
-}
-
-
void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
- ASSERT(smi.Is64Bits());
+ DCHECK(smi.Is64Bits());
Abs(smi, smi, slow);
}
@@ -1660,7 +1723,7 @@ void MacroAssembler::AssertString(Register object) {
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
@@ -1712,7 +1775,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
ExternalReference::handle_scope_level_address(isolate()),
next_address);
- ASSERT(function_address.is(x1) || function_address.is(x2));
+ DCHECK(function_address.is(x1) || function_address.is(x2));
Label profiler_disabled;
Label end_profiler_check;
@@ -1821,7 +1884,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
FrameScope frame(this, StackFrame::INTERNAL);
CallExternalReference(
ExternalReference(
- Runtime::kHiddenPromoteScheduledException, isolate()), 0);
+ Runtime::kPromoteScheduledException, isolate()), 0);
}
B(&exception_handled);
@@ -1870,7 +1933,7 @@ void MacroAssembler::GetBuiltinFunction(Register target,
void MacroAssembler::GetBuiltinEntry(Register target,
Register function,
Builtins::JavaScript id) {
- ASSERT(!AreAliased(target, function));
+ DCHECK(!AreAliased(target, function));
GetBuiltinFunction(function, id);
// Load the code entry point from the builtins object.
Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
@@ -1882,7 +1945,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
const CallWrapper& call_wrapper) {
ASM_LOCATION("MacroAssembler::InvokeBuiltin");
// You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Get the builtin entry in x2 and setup the function object in x1.
GetBuiltinEntry(x2, x1, id);
@@ -1891,7 +1954,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
Call(x2);
call_wrapper.AfterCall();
} else {
- ASSERT(flag == JUMP_FUNCTION);
+ DCHECK(flag == JUMP_FUNCTION);
Jump(x2);
}
}
@@ -1923,7 +1986,7 @@ void MacroAssembler::InitializeNewString(Register string,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2) {
- ASSERT(!AreAliased(string, length, scratch1, scratch2));
+ DCHECK(!AreAliased(string, length, scratch1, scratch2));
LoadRoot(scratch2, map_index);
SmiTag(scratch1, length);
Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
@@ -1940,7 +2003,7 @@ int MacroAssembler::ActivationFrameAlignment() {
// environment.
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
- return OS::ActivationFrameAlignment();
+ return base::OS::ActivationFrameAlignment();
#else // V8_HOST_ARCH_ARM64
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
@@ -1970,10 +2033,10 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function,
int num_of_reg_args,
int num_of_double_args) {
- ASSERT(has_frame());
+ DCHECK(has_frame());
// We can pass 8 integer arguments in registers. If we need to pass more than
// that, we'll need to implement support for passing them on the stack.
- ASSERT(num_of_reg_args <= 8);
+ DCHECK(num_of_reg_args <= 8);
// If we're passing doubles, we're limited to the following prototypes
// (defined by ExternalReference::Type):
@@ -1982,8 +2045,8 @@ void MacroAssembler::CallCFunction(Register function,
// BUILTIN_FP_CALL: double f(double)
// BUILTIN_FP_INT_CALL: double f(double, int)
if (num_of_double_args > 0) {
- ASSERT(num_of_reg_args <= 1);
- ASSERT((num_of_double_args + num_of_reg_args) <= 2);
+ DCHECK(num_of_reg_args <= 1);
+ DCHECK((num_of_double_args + num_of_reg_args) <= 2);
}
@@ -1995,12 +2058,12 @@ void MacroAssembler::CallCFunction(Register function,
int sp_alignment = ActivationFrameAlignment();
// The ABI mandates at least 16-byte alignment.
- ASSERT(sp_alignment >= 16);
- ASSERT(IsPowerOf2(sp_alignment));
+ DCHECK(sp_alignment >= 16);
+ DCHECK(IsPowerOf2(sp_alignment));
// The current stack pointer is a callee saved register, and is preserved
// across the call.
- ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
+ DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
// Align and synchronize the system stack pointer with jssp.
Bic(csp, old_stack_pointer, sp_alignment - 1);
@@ -2018,7 +2081,7 @@ void MacroAssembler::CallCFunction(Register function,
// where we only pushed one W register on top of an aligned jssp.
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- ASSERT(ActivationFrameAlignment() == 16);
+ DCHECK(ActivationFrameAlignment() == 16);
Sub(temp, csp, old_stack_pointer);
// We want temp <= 0 && temp >= -12.
Cmp(temp, 0);
@@ -2044,13 +2107,13 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode);
}
void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
AllowDeferredHandleDereference embedding_raw_address;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
}
@@ -2099,7 +2162,7 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
positions_recorder()->WriteRecordedPositions();
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
- ASSERT(rmode != RelocInfo::NONE32);
+ DCHECK(rmode != RelocInfo::NONE32);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
@@ -2108,12 +2171,12 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
// Addresses are 48 bits so we never need to load the upper 16 bits.
uint64_t imm = reinterpret_cast<uint64_t>(target);
// If we don't use ARM tagged addresses, the 16 higher bits must be 0.
- ASSERT(((imm >> 48) & 0xffff) == 0);
+ DCHECK(((imm >> 48) & 0xffff) == 0);
movz(temp, (imm >> 0) & 0xffff, 0);
movk(temp, (imm >> 16) & 0xffff, 16);
movk(temp, (imm >> 32) & 0xffff, 32);
} else {
- LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode));
+ Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
}
Blr(temp);
#ifdef DEBUG
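The movz/movk sequence above materialises the call target from its three low 16-bit halfwords, which is enough because code addresses are at most 48 bits wide; the DCHECK guards the unused top halfword. A sketch of the decomposition:

#include <cassert>
#include <cstdint>

struct MovSequence {
  uint16_t movz_imm0;   // movz temp, #imm, lsl #0
  uint16_t movk_imm16;  // movk temp, #imm, lsl #16
  uint16_t movk_imm32;  // movk temp, #imm, lsl #32
};

inline MovSequence DecomposeCallTarget(uint64_t imm) {
  assert(((imm >> 48) & 0xffff) == 0);  // addresses are at most 48 bits wide
  return MovSequence{static_cast<uint16_t>(imm & 0xffff),
                     static_cast<uint16_t>((imm >> 16) & 0xffff),
                     static_cast<uint16_t>((imm >> 32) & 0xffff)};
}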
@@ -2161,7 +2224,7 @@ int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
USE(target);
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
- ASSERT(rmode != RelocInfo::NONE32);
+ DCHECK(rmode != RelocInfo::NONE32);
if (rmode == RelocInfo::NONE64) {
return kCallSizeWithoutRelocation;
@@ -2178,7 +2241,7 @@ int MacroAssembler::CallSize(Handle<Code> code,
USE(ast_id);
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
- ASSERT(rmode != RelocInfo::NONE32);
+ DCHECK(rmode != RelocInfo::NONE32);
if (rmode == RelocInfo::NONE64) {
return kCallSizeWithoutRelocation;
@@ -2195,7 +2258,7 @@ void MacroAssembler::JumpForHeapNumber(Register object,
Register heap_number_map,
Label* on_heap_number,
Label* on_not_heap_number) {
- ASSERT(on_heap_number || on_not_heap_number);
+ DCHECK(on_heap_number || on_not_heap_number);
AssertNotSmi(object);
UseScratchRegisterScope temps(this);
@@ -2209,7 +2272,7 @@ void MacroAssembler::JumpForHeapNumber(Register object,
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
}
- ASSERT(!AreAliased(temp, heap_number_map));
+ DCHECK(!AreAliased(temp, heap_number_map));
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
Cmp(temp, heap_number_map);
@@ -2249,7 +2312,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
Register scratch2,
Register scratch3,
Label* not_found) {
- ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(object, result, scratch1, scratch2, scratch3));
// Use of registers. Register result is used as a temporary.
Register number_string_cache = result;
@@ -2353,6 +2416,16 @@ void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
}
+void MacroAssembler::JumpIfMinusZero(Register input,
+ Label* on_negative_zero) {
+ DCHECK(input.Is64Bits());
+ // Floating point value is in an integer register. Detect -0.0 by subtracting
+ // 1 (cmp), which will cause overflow.
+ Cmp(input, 1);
+ B(vs, on_negative_zero);
+}
+
+
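The new JumpIfMinusZero overload works on the raw bits of a double held in an X register: -0.0 is the only value whose bit pattern is 0x8000000000000000, i.e. INT64_MIN, and that is the only input for which `cmp input, #1` (input - 1) sets the overflow flag. A small self-contained check of that bit pattern:

#include <cassert>
#include <cstdint>
#include <cstring>

inline bool IsMinusZeroBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits == 0x8000000000000000ull;  // only -0.0 has just the sign bit set
}

int main() {
  assert(IsMinusZeroBits(-0.0));
  assert(!IsMinusZeroBits(0.0));
  assert(!IsMinusZeroBits(-1.0));
  return 0;
}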
void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
// Clamp the value to [0..255].
Cmp(input.W(), Operand(input.W(), UXTB));
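Comparing the input with its own zero-extended low byte (UXTB) splits the cases in one instruction: equal means the value is already in [0..255], less means it is negative, greater means it is above 255, and the rest of the (elided) sequence selects 0 or 255 accordingly. The plain C++ equivalent of the overall clamp:

#include <cstdint>

inline uint8_t ClampInt32ToUint8(int32_t input) {
  if (input < 0) return 0;      // negative values saturate to 0
  if (input > 255) return 255;  // large values saturate to 255
  return static_cast<uint8_t>(input);
}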
@@ -2398,9 +2471,9 @@ void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
Register scratch5) {
// Untag src and dst into scratch registers.
// Copy src->dst in a tight loop.
- ASSERT(!AreAliased(dst, src,
+ DCHECK(!AreAliased(dst, src,
scratch1, scratch2, scratch3, scratch4, scratch5));
- ASSERT(count >= 2);
+ DCHECK(count >= 2);
const Register& remaining = scratch3;
Mov(remaining, count / 2);
@@ -2437,7 +2510,7 @@ void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
Register scratch4) {
// Untag src and dst into scratch registers.
// Copy src->dst in an unrolled loop.
- ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
+ DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
const Register& dst_untagged = scratch1;
const Register& src_untagged = scratch2;
@@ -2466,7 +2539,7 @@ void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
Register scratch3) {
// Untag src and dst into scratch registers.
// Copy src->dst in an unrolled loop.
- ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3));
const Register& dst_untagged = scratch1;
const Register& src_untagged = scratch2;
@@ -2495,10 +2568,10 @@ void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
//
// In both cases, fields are copied in pairs if possible, and left-overs are
// handled separately.
- ASSERT(!AreAliased(dst, src));
- ASSERT(!temps.IncludesAliasOf(dst));
- ASSERT(!temps.IncludesAliasOf(src));
- ASSERT(!temps.IncludesAliasOf(xzr));
+ DCHECK(!AreAliased(dst, src));
+ DCHECK(!temps.IncludesAliasOf(dst));
+ DCHECK(!temps.IncludesAliasOf(src));
+ DCHECK(!temps.IncludesAliasOf(xzr));
if (emit_debug_code()) {
Cmp(dst, src);
@@ -2542,8 +2615,8 @@ void MacroAssembler::CopyBytes(Register dst,
UseScratchRegisterScope temps(this);
Register tmp1 = temps.AcquireX();
Register tmp2 = temps.AcquireX();
- ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
- ASSERT(!AreAliased(src, dst, csp));
+ DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
+ DCHECK(!AreAliased(src, dst, csp));
if (emit_debug_code()) {
// Check copy length.
@@ -2592,7 +2665,7 @@ void MacroAssembler::CopyBytes(Register dst,
void MacroAssembler::FillFields(Register dst,
Register field_count,
Register filler) {
- ASSERT(!dst.Is(csp));
+ DCHECK(!dst.Is(csp));
UseScratchRegisterScope temps(this);
Register field_ptr = temps.AcquireX();
Register counter = temps.AcquireX();
@@ -2637,7 +2710,7 @@ void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
if (smi_check == DO_SMI_CHECK) {
JumpIfEitherSmi(first, second, failure);
} else if (emit_debug_code()) {
- ASSERT(smi_check == DONT_DO_SMI_CHECK);
+ DCHECK(smi_check == DONT_DO_SMI_CHECK);
Label not_smi;
JumpIfEitherSmi(first, second, NULL, &not_smi);
@@ -2668,8 +2741,8 @@ void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
Register scratch1,
Register scratch2,
Label* failure) {
- ASSERT(!AreAliased(scratch1, second));
- ASSERT(!AreAliased(scratch1, scratch2));
+ DCHECK(!AreAliased(scratch1, second));
+ DCHECK(!AreAliased(scratch1, scratch2));
static const int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
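The mask/tag pair above implements a single-compare test that an instance type is simultaneously a string, one-byte encoded and sequential. A sketch of that test; the mask bits follow the DCHECKs elsewhere in this file (kIsNotStringMask == 0x80, kStringEncodingMask == 0x04), while the representation mask and the exact tag value are assumptions:

#include <cstdint>

constexpr uint32_t kIsNotStringMaskSketch = 0x80;
constexpr uint32_t kStringEncodingMaskSketch = 0x04;
constexpr uint32_t kStringRepresentationMaskSketch = 0x03;
constexpr uint32_t kFlatAsciiStringMaskSketch =
    kIsNotStringMaskSketch | kStringEncodingMaskSketch |
    kStringRepresentationMaskSketch;
constexpr uint32_t kFlatAsciiStringTagSketch = 0x04;  // string | one-byte | seq

inline bool IsFlatAsciiInstanceType(uint32_t instance_type) {
  return (instance_type & kFlatAsciiStringMaskSketch) ==
         kFlatAsciiStringTagSketch;
}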
@@ -2700,7 +2773,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Register scratch1,
Register scratch2,
Label* failure) {
- ASSERT(!AreAliased(first, second, scratch1, scratch2));
+ DCHECK(!AreAliased(first, second, scratch1, scratch2));
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
const int kFlatAsciiStringTag =
@@ -2748,12 +2821,12 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
// passed in registers.
- ASSERT(actual.is_immediate() || actual.reg().is(x0));
- ASSERT(expected.is_immediate() || expected.reg().is(x2));
- ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
+ DCHECK(actual.is_immediate() || actual.reg().is(x0));
+ DCHECK(expected.is_immediate() || expected.reg().is(x2));
+ DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
+ DCHECK(actual.is_immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
@@ -2816,7 +2889,7 @@ void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
Label done;
@@ -2833,7 +2906,7 @@ void MacroAssembler::InvokeCode(Register code,
Call(code);
call_wrapper.AfterCall();
} else {
- ASSERT(flag == JUMP_FUNCTION);
+ DCHECK(flag == JUMP_FUNCTION);
Jump(code);
}
}
@@ -2849,11 +2922,11 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in x1.
// (See FullCodeGenerator::Generate().)
- ASSERT(function.is(x1));
+ DCHECK(function.is(x1));
Register expected_reg = x2;
Register code_reg = x3;
@@ -2881,11 +2954,11 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in x1.
// (See FullCodeGenerator::Generate().)
- ASSERT(function.Is(x1));
+ DCHECK(function.Is(x1));
Register code_reg = x3;
@@ -2940,15 +3013,14 @@ void MacroAssembler::TryConvertDoubleToInt64(Register result,
void MacroAssembler::TruncateDoubleToI(Register result,
DoubleRegister double_input) {
Label done;
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
// Try to convert the double to an int64. If successful, the bottom 32 bits
// contain our truncated int32 result.
TryConvertDoubleToInt64(result, double_input, &done);
// If we fell through then inline version didn't succeed - call stub instead.
- Push(lr);
- Push(double_input); // Put input on stack.
+ Push(lr, double_input);
DoubleToIStub stub(isolate(),
jssp,
@@ -2968,8 +3040,8 @@ void MacroAssembler::TruncateDoubleToI(Register result,
void MacroAssembler::TruncateHeapNumberToI(Register result,
Register object) {
Label done;
- ASSERT(!result.is(object));
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(!result.is(object));
+ DCHECK(jssp.Is(StackPointer()));
Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
@@ -2992,29 +3064,30 @@ void MacroAssembler::TruncateHeapNumberToI(Register result,
}
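Both TruncateDoubleToI and TruncateHeapNumberToI share the same inline fast path: truncate the double to an int64 and, when the conversion did not saturate, take the low 32 bits, which already equal the ECMA-style int32 (truncation is modulo 2^32); everything else falls back to DoubleToIStub. A hedged sketch of that decision, using a range check where the real code reads the overflow flag after fcvtzs:

#include <cstdint>
#include <optional>

inline std::optional<int32_t> TryTruncateDoubleToInt32(double input) {
  const double kTwoTo63 = 9223372036854775808.0;  // 2^63
  if (!(input > -kTwoTo63 && input < kTwoTo63)) {
    return std::nullopt;  // NaN, infinities and huge values: stub path
  }
  int64_t as_int64 = static_cast<int64_t>(input);  // truncate toward zero
  return static_cast<int32_t>(static_cast<uint32_t>(as_int64));  // low 32 bits
}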
-void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
- if (frame_mode == BUILD_STUB_FRAME) {
- ASSERT(StackPointer().Is(jssp));
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- __ Mov(temp, Smi::FromInt(StackFrame::STUB));
- // Compiled stubs don't age, and so they don't need the predictable code
- // ageing sequence.
- __ Push(lr, fp, cp, temp);
- __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+void MacroAssembler::StubPrologue() {
+ DCHECK(StackPointer().Is(jssp));
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ __ Mov(temp, Smi::FromInt(StackFrame::STUB));
+ // Compiled stubs don't age, and so they don't need the predictable code
+ // ageing sequence.
+ __ Push(lr, fp, cp, temp);
+ __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+ if (code_pre_aging) {
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ __ EmitCodeAgeSequence(stub);
} else {
- if (isolate()->IsCodePreAgingActive()) {
- Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
- __ EmitCodeAgeSequence(stub);
- } else {
- __ EmitFrameSetupForCodeAgePatching();
- }
+ __ EmitFrameSetupForCodeAgePatching();
}
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
UseScratchRegisterScope temps(this);
Register type_reg = temps.AcquireX();
Register code_reg = temps.AcquireX();
@@ -3035,7 +3108,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
Mov(jssp, fp);
@@ -3053,7 +3126,7 @@ void MacroAssembler::ExitFrameRestoreFPRegs() {
// Read the registers from the stack without popping them. The stack pointer
// will be reset as part of the unwinding process.
CPURegList saved_fp_regs = kCallerSavedFP;
- ASSERT(saved_fp_regs.Count() % 2 == 0);
+ DCHECK(saved_fp_regs.Count() % 2 == 0);
int offset = ExitFrameConstants::kLastExitFrameField;
while (!saved_fp_regs.IsEmpty()) {
@@ -3068,7 +3141,7 @@ void MacroAssembler::ExitFrameRestoreFPRegs() {
void MacroAssembler::EnterExitFrame(bool save_doubles,
const Register& scratch,
int extra_space) {
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
// Set up the new stack frame.
Mov(scratch, Operand(CodeObject()));
@@ -3114,7 +3187,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
// Align and synchronize the system stack pointer with jssp.
AlignAndSetCSPForFrame();
- ASSERT(csp.Is(StackPointer()));
+ DCHECK(csp.Is(StackPointer()));
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
@@ -3138,7 +3211,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
const Register& scratch,
bool restore_context) {
- ASSERT(csp.Is(StackPointer()));
+ DCHECK(csp.Is(StackPointer()));
if (restore_doubles) {
ExitFrameRestoreFPRegs();
@@ -3185,7 +3258,7 @@ void MacroAssembler::SetCounter(StatsCounter* counter, int value,
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- ASSERT(value != 0);
+ DCHECK(value != 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Mov(scratch2, ExternalReference(counter));
Ldr(scratch1, MemOperand(scratch2));
@@ -3221,14 +3294,14 @@ void MacroAssembler::DebugBreak() {
Mov(x0, 0);
Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(isolate(), 1);
- ASSERT(AllowThisStubCall(&ces));
+ DCHECK(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) {
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
// Adjust this code if the asserts don't hold.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
@@ -3250,7 +3323,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
- ASSERT(Smi::FromInt(0) == 0);
+ DCHECK(Smi::FromInt(0) == 0);
Push(xzr, xzr, x11, x10);
} else {
Push(fp, cp, x11, x10);
@@ -3280,7 +3353,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3296,14 +3369,14 @@ void MacroAssembler::Allocate(int object_size,
UseScratchRegisterScope temps(this);
Register scratch3 = temps.AcquireX();
- ASSERT(!AreAliased(result, scratch1, scratch2, scratch3));
- ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
+ DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
+ DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
- ASSERT(0 == (object_size & kObjectAlignmentMask));
+ DCHECK(0 == (object_size & kObjectAlignmentMask));
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDP.
@@ -3313,7 +3386,7 @@ void MacroAssembler::Allocate(int object_size,
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
+ DCHECK((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
Register top_address = scratch1;
@@ -3372,8 +3445,8 @@ void MacroAssembler::Allocate(Register object_size,
UseScratchRegisterScope temps(this);
Register scratch3 = temps.AcquireX();
- ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
- ASSERT(object_size.Is64Bits() && result.Is64Bits() &&
+ DCHECK(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
+ DCHECK(object_size.Is64Bits() && result.Is64Bits() &&
scratch1.Is64Bits() && scratch2.Is64Bits());
// Check relative positions of allocation top and limit addresses.
@@ -3384,7 +3457,7 @@ void MacroAssembler::Allocate(Register object_size,
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
+ DCHECK((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
Register top_address = scratch1;
@@ -3458,7 +3531,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
Register scratch2,
Register scratch3,
Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
@@ -3489,7 +3562,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
Register scratch2,
Register scratch3,
Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
@@ -3535,33 +3608,12 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Label allocate_new_space, install_map;
- AllocationFlags flags = TAG_OBJECT;
-
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(isolate());
- Mov(scratch1, high_promotion_mode);
- Ldr(scratch1, MemOperand(scratch1));
- Cbz(scratch1, &allocate_new_space);
-
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
-
- B(&install_map);
-
- Bind(&allocate_new_space);
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
- flags);
-
- Bind(&install_map);
+ TAG_OBJECT);
InitializeNewString(result,
length,
@@ -3576,7 +3628,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2));
+ DCHECK(!AreAliased(result, length, scratch1, scratch2));
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
@@ -3593,7 +3645,7 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2));
+ DCHECK(!AreAliased(result, length, scratch1, scratch2));
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
@@ -3612,8 +3664,9 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
CPURegister value,
- CPURegister heap_number_map) {
- ASSERT(!value.IsValid() || value.Is64Bits());
+ CPURegister heap_number_map,
+ MutableMode mode) {
+ DCHECK(!value.IsValid() || value.Is64Bits());
UseScratchRegisterScope temps(this);
// Allocate an object in the heap for the heap number and tag it as a heap
@@ -3621,6 +3674,10 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
+ Heap::RootListIndex map_index = mode == MUTABLE
+ ? Heap::kMutableHeapNumberMapRootIndex
+ : Heap::kHeapNumberMapRootIndex;
+
// Prepare the heap number map.
if (!heap_number_map.IsValid()) {
// If we have a valid value register, use the same type of register to store
@@ -3630,7 +3687,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
} else {
heap_number_map = scratch1;
}
- LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ LoadRoot(heap_number_map, map_index);
}
if (emit_debug_code()) {
Register map;
@@ -3640,7 +3697,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
} else {
map = Register(heap_number_map);
}
- AssertRegisterIsRoot(map, Heap::kHeapNumberMapRootIndex);
+ AssertRegisterIsRoot(map, map_index);
}
// Store the heap number map and the value in the allocated object.
@@ -3781,7 +3838,7 @@ void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
// Load the map's "bit field 2".
__ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
- __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ DecodeField<Map::ElementsKindBits>(result);
}
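DecodeField<Map::ElementsKindBits> replaces the explicit Ubfx with a decode driven by the BitField's own shift and size. A sketch of what such a decode does; the shift and size below are placeholders, not the real Map::ElementsKindBits parameters:

#include <cstdint>

template <int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t decode(uint32_t value) { return (value & kMask) >> kShift; }
};

using ElementsKindBitsSketch = BitFieldSketch<3, 5>;  // placeholder layout

inline uint32_t DecodeElementsKind(uint32_t bit_field2) {
  return ElementsKindBitsSketch::decode(bit_field2);  // the Ubfx it replaces
}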
@@ -3790,15 +3847,16 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
Register scratch,
Label* miss,
BoundFunctionAction action) {
- ASSERT(!AreAliased(function, result, scratch));
+ DCHECK(!AreAliased(function, result, scratch));
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
+ Label non_instance;
+ if (action == kMissOnBoundFunction) {
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
- // Check that the function really is a function. Load map into result reg.
- JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
+ // Check that the function really is a function. Load map into result reg.
+ JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
- if (action == kMissOnBoundFunction) {
Register scratch_w = scratch.W();
Ldr(scratch,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
@@ -3807,12 +3865,11 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
Ldr(scratch_w,
FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
- }
- // Make sure that the function has an instance prototype.
- Label non_instance;
- Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
+ // Make sure that the function has an instance prototype.
+ Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
+ }
// Get the prototype or initial map from the function.
Ldr(result,
@@ -3829,12 +3886,15 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Get the prototype from the initial map.
Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- B(&done);
- // Non-instance prototype: fetch prototype from constructor field in initial
- // map.
- Bind(&non_instance);
- Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+ if (action == kMissOnBoundFunction) {
+ B(&done);
+
+ // Non-instance prototype: fetch prototype from constructor field in initial
+ // map.
+ Bind(&non_instance);
+ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+ }
// All done.
Bind(&done);
@@ -3845,7 +3905,7 @@ void MacroAssembler::CompareRoot(const Register& obj,
Heap::RootListIndex index) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- ASSERT(!AreAliased(obj, temp));
+ DCHECK(!AreAliased(obj, temp));
LoadRoot(temp, index);
Cmp(obj, temp);
}
@@ -3880,7 +3940,7 @@ void MacroAssembler::CompareAndSplit(const Register& lhs,
} else if (if_false == fall_through) {
CompareAndBranch(lhs, rhs, cond, if_true);
} else if (if_true == fall_through) {
- CompareAndBranch(lhs, rhs, InvertCondition(cond), if_false);
+ CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
} else {
CompareAndBranch(lhs, rhs, cond, if_true);
B(if_false);
@@ -3946,7 +4006,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
FPRegister fpscratch1,
Label* fail,
int elements_offset) {
- ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
Label store_num;
// Speculatively convert the smi to a double - all smis can be exactly
@@ -3985,13 +4045,10 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it does not
// conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- STATIC_ASSERT(kSmiTag == 0);
- Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- SmiTag(index, hash);
+ DecodeField<String::ArrayIndexValueBits>(index, hash);
+ SmiTag(index, index);
}
@@ -4001,7 +4058,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(
SeqStringSetCharCheckIndexType index_type,
Register scratch,
uint32_t encoding_mask) {
- ASSERT(!AreAliased(string, index, scratch));
+ DCHECK(!AreAliased(string, index, scratch));
if (index_type == kIndexIsSmi) {
AssertSmi(index);
@@ -4022,7 +4079,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(
Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
Check(lt, kIndexIsTooLarge);
- ASSERT_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(0, Smi::FromInt(0));
Cmp(index, 0);
Check(ge, kIndexIsNegative);
}
@@ -4032,7 +4089,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch1,
Register scratch2,
Label* miss) {
- ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
+ DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
Label same_contexts;
// Load current lexical context from the stack frame.
@@ -4094,10 +4151,10 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Compute the hash code from the untagged key. This must be kept in sync with
-// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
void MacroAssembler::GetNumberHash(Register key, Register scratch) {
- ASSERT(!AreAliased(key, scratch));
+ DCHECK(!AreAliased(key, scratch));
// Xor original key with a seed.
LoadRoot(scratch, Heap::kHashSeedRootIndex);
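The hash computed here has to match ComputeIntegerHash in utils.h bit for bit, since the dictionary was built with it. For reference, a sketch of that seeded integer hash written from memory; the version in utils.h is authoritative:

#include <cstdint>

inline uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;   // xor original key with a seed
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;     // keep it a non-negative smi
}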
@@ -4136,7 +4193,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register scratch1,
Register scratch2,
Register scratch3) {
- ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
Label done;
@@ -4160,7 +4217,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
And(scratch2, scratch2, scratch1);
// Scale the index by multiplying by the element size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ DCHECK(SeededNumberDictionary::kEntrySize == 3);
Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name.
@@ -4195,7 +4252,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register scratch1,
SaveFPRegsMode fp_mode,
RememberedSetFinalAction and_then) {
- ASSERT(!AreAliased(object, address, scratch1));
+ DCHECK(!AreAliased(object, address, scratch1));
Label done, store_buffer_overflow;
if (emit_debug_code()) {
Label ok;
@@ -4215,12 +4272,12 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Str(scratch1, MemOperand(scratch2));
// Call stub on end of buffer.
// Check for end of buffer.
- ASSERT(StoreBuffer::kStoreBufferOverflowBit ==
+ DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
(1 << (14 + kPointerSizeLog2)));
if (and_then == kFallThroughAtEnd) {
Tbz(scratch1, (14 + kPointerSizeLog2), &done);
} else {
- ASSERT(and_then == kReturnAtEnd);
+ DCHECK(and_then == kReturnAtEnd);
Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
Ret();
}
@@ -4250,7 +4307,7 @@ void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the stack, so
// adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- ASSERT(num_unsaved >= 0);
+ DCHECK(num_unsaved >= 0);
Claim(num_unsaved);
PushXRegList(kSafepointSavedRegisters);
}
@@ -4272,7 +4329,7 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() {
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// Make sure the safepoint registers list is what we expect.
- ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
+ DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
// Safepoint registers are stored contiguously on the stack, but not all the
// registers are saved. The following registers are excluded:
@@ -4329,7 +4386,8 @@ void MacroAssembler::RecordWriteField(
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -4341,7 +4399,7 @@ void MacroAssembler::RecordWriteField(
// Although the object register is tagged, the offset is relative to the start
// of the object, so offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
+ DCHECK(IsAligned(offset, kPointerSize));
Add(scratch, object, offset - kHeapObjectTag);
if (emit_debug_code()) {
@@ -4358,7 +4416,8 @@ void MacroAssembler::RecordWriteField(
lr_status,
save_fp,
remembered_set_action,
- OMIT_SMI_CHECK);
+ OMIT_SMI_CHECK,
+ pointers_to_here_check_for_value);
Bind(&done);
@@ -4371,20 +4430,94 @@ void MacroAssembler::RecordWriteField(
}
+// Will clobber: object, map, dst.
+// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
+void MacroAssembler::RecordWriteForMap(Register object,
+ Register map,
+ Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode) {
+ ASM_LOCATION("MacroAssembler::RecordWrite");
+ DCHECK(!AreAliased(object, map));
+
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ CompareMap(map, temp, isolate()->factory()->meta_map());
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ Cmp(temp, map);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ // A single check of the map's pages interesting flag suffices, since it is
+ // only set during incremental collection, and then it's also guaranteed that
+ // the from object's page's interesting flag is also set. This optimization
+ // relies on the fact that maps can never be in new space.
+ CheckPageFlagClear(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ &done);
+
+ // Record the actual write.
+ if (lr_status == kLRHasNotBeenSaved) {
+ Push(lr);
+ }
+ Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ Pop(lr);
+ }
+
+ Bind(&done);
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
+ dst);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Mov(dst, Operand(BitCast<int64_t>(kZapValue + 12)));
+ Mov(map, Operand(BitCast<int64_t>(kZapValue + 16)));
+ }
+}
+
+
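Because maps never live in new space, RecordWriteForMap only needs to test the destination page's kPointersToHereAreInterestingMask bit before calling the stub. A sketch of that page-flag test; the page size, bit position and flags offset below are placeholders, not V8's real layout:

#include <cstdint>

constexpr uintptr_t kPageAlignmentMaskSketch = (uintptr_t{1} << 20) - 1;
constexpr uint32_t kPointersToHereAreInterestingSketch = 1u << 1;

inline const uint32_t* PageFlagsOf(uintptr_t object_address) {
  // Assume the flags word sits at the very start of the page header.
  return reinterpret_cast<const uint32_t*>(object_address &
                                           ~kPageAlignmentMaskSketch);
}

inline bool NeedsMapWriteBarrier(uintptr_t map_address) {
  return (*PageFlagsOf(map_address) & kPointersToHereAreInterestingSketch) != 0;
}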
// Will clobber: object, address, value.
// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
//
// The register 'object' contains a heap object pointer. The heap object tag is
// shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
ASM_LOCATION("MacroAssembler::RecordWrite");
- ASSERT(!AreAliased(object, value));
+ DCHECK(!AreAliased(object, value));
if (emit_debug_code()) {
UseScratchRegisterScope temps(this);
@@ -4395,23 +4528,21 @@ void MacroAssembler::RecordWrite(Register object,
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- // TODO(mstarzinger): Dynamic counter missing.
-
// First, check if a write barrier is even needed. The tests below
// catch stores of smis and stores into the young generation.
Label done;
if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
+ DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
- CheckPageFlagClear(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- &done);
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlagClear(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ &done);
+ }
CheckPageFlagClear(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -4430,6 +4561,11 @@ void MacroAssembler::RecordWrite(Register object,
Bind(&done);
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
+ value);
+
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
@@ -4443,7 +4579,7 @@ void MacroAssembler::AssertHasValidColor(const Register& reg) {
if (emit_debug_code()) {
// The bit sequence is backward. The first character in the string
// represents the least significant bit.
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
Label color_is_valid;
Tbnz(reg, 0, &color_is_valid);
@@ -4457,8 +4593,8 @@ void MacroAssembler::AssertHasValidColor(const Register& reg) {
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register shift_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg));
- ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
+ DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
+ DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
// addr_reg is divided into fields:
// |63 page base 20|19 high 8|7 shift 3|2 0|
// 'high' gives the index of the cell holding color bits for the object.
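With the 20-bit page offset shown in the comment, 8-byte mark granules and 32-bit bitmap cells, the address splits into the page base (bits 63:20), the bitmap cell index (bits 19:8) and the bit position inside that cell (bits 7:3); bits 2:0 disappear with pointer alignment. A sketch of that decomposition:

#include <cstdint>

struct MarkBitLocation {
  uintptr_t page_base;  // start of the page owning the bitmap
  uint32_t cell_index;  // which 32-bit bitmap cell within the page
  uint32_t bit_shift;   // which bit inside that cell
};

inline MarkBitLocation DecomposeForMarkBits(uintptr_t addr) {
  MarkBitLocation loc;
  loc.page_base = addr & ~((uintptr_t{1} << 20) - 1);           // bits 63:20
  loc.cell_index = static_cast<uint32_t>((addr >> 8) & 0xfff);  // bits 19:8
  loc.bit_shift = static_cast<uint32_t>((addr >> 3) & 0x1f);    // bits 7:3
  return loc;
}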
@@ -4482,7 +4618,7 @@ void MacroAssembler::HasColor(Register object,
int first_bit,
int second_bit) {
// See mark-compact.h for color definitions.
- ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch));
+ DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));
GetMarkBits(object, bitmap_scratch, shift_scratch);
Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
@@ -4493,14 +4629,14 @@ void MacroAssembler::HasColor(Register object,
// These bit sequences are backwards. The first character in the string
// represents the least significant bit.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
// Check for the color.
if (first_bit == 0) {
// Checking for white.
- ASSERT(second_bit == 0);
+ DCHECK(second_bit == 0);
// We only need to test the first bit.
Tbz(bitmap_scratch, 0, has_color);
} else {
@@ -4524,7 +4660,7 @@ void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
Label* if_deprecated) {
if (map->CanBeDeprecated()) {
Mov(scratch, Operand(map));
- Ldrsw(scratch, UntagSmiFieldMemOperand(scratch, Map::kBitField3Offset));
+ Ldrsw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
}
}
@@ -4534,7 +4670,7 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
}
@@ -4544,7 +4680,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Register scratch0,
Register scratch1,
Label* found) {
- ASSERT(!AreAliased(object, scratch0, scratch1));
+ DCHECK(!AreAliased(object, scratch0, scratch1));
Factory* factory = isolate()->factory();
Register current = scratch0;
Label loop_again;
@@ -4556,7 +4692,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Bind(&loop_again);
Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ DecodeField<Map::ElementsKindBits>(scratch1);
CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
@@ -4565,7 +4701,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
Register result) {
- ASSERT(!result.Is(ldr_location));
+ DCHECK(!result.Is(ldr_location));
const uint32_t kLdrLitOffset_lsb = 5;
const uint32_t kLdrLitOffset_width = 19;
Ldr(result, MemOperand(ldr_location));
@@ -4588,14 +4724,14 @@ void MacroAssembler::EnsureNotWhite(
Register load_scratch,
Register length_scratch,
Label* value_is_white_and_not_data) {
- ASSERT(!AreAliased(
+ DCHECK(!AreAliased(
value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
// These bit sequences are backwards. The first character in the string
// represents the least significant bit.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
GetMarkBits(value, bitmap_scratch, shift_scratch);
Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
@@ -4619,8 +4755,8 @@ void MacroAssembler::EnsureNotWhite(
JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
// Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
Register instance_type = load_scratch;
@@ -4634,8 +4770,8 @@ void MacroAssembler::EnsureNotWhite(
// Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
// External strings are the only ones with the kExternalStringTag bit
// set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
Mov(length_scratch, ExternalString::kSize);
TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
@@ -4643,7 +4779,7 @@ void MacroAssembler::EnsureNotWhite(
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
- ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+ DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
String::kLengthOffset));
Tst(instance_type, kStringEncodingMask);
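For a white sequential string, the bytes to account for are the header plus one byte per character for one-byte strings or two for two-byte strings, rounded up to the object alignment; the length load and the encoding test above feed exactly that computation. A plain sketch with placeholder constants:

#include <cstddef>

constexpr size_t kSeqStringHeaderSizeSketch = 16;  // assumed header size
constexpr size_t kObjectAlignmentSketch = 8;       // pointer-size granularity

inline size_t SeqStringSizeInBytes(size_t length, bool is_two_byte) {
  size_t unaligned =
      kSeqStringHeaderSizeSketch + length * (is_two_byte ? 2 : 1);
  return (unaligned + kObjectAlignmentSketch - 1) &
         ~(kObjectAlignmentSketch - 1);
}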
@@ -4869,110 +5005,97 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
const CPURegister& arg3) {
// We cannot handle a caller-saved stack pointer. It doesn't make much sense
// in most cases anyway, so this restriction shouldn't be too serious.
- ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
-
- // Make sure that the macro assembler doesn't try to use any of our arguments
- // as scratch registers.
- ASSERT(!TmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
- ASSERT(!FPTmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
-
- // We cannot print the stack pointer because it is typically used to preserve
- // caller-saved registers (using other Printf variants which depend on this
- // helper).
- ASSERT(!AreAliased(arg0, StackPointer()));
- ASSERT(!AreAliased(arg1, StackPointer()));
- ASSERT(!AreAliased(arg2, StackPointer()));
- ASSERT(!AreAliased(arg3, StackPointer()));
-
- static const int kMaxArgCount = 4;
- // Assume that we have the maximum number of arguments until we know
- // otherwise.
- int arg_count = kMaxArgCount;
-
- // The provided arguments.
- CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3};
-
- // The PCS registers where the arguments need to end up.
- CPURegister pcs[kMaxArgCount] = {NoCPUReg, NoCPUReg, NoCPUReg, NoCPUReg};
-
- // Promote FP arguments to doubles, and integer arguments to X registers.
- // Note that FP and integer arguments cannot be mixed, but we'll check
- // AreSameSizeAndType once we've processed these promotions.
- for (int i = 0; i < kMaxArgCount; i++) {
+ DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
+
+ // The provided arguments, and their proper procedure-call standard registers.
+ CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
+ CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};
+
+ int arg_count = kPrintfMaxArgCount;
+
+ // The PCS varargs registers for printf. Note that x0 is used for the printf
+ // format string.
+ static const CPURegList kPCSVarargs =
+ CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
+ static const CPURegList kPCSVarargsFP =
+ CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);
+
+ // We can use caller-saved registers as scratch values, except for the
+ // arguments and the PCS registers where they might need to go.
+ CPURegList tmp_list = kCallerSaved;
+ tmp_list.Remove(x0); // Used to pass the format string.
+ tmp_list.Remove(kPCSVarargs);
+ tmp_list.Remove(arg0, arg1, arg2, arg3);
+
+ CPURegList fp_tmp_list = kCallerSavedFP;
+ fp_tmp_list.Remove(kPCSVarargsFP);
+ fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
+
+ // Override the MacroAssembler's scratch register list. The lists will be
+ // reset automatically at the end of the UseScratchRegisterScope.
+ UseScratchRegisterScope temps(this);
+ TmpList()->set_list(tmp_list.list());
+ FPTmpList()->set_list(fp_tmp_list.list());
+
+ // Copies of the printf vararg registers that we can pop from.
+ CPURegList pcs_varargs = kPCSVarargs;
+ CPURegList pcs_varargs_fp = kPCSVarargsFP;
+
+ // Place the arguments. There are lots of clever tricks and optimizations we
+ // could use here, but Printf is a debug tool so instead we just try to keep
+ // it simple: Move each input that isn't already in the right place to a
+ // scratch register, then move everything back.
+ for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
+ // Work out the proper PCS register for this argument.
if (args[i].IsRegister()) {
- // Note that we use x1 onwards, because x0 will hold the format string.
- pcs[i] = Register::XRegFromCode(i + 1);
- // For simplicity, we handle all integer arguments as X registers. An X
- // register argument takes the same space as a W register argument in the
- // PCS anyway. The only limitation is that we must explicitly clear the
- // top word for W register arguments as the callee will expect it to be
- // clear.
- if (!args[i].Is64Bits()) {
- const Register& as_x = args[i].X();
- And(as_x, as_x, 0x00000000ffffffff);
- args[i] = as_x;
- }
+ pcs[i] = pcs_varargs.PopLowestIndex().X();
+ // We might only need a W register here. We need to know the size of the
+ // argument so we can properly encode it for the simulator call.
+ if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
} else if (args[i].IsFPRegister()) {
- pcs[i] = FPRegister::DRegFromCode(i);
- // C and C++ varargs functions (such as printf) implicitly promote float
- // arguments to doubles.
- if (!args[i].Is64Bits()) {
- FPRegister s(args[i]);
- const FPRegister& as_d = args[i].D();
- Fcvt(as_d, s);
- args[i] = as_d;
- }
+ // In C, floats are always cast to doubles for varargs calls.
+ pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
} else {
- // This is the first empty (NoCPUReg) argument, so use it to set the
- // argument count and bail out.
+ DCHECK(args[i].IsNone());
arg_count = i;
break;
}
- }
- ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
- // Check that every remaining argument is NoCPUReg.
- for (int i = arg_count; i < kMaxArgCount; i++) {
- ASSERT(args[i].IsNone());
- }
- ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1],
- args[2], args[3],
- pcs[0], pcs[1],
- pcs[2], pcs[3]));
- // Move the arguments into the appropriate PCS registers.
- //
- // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is
- // surprisingly complicated.
- //
- // * For even numbers of registers, we push the arguments and then pop them
- // into their final registers. This maintains 16-byte stack alignment in
- // case csp is the stack pointer, since we're only handling X or D
- // registers at this point.
- //
- // * For odd numbers of registers, we push and pop all but one register in
- // the same way, but the left-over register is moved directly, since we
- // can always safely move one register without clobbering any source.
- if (arg_count >= 4) {
- Push(args[3], args[2], args[1], args[0]);
- } else if (arg_count >= 2) {
- Push(args[1], args[0]);
- }
-
- if ((arg_count % 2) != 0) {
- // Move the left-over register directly.
- const CPURegister& leftover_arg = args[arg_count - 1];
- const CPURegister& leftover_pcs = pcs[arg_count - 1];
- if (leftover_arg.IsRegister()) {
- Mov(Register(leftover_pcs), Register(leftover_arg));
- } else {
- Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg));
+ // If the argument is already in the right place, leave it where it is.
+ if (args[i].Aliases(pcs[i])) continue;
+
+ // Otherwise, if the argument is in a PCS argument register, allocate an
+ // appropriate scratch register and then move it out of the way.
+ if (kPCSVarargs.IncludesAliasOf(args[i]) ||
+ kPCSVarargsFP.IncludesAliasOf(args[i])) {
+ if (args[i].IsRegister()) {
+ Register old_arg = Register(args[i]);
+ Register new_arg = temps.AcquireSameSizeAs(old_arg);
+ Mov(new_arg, old_arg);
+ args[i] = new_arg;
+ } else {
+ FPRegister old_arg = FPRegister(args[i]);
+ FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
+ Fmov(new_arg, old_arg);
+ args[i] = new_arg;
+ }
}
}
- if (arg_count >= 4) {
- Pop(pcs[0], pcs[1], pcs[2], pcs[3]);
- } else if (arg_count >= 2) {
- Pop(pcs[0], pcs[1]);
+ // Do a second pass to move values into their final positions and perform any
+ // conversions that may be required.
+ for (int i = 0; i < arg_count; i++) {
+ DCHECK(pcs[i].type() == args[i].type());
+ if (pcs[i].IsRegister()) {
+ Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
+ } else {
+ DCHECK(pcs[i].IsFPRegister());
+ if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
+ Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
+ } else {
+ Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
+ }
+ }
}
// Load the format string into x0, as per the procedure-call standard.
@@ -5000,18 +5123,33 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
Bic(csp, StackPointer(), 0xf);
}
- CallPrintf(pcs[0].type());
+ CallPrintf(arg_count, pcs);
}
-void MacroAssembler::CallPrintf(CPURegister::RegisterType type) {
+void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
// A call to printf needs special handling for the simulator, since the system
// printf function will use a different instruction set and the procedure-call
// standard will not be compatible.
#ifdef USE_SIMULATOR
{ InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
hlt(kImmExceptionIsPrintf);
- dc32(type);
+ dc32(arg_count); // kPrintfArgCountOffset
+
+ // Determine the argument pattern.
+ uint32_t arg_pattern_list = 0;
+ for (int i = 0; i < arg_count; i++) {
+ uint32_t arg_pattern;
+ if (args[i].IsRegister()) {
+ arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
+ } else {
+ DCHECK(args[i].Is64Bits());
+ arg_pattern = kPrintfArgD;
+ }
+ DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
+ arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
+ }
+ dc32(arg_pattern_list); // kPrintfArgPatternListOffset
}
#else
Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
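Under the simulator, the hlt marker is now followed by two data words: the argument count and a packed list of per-argument type codes, kPrintfArgPatternBits bits each. The sketch below shows the packing and the matching unpacking in plain C++; the concrete values (2 bits per pattern, kPrintfArgW/X/D as 1/2/3) are assumptions standing in for the real definitions in the arm64 constants header, not quoted from this patch.

#include <cstdint>
#include <cstdio>

// Assumed values; the real definitions live in the arm64 constants header.
const unsigned kPrintfArgPatternBits = 2;
enum PrintfArgPattern { kPrintfArgW = 1, kPrintfArgX = 2, kPrintfArgD = 3 };

int main() {
  // Pack three arguments (an X, a W and a D register), as the loop above does.
  PrintfArgPattern args[] = {kPrintfArgX, kPrintfArgW, kPrintfArgD};
  uint32_t arg_pattern_list = 0;
  for (int i = 0; i < 3; i++) {
    arg_pattern_list |=
        static_cast<uint32_t>(args[i]) << (kPrintfArgPatternBits * i);
  }

  // Unpack, as the simulator side would after reading the two data words.
  for (int i = 0; i < 3; i++) {
    unsigned pattern = (arg_pattern_list >> (kPrintfArgPatternBits * i)) &
                       ((1u << kPrintfArgPatternBits) - 1);
    printf("arg %d: pattern %u\n", i, pattern);
  }
  return 0;
}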
@@ -5020,10 +5158,18 @@ void MacroAssembler::CallPrintf(CPURegister::RegisterType type) {
void MacroAssembler::Printf(const char * format,
- const CPURegister& arg0,
- const CPURegister& arg1,
- const CPURegister& arg2,
- const CPURegister& arg3) {
+ CPURegister arg0,
+ CPURegister arg1,
+ CPURegister arg2,
+ CPURegister arg3) {
+ // We can only print sp if it is the current stack pointer.
+ if (!csp.Is(StackPointer())) {
+ DCHECK(!csp.Aliases(arg0));
+ DCHECK(!csp.Aliases(arg1));
+ DCHECK(!csp.Aliases(arg2));
+ DCHECK(!csp.Aliases(arg3));
+ }
+
// Printf is expected to preserve all registers, so make sure that none are
// available as scratch registers until we've preserved them.
RegList old_tmp_list = TmpList()->list();
@@ -5045,19 +5191,41 @@ void MacroAssembler::Printf(const char * format,
TmpList()->set_list(tmp_list.list());
FPTmpList()->set_list(fp_tmp_list.list());
- // Preserve NZCV.
{ UseScratchRegisterScope temps(this);
- Register tmp = temps.AcquireX();
- Mrs(tmp, NZCV);
- Push(tmp, xzr);
- }
+ // If any of the arguments are the current stack pointer, allocate a new
+ // register for them, and adjust the value to compensate for pushing the
+ // caller-saved registers.
+ bool arg0_sp = StackPointer().Aliases(arg0);
+ bool arg1_sp = StackPointer().Aliases(arg1);
+ bool arg2_sp = StackPointer().Aliases(arg2);
+ bool arg3_sp = StackPointer().Aliases(arg3);
+ if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
+ // Allocate a register to hold the original stack pointer value, to pass
+ // to PrintfNoPreserve as an argument.
+ Register arg_sp = temps.AcquireX();
+ Add(arg_sp, StackPointer(),
+ kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
+ if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
+ if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
+ if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
+ if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
+ }
- PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
+ // Preserve NZCV.
+ { UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Mrs(tmp, NZCV);
+ Push(tmp, xzr);
+ }
- { UseScratchRegisterScope temps(this);
- Register tmp = temps.AcquireX();
- Pop(xzr, tmp);
- Msr(NZCV, tmp);
+ PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
+
+ // Restore NZCV.
+ { UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Pop(xzr, tmp);
+ Msr(NZCV, tmp);
+ }
}
PopCPURegList(kCallerSavedFP);
@@ -5074,7 +5242,7 @@ void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
// the sequence and copying it in the same way.
InstructionAccurateScope scope(this,
kNoCodeAgeSequenceLength / kInstructionSize);
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
EmitFrameSetupForCodeAgePatching(this);
}
@@ -5083,7 +5251,7 @@ void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
InstructionAccurateScope scope(this,
kNoCodeAgeSequenceLength / kInstructionSize);
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
EmitCodeAgeSequence(this, stub);
}
@@ -5121,7 +5289,7 @@ void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
//
// A branch (br) is used rather than a call (blr) because this code replaces
// the frame setup code that would normally preserve lr.
- __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
+ __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
__ adr(x0, &start);
__ br(ip0);
// IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
@@ -5136,7 +5304,7 @@ void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
- ASSERT(is_young ||
+ DCHECK(is_young ||
isolate->code_aging_helper()->IsOld(sequence));
return is_young;
}
@@ -5145,8 +5313,8 @@ bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
void MacroAssembler::TruncatingDiv(Register result,
Register dividend,
int32_t divisor) {
- ASSERT(!AreAliased(result, dividend));
- ASSERT(result.Is32Bits() && dividend.Is32Bits());
+ DCHECK(!AreAliased(result, dividend));
+ DCHECK(result.Is32Bits() && dividend.Is32Bits());
MultiplierAndShift ms(divisor);
Mov(result, ms.multiplier());
Smull(result.X(), dividend, result);
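TruncatingDiv replaces a runtime division by a constant with a multiply-high (Smull followed by taking the upper half) using values from MultiplierAndShift; the remainder of the routine falls outside this hunk. The following is only a sketch of the underlying idea, hard-coding the textbook multiplier for divisor 3 rather than anything MultiplierAndShift actually computes.

#include <cassert>
#include <cstdint>

// Sketch of the multiply-high trick for divisor 3 only. Assumes arithmetic
// right shift of a negative int64_t, which holds on the relevant toolchains.
int32_t DivideBy3(int32_t n) {
  const int64_t kMultiplier = 0x55555556;  // ceil(2^32 / 3)
  int64_t product = static_cast<int64_t>(n) * kMultiplier;
  int32_t quotient = static_cast<int32_t>(product >> 32);  // high half
  quotient += static_cast<uint32_t>(n) >> 31;  // round toward zero for n < 0
  return quotient;
}

int main() {
  for (int32_t n = -100; n <= 100; n++) assert(DivideBy3(n) == n / 3);
  return 0;
}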
@@ -5183,14 +5351,14 @@ CPURegister UseScratchRegisterScope::AcquireNextAvailable(
CPURegList* available) {
CHECK(!available->IsEmpty());
CPURegister result = available->PopLowestIndex();
- ASSERT(!AreAliased(result, xzr, csp));
+ DCHECK(!AreAliased(result, xzr, csp));
return result;
}
CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
const CPURegister& reg) {
- ASSERT(available->IncludesAliasOf(reg));
+ DCHECK(available->IncludesAliasOf(reg));
available->Remove(reg);
return reg;
}
@@ -5203,8 +5371,8 @@ void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
const Label* smi_check) {
Assembler::BlockPoolsScope scope(masm);
if (reg.IsValid()) {
- ASSERT(smi_check->is_bound());
- ASSERT(reg.Is64Bits());
+ DCHECK(smi_check->is_bound());
+ DCHECK(reg.Is64Bits());
// Encode the register (x0-x30) in the lowest 5 bits, then the offset to
// 'check' in the other bits. The possible offset is limited in that we
@@ -5213,7 +5381,7 @@ void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
uint32_t delta = __ InstructionsGeneratedSince(smi_check);
__ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
} else {
- ASSERT(!smi_check->is_bound());
+ DCHECK(!smi_check->is_bound());
// An offset of 0 indicates that there is no patch site.
__ InlineData(0);
@@ -5224,17 +5392,17 @@ void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
: reg_(NoReg), smi_check_(NULL) {
InstructionSequence* inline_data = InstructionSequence::At(info);
- ASSERT(inline_data->IsInlineData());
+ DCHECK(inline_data->IsInlineData());
if (inline_data->IsInlineData()) {
uint64_t payload = inline_data->InlineData();
// We use BitField to decode the payload, and BitField can only handle
// 32-bit values.
- ASSERT(is_uint32(payload));
+ DCHECK(is_uint32(payload));
if (payload != 0) {
int reg_code = RegisterBits::decode(payload);
reg_ = Register::XRegFromCode(reg_code);
uint64_t smi_check_delta = DeltaBits::decode(payload);
- ASSERT(smi_check_delta != 0);
+ DCHECK(smi_check_delta != 0);
smi_check_ = inline_data->preceding(smi_check_delta);
}
}
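The smi-check payload packs the register code into the lowest five bits and the instruction delta into the bits above it, as the comment in Emit() describes. A minimal stand-alone sketch of that layout; the real code expresses it through the RegisterBits and DeltaBits BitField typedefs, which are not reproduced here.

#include <cassert>
#include <cstdint>

const int kRegisterFieldBits = 5;  // x0-x30 fit in 5 bits (assumption)
const uint32_t kRegisterMask = (1u << kRegisterFieldBits) - 1;

uint32_t EncodeSmiCheckInfo(unsigned reg_code, uint32_t delta) {
  return (delta << kRegisterFieldBits) | (reg_code & kRegisterMask);
}

int main() {
  uint32_t payload = EncodeSmiCheckInfo(/*reg_code=*/17, /*delta=*/42);
  assert((payload & kRegisterMask) == 17);        // RegisterBits::decode
  assert((payload >> kRegisterFieldBits) == 42);  // DeltaBits::decode
  return 0;
}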
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 7d267a2cb..aa83c7040 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -7,10 +7,27 @@
#include <vector>
-#include "v8globals.h"
-#include "globals.h"
+#include "src/globals.h"
+
+#include "src/arm64/assembler-arm64-inl.h"
+
+// Simulator specific helpers.
+#if USE_SIMULATOR
+ // TODO(all): If possible automatically prepend an indicator like
+ // UNIMPLEMENTED or LOCATION.
+ #define ASM_UNIMPLEMENTED(message) \
+ __ Debug(message, __LINE__, NO_PARAM)
+ #define ASM_UNIMPLEMENTED_BREAK(message) \
+ __ Debug(message, __LINE__, \
+ FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
+ #define ASM_LOCATION(message) \
+ __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
+#else
+ #define ASM_UNIMPLEMENTED(message)
+ #define ASM_UNIMPLEMENTED_BREAK(message)
+ #define ASM_LOCATION(message)
+#endif
-#include "arm64/assembler-arm64-inl.h"
namespace v8 {
namespace internal {
@@ -26,6 +43,11 @@ namespace internal {
V(Str, CPURegister&, rt, StoreOpFor(rt)) \
V(Ldrsw, Register&, rt, LDRSW_x)
+#define LSPAIR_MACRO_LIST(V) \
+ V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \
+ V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
+ V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
+
// ----------------------------------------------------------------------------
// Static helper functions
@@ -82,7 +104,7 @@ enum BranchType {
inline BranchType InvertBranchType(BranchType type) {
if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
return static_cast<BranchType>(
- InvertCondition(static_cast<Condition>(type)));
+ NegateCondition(static_cast<Condition>(type)));
} else {
return static_cast<BranchType>(type ^ 1);
}
@@ -90,6 +112,10 @@ inline BranchType InvertBranchType(BranchType type) {
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+ kPointersToHereMaybeInteresting,
+ kPointersToHereAreAlwaysInteresting
+};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum TargetAddressStorageMode {
CAN_INLINE_TARGET_ADDRESS,
@@ -199,6 +225,18 @@ class MacroAssembler : public Assembler {
static bool IsImmMovz(uint64_t imm, unsigned reg_size);
static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+ // Try to move an immediate into the destination register in a single
+ // instruction. Returns true for success, and updates the contents of dst.
+ // Returns false otherwise.
+ bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
+
+ // Move an immediate into register dst, and return an Operand object for use
+ // with a subsequent instruction that accepts a shift. The value moved into
+ // dst is not necessarily equal to imm; it may have had a shifting operation
+ // applied to it that will be subsequently undone by the shift applied in the
+ // Operand.
+ Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
+
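MoveImmediateForShiftedOp relies on the value placed in dst recovering the original immediate once the Operand's shift is applied. Below is a rough sketch of that decomposition, under the simplifying assumption that the shift is just the trailing-zero count; the real helper must also pick something the move-immediate forms can encode, which is not modelled here.

#include <cassert>
#include <cstdint>

struct ShiftedImmediate {
  int64_t base;    // value that would be moved into dst
  unsigned shift;  // LSL amount carried by the returned Operand
};

ShiftedImmediate DecomposeForShiftedOp(int64_t imm) {
  unsigned shift = 0;
  while (imm != 0 && (imm & 1) == 0 && shift < 63) {  // strip trailing zeros
    imm >>= 1;
    shift++;
  }
  return {imm, shift};
}

int main() {
  ShiftedImmediate s = DecomposeForShiftedOp(0xABCD0000);
  assert((s.base << s.shift) == 0xABCD0000);
  assert(s.base == 0xABCD && s.shift == 16);
  return 0;
}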
// Conditional macros.
inline void Ccmp(const Register& rn,
const Operand& operand,
@@ -228,6 +266,14 @@ class MacroAssembler : public Assembler {
const MemOperand& addr,
LoadStoreOp op);
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
+ inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
+ LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+ void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& addr, LoadStorePairOp op);
+
// V8-specific load/store helpers.
void Load(const Register& rt, const MemOperand& addr, Representation r);
void Store(const Register& rt, const MemOperand& addr, Representation r);
@@ -351,7 +397,7 @@ class MacroAssembler : public Assembler {
// Provide a template to allow other types to be converted automatically.
template<typename T>
void Fmov(FPRegister fd, T imm) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Fmov(fd, static_cast<double>(imm));
}
inline void Fmov(Register rd, FPRegister fn);
@@ -385,19 +431,10 @@ class MacroAssembler : public Assembler {
inline void Ldnp(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& src);
- inline void Ldp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& src);
- inline void Ldpsw(const Register& rt,
- const Register& rt2,
- const MemOperand& src);
- // Provide both double and float interfaces for FP immediate loads, rather
- // than relying on implicit C++ casts. This allows signalling NaNs to be
- // preserved when the immediate matches the format of fd. Most systems convert
- // signalling NaNs to quiet NaNs when converting between float and double.
- inline void Ldr(const FPRegister& ft, double imm);
- inline void Ldr(const FPRegister& ft, float imm);
- inline void Ldr(const Register& rt, uint64_t imm);
+ // Load a literal from the inline constant pool.
+ inline void Ldr(const CPURegister& rt, const Immediate& imm);
+ // Helper function for double immediate.
+ inline void Ldr(const CPURegister& rt, double imm);
inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
@@ -453,9 +490,6 @@ class MacroAssembler : public Assembler {
inline void Stnp(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& dst);
- inline void Stp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& dst);
inline void Sxtb(const Register& rd, const Register& rn);
inline void Sxth(const Register& rd, const Register& rn);
inline void Sxtw(const Register& rd, const Register& rn);
@@ -531,6 +565,7 @@ class MacroAssembler : public Assembler {
const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
+ void Push(const Register& src0, const FPRegister& src1);
// Alternative forms of Push and Pop, taking a RegList or CPURegList that
// specifies the registers that are to be pushed or popped. Higher-numbered
@@ -606,7 +641,7 @@ class MacroAssembler : public Assembler {
explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }
~PushPopQueue() {
- ASSERT(queued_.empty());
+ DCHECK(queued_.empty());
}
void Queue(const CPURegister& rt) {
@@ -614,7 +649,11 @@ class MacroAssembler : public Assembler {
queued_.push_back(rt);
}
- void PushQueued();
+ enum PreambleDirective {
+ WITH_PREAMBLE,
+ SKIP_PREAMBLE
+ };
+ void PushQueued(PreambleDirective preamble_directive = WITH_PREAMBLE);
void PopQueued();
private:
@@ -718,9 +757,11 @@ class MacroAssembler : public Assembler {
// it can be evidence of a potential bug because the ABI forbids accesses
// below csp.
//
- // If emit_debug_code() is false, this emits no code.
+ // If StackPointer() is the system stack pointer (csp) or ALWAYS_ALIGN_CSP is
+ // enabled, then csp will be dereferenced to cause the processor
+ // (or simulator) to abort if it is not properly aligned.
//
- // If StackPointer() is the system stack pointer, this emits no code.
+ // If emit_debug_code() is false, this emits no code.
void AssertStackConsistency();
// Preserve the callee-saved registers (as defined by AAPCS64).
@@ -752,7 +793,7 @@ class MacroAssembler : public Assembler {
// Set the current stack pointer, but don't generate any code.
inline void SetStackPointer(const Register& stack_pointer) {
- ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
+ DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
sp_ = stack_pointer;
}
@@ -766,8 +807,8 @@ class MacroAssembler : public Assembler {
inline void AlignAndSetCSPForFrame() {
int sp_alignment = ActivationFrameAlignment();
// AAPCS64 mandates at least 16-byte alignment.
- ASSERT(sp_alignment >= 16);
- ASSERT(IsPowerOf2(sp_alignment));
+ DCHECK(sp_alignment >= 16);
+ DCHECK(IsPowerOf2(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
SetStackPointer(csp);
}
@@ -778,12 +819,22 @@ class MacroAssembler : public Assembler {
//
// This is necessary when pushing or otherwise adding things to the stack, to
// satisfy the AAPCS64 constraint that the memory below the system stack
- // pointer is not accessed.
+ // pointer is not accessed. The amount pushed will be increased as necessary
+ // to ensure csp remains aligned to 16 bytes.
//
// This method asserts that StackPointer() is not csp, since the call does
// not make sense in that context.
inline void BumpSystemStackPointer(const Operand& space);
+ // Re-synchronizes the system stack pointer (csp) with the current stack
+ // pointer (according to StackPointer()). This function will ensure the
+ // new value of the system stack pointer remains aligned to 16 bytes, and
+ // is lower than or equal to the value of the current stack pointer.
+ //
+ // This method asserts that StackPointer() is not csp, since the call does
+ // not make sense in that context.
+ inline void SyncSystemStackPointer();
+
// Helpers ------------------------------------------------------------------
// Root register.
inline void InitializeRootRegister();
@@ -812,7 +863,7 @@ class MacroAssembler : public Assembler {
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
- ASSERT(object->IsSmi());
+ DCHECK(object->IsSmi());
Mov(result, Operand(object));
}
}
@@ -830,10 +881,15 @@ class MacroAssembler : public Assembler {
void NumberOfOwnDescriptors(Register dst, Register map);
template<typename Field>
- void DecodeField(Register reg) {
- static const uint64_t shift = Field::kShift + kSmiShift;
+ void DecodeField(Register dst, Register src) {
+ static const uint64_t shift = Field::kShift;
static const uint64_t setbits = CountSetBits(Field::kMask, 32);
- Ubfx(reg, reg, shift, setbits);
+ Ubfx(dst, src, shift, setbits);
+ }
+
+ template<typename Field>
+ void DecodeField(Register reg) {
+ DecodeField<Field>(reg, reg);
}
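The reworked DecodeField no longer folds in kSmiShift and can write to a separate destination register; the Ubfx simply extracts Field's bit range from an untagged value. A plain C++ sketch of the same extraction, using a made-up Field type that follows BitField's kShift/kMask contract:

#include <cassert>
#include <cstdint>

struct HypotheticalField {  // stand-in for a BitField typedef
  static constexpr uint32_t kShift = 3;
  static constexpr uint32_t kMask = 0xF << kShift;
};

template <typename Field>
uint32_t DecodeField(uint32_t value) {
  return (value & Field::kMask) >> Field::kShift;  // what the Ubfx computes
}

int main() {
  // Field value 9, with unrelated noise in the low bits.
  uint32_t encoded = (9u << HypotheticalField::kShift) | 0x7u;
  assert(DecodeField<HypotheticalField>(encoded) == 9u);
  return 0;
}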
// ---- SMI and Number Utilities ----
@@ -849,6 +905,10 @@ class MacroAssembler : public Assembler {
Register src,
UntagMode mode = kNotSpeculativeUntag);
+ // Tag and push in one step.
+ inline void SmiTagAndPush(Register src);
+ inline void SmiTagAndPush(Register src1, Register src2);
+
// Compute the absolute value of 'smi' and leave the result in 'smi'
// register. If 'smi' is the most negative SMI, the absolute value cannot
// be represented as a SMI and a jump to 'slow' is done.
@@ -907,6 +967,10 @@ class MacroAssembler : public Assembler {
// Jump to label if the input double register contains -0.0.
void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);
+ // Jump to label if the input integer register contains the double precision
+ // floating point representation of -0.0.
+ void JumpIfMinusZero(Register input, Label* on_negative_zero);
+
// Generate code to do a lookup in the number string cache. If the number in
// the register object is found in the cache the generated code falls through
// with the result in the result register. The object and the result register
@@ -939,7 +1003,7 @@ class MacroAssembler : public Assembler {
FPRegister scratch_d,
Label* on_successful_conversion = NULL,
Label* on_failed_conversion = NULL) {
- ASSERT(as_int.Is32Bits());
+ DCHECK(as_int.Is32Bits());
TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
on_failed_conversion);
}
@@ -954,7 +1018,7 @@ class MacroAssembler : public Assembler {
FPRegister scratch_d,
Label* on_successful_conversion = NULL,
Label* on_failed_conversion = NULL) {
- ASSERT(as_int.Is64Bits());
+ DCHECK(as_int.Is64Bits());
TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
on_failed_conversion);
}
@@ -1048,15 +1112,6 @@ class MacroAssembler : public Assembler {
Register scratch3,
Register scratch4);
- // Throw a message string as an exception.
- void Throw(BailoutReason reason);
-
- // Throw a message string as an exception if a condition is not true.
- void ThrowIf(Condition cond, BailoutReason reason);
-
- // Throw a message string as an exception if the value is a smi.
- void ThrowIfSmi(const Register& value, BailoutReason reason);
-
void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
void TailCallStub(CodeStub* stub);
@@ -1351,7 +1406,8 @@ class MacroAssembler : public Assembler {
Register scratch1,
Register scratch2,
CPURegister value = NoFPReg,
- CPURegister heap_number_map = NoReg);
+ CPURegister heap_number_map = NoReg,
+ MutableMode mode = IMMUTABLE);
// ---------------------------------------------------------------------------
// Support functions.
@@ -1636,7 +1692,8 @@ class MacroAssembler : public Assembler {
void ExitFrameRestoreFPRegs();
// Generates function and stub prologue code.
- void Prologue(PrologueFrameMode frame_mode);
+ void StubPrologue();
+ void Prologue(bool code_pre_aging);
// Enter exit frame. Exit frames are used when calling C code from generated
// (JavaScript) code.
@@ -1771,7 +1828,9 @@ class MacroAssembler : public Assembler {
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
@@ -1783,7 +1842,9 @@ class MacroAssembler : public Assembler {
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
RecordWriteField(context,
offset + kHeapObjectTag,
value,
@@ -1791,9 +1852,17 @@ class MacroAssembler : public Assembler {
lr_status,
save_fp,
remembered_set_action,
- smi_check);
+ smi_check,
+ pointers_to_here_check_for_value);
}
+ void RecordWriteForMap(
+ Register object,
+ Register map,
+ Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp);
+
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
@@ -1804,7 +1873,9 @@ class MacroAssembler : public Assembler {
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// Checks the color of an object. If the object is already grey or black
// then we just fall through, since it is already live. If it is white and
@@ -1918,12 +1989,13 @@ class MacroAssembler : public Assembler {
// (such as %e, %f or %g) are FPRegisters, and that arguments for integer
// placeholders are Registers.
//
- // A maximum of four arguments may be given to any single Printf call. The
- // arguments must be of the same type, but they do not need to have the same
- // size.
+ // At the moment it is only possible to print the value of csp if it is the
+ // current stack pointer. Otherwise, the MacroAssembler will automatically
+ // update csp on every push (using BumpSystemStackPointer), so determining its
+ // value is difficult.
//
- // The following registers cannot be printed:
- // StackPointer(), csp.
+ // Format placeholders that refer to more than one argument, or to a specific
+ // argument, are not supported. This includes formats like "%1$d" or "%.*d".
//
// This function automatically preserves caller-saved registers so that
// calling code can use Printf at any point without having to worry about
@@ -1931,15 +2003,11 @@ class MacroAssembler : public Assembler {
// a problem, preserve the important registers manually and then call
// PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
// implicitly preserved.
- //
- // This function assumes (and asserts) that the current stack pointer is
- // callee-saved, not caller-saved. This is most likely the case anyway, as a
- // caller-saved stack pointer doesn't make a lot of sense.
void Printf(const char * format,
- const CPURegister& arg0 = NoCPUReg,
- const CPURegister& arg1 = NoCPUReg,
- const CPURegister& arg2 = NoCPUReg,
- const CPURegister& arg3 = NoCPUReg);
+ CPURegister arg0 = NoCPUReg,
+ CPURegister arg1 = NoCPUReg,
+ CPURegister arg2 = NoCPUReg,
+ CPURegister arg3 = NoCPUReg);
// Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
//
@@ -1993,6 +2061,15 @@ class MacroAssembler : public Assembler {
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);
+ // Perform necessary maintenance operations before a push or after a pop.
+ //
+ // Note that size is specified in bytes.
+ void PushPreamble(Operand total_size);
+ void PopPostamble(Operand total_size);
+
+ void PushPreamble(int count, int size) { PushPreamble(count * size); }
+ void PopPostamble(int count, int size) { PopPostamble(count * size); }
+
private:
// Helpers for CopyFields.
// These each implement CopyFields in a different way.
@@ -2020,22 +2097,15 @@ class MacroAssembler : public Assembler {
const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3);
- // Perform necessary maintenance operations before a push or pop.
- //
- // Note that size is specified in bytes.
- void PrepareForPush(Operand total_size);
- void PrepareForPop(Operand total_size);
-
- void PrepareForPush(int count, int size) { PrepareForPush(count * size); }
- void PrepareForPop(int count, int size) { PrepareForPop(count * size); }
-
// Call Printf. On a native build, a simple call will be generated, but if the
// simulator is being used then a suitable pseudo-instruction is used. The
// arguments and stack (csp) must be prepared by the caller as for a normal
// AAPCS64 call to 'printf'.
//
- // The 'type' argument specifies the type of the optional arguments.
- void CallPrintf(CPURegister::RegisterType type = CPURegister::kNoRegister);
+ // The 'args' argument should point to an array of variable arguments in their
+ // proper PCS registers (and in calling order). The argument registers can
+ // have mixed types. The format string (x0) should not be included.
+ void CallPrintf(int arg_count = 0, const CPURegister * args = NULL);
// Helper for throwing exceptions. Compute a handler address and jump to
// it. See the implementation for register usage.
@@ -2131,7 +2201,7 @@ class MacroAssembler : public Assembler {
// emitted is what you specified when creating the scope.
class InstructionAccurateScope BASE_EMBEDDED {
public:
- InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
+ explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
: masm_(masm)
#ifdef DEBUG
,
@@ -2156,7 +2226,7 @@ class InstructionAccurateScope BASE_EMBEDDED {
masm_->EndBlockPools();
#ifdef DEBUG
if (start_.is_bound()) {
- ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
+ DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
}
masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
#endif
@@ -2186,8 +2256,8 @@ class UseScratchRegisterScope {
availablefp_(masm->FPTmpList()),
old_available_(available_->list()),
old_availablefp_(availablefp_->list()) {
- ASSERT(available_->type() == CPURegister::kRegister);
- ASSERT(availablefp_->type() == CPURegister::kFPRegister);
+ DCHECK(available_->type() == CPURegister::kRegister);
+ DCHECK(availablefp_->type() == CPURegister::kFPRegister);
}
~UseScratchRegisterScope();
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
index 97040cf75..432d9568b 100644
--- a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
@@ -2,18 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "cpu-profiler.h"
-#include "unicode.h"
-#include "log.h"
-#include "code-stubs.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "arm64/regexp-macro-assembler-arm64.h"
+#include "src/code-stubs.h"
+#include "src/cpu-profiler.h"
+#include "src/log.h"
+#include "src/macro-assembler.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/regexp-stack.h"
+#include "src/unicode.h"
+
+#include "src/arm64/regexp-macro-assembler-arm64.h"
namespace v8 {
namespace internal {
@@ -125,7 +126,7 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(
backtrack_label_(),
exit_label_() {
__ SetStackPointer(csp);
- ASSERT_EQ(0, registers_to_save % 2);
+ DCHECK_EQ(0, registers_to_save % 2);
// We can cache at most 16 W registers in x0-x7.
STATIC_ASSERT(kNumCachedRegisters <= 16);
STATIC_ASSERT((kNumCachedRegisters % 2) == 0);
@@ -160,7 +161,7 @@ void RegExpMacroAssemblerARM64::AdvanceCurrentPosition(int by) {
void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
- ASSERT((reg >= 0) && (reg < num_registers_));
+ DCHECK((reg >= 0) && (reg < num_registers_));
if (by != 0) {
Register to_advance;
RegisterState register_state = GetRegisterState(reg);
@@ -261,7 +262,7 @@ void RegExpMacroAssemblerARM64::CheckCharacters(Vector<const uc16> str,
for (int i = 0; i < str.length(); i++) {
if (mode_ == ASCII) {
__ Ldrb(w10, MemOperand(characters_address, 1, PostIndex));
- ASSERT(str[i] <= String::kMaxOneByteCharCode);
+ DCHECK(str[i] <= String::kMaxOneByteCharCode);
} else {
__ Ldrh(w10, MemOperand(characters_address, 2, PostIndex));
}
@@ -288,10 +289,10 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
// Save the capture length in a callee-saved register so it will
// be preserved if we call a C helper.
Register capture_length = w19;
- ASSERT(kCalleeSaved.IncludesAliasOf(capture_length));
+ DCHECK(kCalleeSaved.IncludesAliasOf(capture_length));
// Find length of back-referenced capture.
- ASSERT((start_reg % 2) == 0);
+ DCHECK((start_reg % 2) == 0);
if (start_reg < kNumCachedRegisters) {
__ Mov(capture_start_offset.X(), GetCachedRegister(start_reg));
__ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
@@ -364,12 +365,12 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Check(le, kOffsetOutOfRange);
}
} else {
- ASSERT(mode_ == UC16);
+ DCHECK(mode_ == UC16);
int argument_count = 4;
// The cached registers need to be retained.
CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
- ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
+ DCHECK((cached_registers.Count() * 2) == kNumCachedRegisters);
__ PushCPURegList(cached_registers);
// Put arguments into arguments registers.
@@ -396,11 +397,14 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
}
// Check if function returned non-zero for success or zero for failure.
- CompareAndBranchOrBacktrack(x0, 0, eq, on_no_match);
+ // x0 is one of the registers used as a cache, so it must be tested before
+ // the cache is restored.
+ __ Cmp(x0, 0);
+ __ PopCPURegList(cached_registers);
+ BranchOrBacktrack(eq, on_no_match);
+
// On success, increment position by length of capture.
__ Add(current_input_offset(), current_input_offset(), capture_length);
- // Reset the cached registers.
- __ PopCPURegList(cached_registers);
}
__ Bind(&fallthrough);
@@ -417,7 +421,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(
Register capture_length = w15;
// Find length of back-referenced capture.
- ASSERT((start_reg % 2) == 0);
+ DCHECK((start_reg % 2) == 0);
if (start_reg < kNumCachedRegisters) {
__ Mov(x10, GetCachedRegister(start_reg));
__ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
@@ -447,7 +451,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(
__ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
__ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
} else {
- ASSERT(mode_ == UC16);
+ DCHECK(mode_ == UC16);
__ Ldrh(w10, MemOperand(capture_start_address, 2, PostIndex));
__ Ldrh(w11, MemOperand(current_position_address, 2, PostIndex));
}
@@ -495,7 +499,7 @@ void RegExpMacroAssemblerARM64::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
- ASSERT(minus < String::kMaxUtf16CodeUnit);
+ DCHECK(minus < String::kMaxUtf16CodeUnit);
__ Sub(w10, current_character(), minus);
__ And(w10, w10, mask);
CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
@@ -677,10 +681,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
CPURegList argument_registers(x0, x5, x6, x7);
CPURegList registers_to_retain = kCalleeSaved;
- ASSERT(kCalleeSaved.Count() == 11);
+ DCHECK(kCalleeSaved.Count() == 11);
registers_to_retain.Combine(lr);
- ASSERT(csp.Is(__ StackPointer()));
+ DCHECK(csp.Is(__ StackPointer()));
__ PushCPURegList(registers_to_retain);
__ PushCPURegList(argument_registers);
@@ -704,7 +708,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Make sure the stack alignment will be respected.
int alignment = masm_->ActivationFrameAlignment();
- ASSERT_EQ(alignment % 16, 0);
+ DCHECK_EQ(alignment % 16, 0);
int align_mask = (alignment / kWRegSize) - 1;
num_wreg_to_allocate = (num_wreg_to_allocate + align_mask) & ~align_mask;
@@ -857,7 +861,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
Register base = x10;
// There are always an even number of capture registers. A couple of
// registers determine one match with two offsets.
- ASSERT_EQ(0, num_registers_left_on_stack % 2);
+ DCHECK_EQ(0, num_registers_left_on_stack % 2);
__ Add(base, frame_pointer(), kFirstCaptureOnStack);
// We can unroll the loop here, we should not unroll for less than 2
@@ -974,8 +978,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Bind(&return_w0);
// Set stack pointer back to first register to retain
- ASSERT(csp.Is(__ StackPointer()));
+ DCHECK(csp.Is(__ StackPointer()));
__ Mov(csp, fp);
+ __ AssertStackConsistency();
// Restore registers.
__ PopCPURegList(registers_to_retain);
@@ -986,7 +991,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Registers x0 to x7 are used to store the first captures, they need to be
// retained over calls to C++ code.
CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
- ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
+ DCHECK((cached_registers.Count() * 2) == kNumCachedRegisters);
if (check_preempt_label_.is_linked()) {
__ Bind(&check_preempt_label_);
@@ -1079,9 +1084,9 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacter(int cp_offset,
int characters) {
// TODO(pielan): Make sure long strings are caught before this, and not
// just asserted in debug mode.
- ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
+ DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
// Be sane! (And ensure that an int32_t can be used to index the string)
- ASSERT(cp_offset < (1<<30));
+ DCHECK(cp_offset < (1<<30));
if (check_bounds) {
CheckPosition(cp_offset + characters - 1, on_end_of_input);
}
@@ -1174,7 +1179,7 @@ void RegExpMacroAssemblerARM64::SetCurrentPositionFromEnd(int by) {
void RegExpMacroAssemblerARM64::SetRegister(int register_index, int to) {
- ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
+ DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
Register set_to = wzr;
if (to != 0) {
set_to = w10;
@@ -1202,7 +1207,7 @@ void RegExpMacroAssemblerARM64::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
+ DCHECK(reg_from <= reg_to);
int num_registers = reg_to - reg_from + 1;
// If the first capture register is cached in a hardware register but not
@@ -1215,7 +1220,7 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
// Clear cached registers in pairs as far as possible.
while ((num_registers >= 2) && (reg_from < kNumCachedRegisters)) {
- ASSERT(GetRegisterState(reg_from) == CACHED_LSW);
+ DCHECK(GetRegisterState(reg_from) == CACHED_LSW);
__ Mov(GetCachedRegister(reg_from), twice_non_position_value());
reg_from += 2;
num_registers -= 2;
@@ -1229,7 +1234,7 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
if (num_registers > 0) {
// If there are some remaining registers, they are stored on the stack.
- ASSERT(reg_from >= kNumCachedRegisters);
+ DCHECK(reg_from >= kNumCachedRegisters);
// Move down the indexes of the registers on stack to get the correct offset
// in memory.
@@ -1288,7 +1293,8 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
const byte** input_start,
const byte** input_end) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- if (isolate->stack_guard()->IsStackOverflow()) {
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
isolate->StackOverflow();
return EXCEPTION;
}
@@ -1311,11 +1317,11 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
// Current string.
bool is_ascii = subject->IsOneByteRepresentationUnderneath();
- ASSERT(re_code->instruction_start() <= *return_address);
- ASSERT(*return_address <=
+ DCHECK(re_code->instruction_start() <= *return_address);
+ DCHECK(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
- Object* result = Execution::HandleStackGuardInterrupt(isolate);
+ Object* result = isolate->stack_guard()->HandleInterrupts();
if (*code_handle != re_code) { // Return address no longer valid
int delta = code_handle->address() - re_code->address();
@@ -1351,7 +1357,7 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
// be a sequential or external string with the same content.
// Update the start and end pointers in the stack frame to the current
// location (whether it has actually moved or not).
- ASSERT(StringShape(*subject_tmp).IsSequential() ||
+ DCHECK(StringShape(*subject_tmp).IsSequential() ||
StringShape(*subject_tmp).IsExternal());
// The original start address of the characters to match.
@@ -1404,11 +1410,11 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
// moved. Allocate extra space for 2 arguments passed by pointers.
// AAPCS64 requires the stack to be 16 byte aligned.
int alignment = masm_->ActivationFrameAlignment();
- ASSERT_EQ(alignment % 16, 0);
+ DCHECK_EQ(alignment % 16, 0);
int align_mask = (alignment / kXRegSize) - 1;
int xreg_to_claim = (3 + align_mask) & ~align_mask;
- ASSERT(csp.Is(__ StackPointer()));
+ DCHECK(csp.Is(__ StackPointer()));
__ Claim(xreg_to_claim);
// CheckStackGuardState needs the end and start addresses of the input string.
@@ -1438,7 +1444,7 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
__ Peek(input_start(), kPointerSize);
__ Peek(input_end(), 2 * kPointerSize);
- ASSERT(csp.Is(__ StackPointer()));
+ DCHECK(csp.Is(__ StackPointer()));
__ Drop(xreg_to_claim);
// Reload the Code pointer.
@@ -1458,12 +1464,7 @@ void RegExpMacroAssemblerARM64::BranchOrBacktrack(Condition condition,
if (to == NULL) {
to = &backtrack_label_;
}
- // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
- Condition inverted_condition = InvertCondition(condition);
- Label no_branch;
- __ B(inverted_condition, &no_branch);
- __ B(to);
- __ Bind(&no_branch);
+ __ B(condition, to);
}
void RegExpMacroAssemblerARM64::CompareAndBranchOrBacktrack(Register reg,
@@ -1474,15 +1475,11 @@ void RegExpMacroAssemblerARM64::CompareAndBranchOrBacktrack(Register reg,
if (to == NULL) {
to = &backtrack_label_;
}
- // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
- Label no_branch;
if (condition == eq) {
- __ Cbnz(reg, &no_branch);
+ __ Cbz(reg, to);
} else {
- __ Cbz(reg, &no_branch);
+ __ Cbnz(reg, to);
}
- __ B(to);
- __ Bind(&no_branch);
} else {
__ Cmp(reg, immediate);
BranchOrBacktrack(condition, to);
@@ -1496,7 +1493,7 @@ void RegExpMacroAssemblerARM64::CheckPreemption() {
ExternalReference::address_of_stack_limit(isolate());
__ Mov(x10, stack_limit);
__ Ldr(x10, MemOperand(x10));
- ASSERT(csp.Is(__ StackPointer()));
+ DCHECK(csp.Is(__ StackPointer()));
__ Cmp(csp, x10);
CallIf(&check_preempt_label_, ls);
}
@@ -1513,8 +1510,8 @@ void RegExpMacroAssemblerARM64::CheckStackLimit() {
void RegExpMacroAssemblerARM64::Push(Register source) {
- ASSERT(source.Is32Bits());
- ASSERT(!source.is(backtrack_stackpointer()));
+ DCHECK(source.Is32Bits());
+ DCHECK(!source.is(backtrack_stackpointer()));
__ Str(source,
MemOperand(backtrack_stackpointer(),
-static_cast<int>(kWRegSize),
@@ -1523,23 +1520,23 @@ void RegExpMacroAssemblerARM64::Push(Register source) {
void RegExpMacroAssemblerARM64::Pop(Register target) {
- ASSERT(target.Is32Bits());
- ASSERT(!target.is(backtrack_stackpointer()));
+ DCHECK(target.Is32Bits());
+ DCHECK(!target.is(backtrack_stackpointer()));
__ Ldr(target,
MemOperand(backtrack_stackpointer(), kWRegSize, PostIndex));
}
Register RegExpMacroAssemblerARM64::GetCachedRegister(int register_index) {
- ASSERT(register_index < kNumCachedRegisters);
+ DCHECK(register_index < kNumCachedRegisters);
return Register::Create(register_index / 2, kXRegSizeInBits);
}
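GetCachedRegister encodes the capture-register cache: two 32-bit regexp registers share each of x0-x7, with even indices in the low word (CACHED_LSW in the register-state enum) and odd indices in the high word, which is why callers extract the upper half with an Lsr by kWRegSizeInBits. A small sketch of that layout; the constants below only mirror kNumCachedRegisters and kWRegSizeInBits for illustration.

#include <cassert>
#include <cstdint>

const int kNumCachedRegisters = 16;
const unsigned kWRegSizeInBits = 32;

int CacheSlot(int register_index) { return register_index / 2; }  // x0..x7

uint32_t ReadCachedRegister(const uint64_t x[8], int register_index) {
  assert(register_index < kNumCachedRegisters);
  uint64_t pair = x[CacheSlot(register_index)];
  return (register_index % 2 == 0)
             ? static_cast<uint32_t>(pair)                      // low word
             : static_cast<uint32_t>(pair >> kWRegSizeInBits);  // high word
}

int main() {
  uint64_t x[8] = {0};
  x[1] = (uint64_t{0xDDDDDDDD} << 32) | 0xCCCCCCCC;  // registers 2 and 3
  assert(ReadCachedRegister(x, 2) == 0xCCCCCCCC);
  assert(ReadCachedRegister(x, 3) == 0xDDDDDDDD);
  return 0;
}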
Register RegExpMacroAssemblerARM64::GetRegister(int register_index,
Register maybe_result) {
- ASSERT(maybe_result.Is32Bits());
- ASSERT(register_index >= 0);
+ DCHECK(maybe_result.Is32Bits());
+ DCHECK(register_index >= 0);
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
@@ -1562,15 +1559,15 @@ Register RegExpMacroAssemblerARM64::GetRegister(int register_index,
UNREACHABLE();
break;
}
- ASSERT(result.Is32Bits());
+ DCHECK(result.Is32Bits());
return result;
}
void RegExpMacroAssemblerARM64::StoreRegister(int register_index,
Register source) {
- ASSERT(source.Is32Bits());
- ASSERT(register_index >= 0);
+ DCHECK(source.Is32Bits());
+ DCHECK(register_index >= 0);
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
@@ -1600,29 +1597,29 @@ void RegExpMacroAssemblerARM64::StoreRegister(int register_index,
void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {
Label skip_call;
- if (condition != al) __ B(&skip_call, InvertCondition(condition));
+ if (condition != al) __ B(&skip_call, NegateCondition(condition));
__ Bl(to);
__ Bind(&skip_call);
}
void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
- ASSERT(csp.Is(__ StackPointer()));
+ DCHECK(csp.Is(__ StackPointer()));
__ Pop(lr, xzr);
__ Add(lr, lr, Operand(masm_->CodeObject()));
}
void RegExpMacroAssemblerARM64::SaveLinkRegister() {
- ASSERT(csp.Is(__ StackPointer()));
+ DCHECK(csp.Is(__ StackPointer()));
__ Sub(lr, lr, Operand(masm_->CodeObject()));
__ Push(xzr, lr);
}
MemOperand RegExpMacroAssemblerARM64::register_location(int register_index) {
- ASSERT(register_index < (1<<30));
- ASSERT(register_index >= kNumCachedRegisters);
+ DCHECK(register_index < (1<<30));
+ DCHECK(register_index >= kNumCachedRegisters);
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
@@ -1633,10 +1630,10 @@ MemOperand RegExpMacroAssemblerARM64::register_location(int register_index) {
MemOperand RegExpMacroAssemblerARM64::capture_location(int register_index,
Register scratch) {
- ASSERT(register_index < (1<<30));
- ASSERT(register_index < num_saved_registers_);
- ASSERT(register_index >= kNumCachedRegisters);
- ASSERT_EQ(register_index % 2, 0);
+ DCHECK(register_index < (1<<30));
+ DCHECK(register_index < num_saved_registers_);
+ DCHECK(register_index >= kNumCachedRegisters);
+ DCHECK_EQ(register_index % 2, 0);
register_index -= kNumCachedRegisters;
int offset = kFirstCaptureOnStack - register_index * kWRegSize;
// capture_location is used with Stp instructions to load/store 2 registers.
@@ -1662,7 +1659,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
// disable it.
// TODO(pielan): See whether or not we should disable unaligned accesses.
if (!CanReadUnaligned()) {
- ASSERT(characters == 1);
+ DCHECK(characters == 1);
}
if (cp_offset != 0) {
@@ -1684,15 +1681,15 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
} else if (characters == 2) {
__ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
} else {
- ASSERT(characters == 1);
+ DCHECK(characters == 1);
__ Ldrb(current_character(), MemOperand(input_end(), offset, SXTW));
}
} else {
- ASSERT(mode_ == UC16);
+ DCHECK(mode_ == UC16);
if (characters == 2) {
__ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
} else {
- ASSERT(characters == 1);
+ DCHECK(characters == 1);
__ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
}
}
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
index 5d0d925ec..a27cff056 100644
--- a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
@@ -5,9 +5,10 @@
#ifndef V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
-#include "arm64/assembler-arm64.h"
-#include "arm64/assembler-arm64-inl.h"
-#include "macro-assembler.h"
+#include "src/macro-assembler.h"
+
+#include "src/arm64/assembler-arm64.h"
+#include "src/arm64/assembler-arm64-inl.h"
namespace v8 {
namespace internal {
@@ -230,7 +231,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
};
RegisterState GetRegisterState(int register_index) {
- ASSERT(register_index >= 0);
+ DCHECK(register_index >= 0);
if (register_index >= kNumCachedRegisters) {
return STACKED;
} else {
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 3c970f854..cde93db98 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -5,15 +5,16 @@
#include <stdlib.h>
#include <cmath>
#include <cstdarg>
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "disasm.h"
-#include "assembler.h"
-#include "arm64/decoder-arm64-inl.h"
-#include "arm64/simulator-arm64.h"
-#include "macro-assembler.h"
+#include "src/arm64/decoder-arm64-inl.h"
+#include "src/arm64/simulator-arm64.h"
+#include "src/assembler.h"
+#include "src/disasm.h"
+#include "src/macro-assembler.h"
+#include "src/ostreams.h"
namespace v8 {
namespace internal {
@@ -61,7 +62,7 @@ void Simulator::TraceSim(const char* format, ...) {
if (FLAG_trace_sim) {
va_list arguments;
va_start(arguments, format);
- OS::VFPrint(stream_, format, arguments);
+ base::OS::VFPrint(stream_, format, arguments);
va_end(arguments);
}
}
@@ -72,11 +73,11 @@ const Instruction* Simulator::kEndOfSimAddress = NULL;
void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
int width = msb - lsb + 1;
- ASSERT(is_uintn(bits, width) || is_intn(bits, width));
+ DCHECK(is_uintn(bits, width) || is_intn(bits, width));
bits <<= lsb;
uint32_t mask = ((1 << width) - 1) << lsb;
- ASSERT((mask & write_ignore_mask_) == 0);
+ DCHECK((mask & write_ignore_mask_) == 0);
value_ = (value_ & ~mask) | (bits & mask);
}
@@ -106,7 +107,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator* Simulator::current(Isolate* isolate) {
Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
- ASSERT(isolate_data != NULL);
+ DCHECK(isolate_data != NULL);
Simulator* sim = isolate_data->simulator();
if (sim == NULL) {
@@ -134,7 +135,7 @@ void Simulator::CallVoid(byte* entry, CallArgument* args) {
} else if (arg.IsD() && (index_d < 8)) {
set_dreg_bits(index_d++, arg.bits());
} else {
- ASSERT(arg.IsD() || arg.IsX());
+ DCHECK(arg.IsD() || arg.IsX());
stack_args.push_back(arg.bits());
}
}
@@ -143,8 +144,8 @@ void Simulator::CallVoid(byte* entry, CallArgument* args) {
uintptr_t original_stack = sp();
uintptr_t entry_stack = original_stack -
stack_args.size() * sizeof(stack_args[0]);
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
+ if (base::OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -base::OS::ActivationFrameAlignment();
}
char * stack = reinterpret_cast<char*>(entry_stack);
std::vector<int64_t>::const_iterator it;
@@ -153,7 +154,7 @@ void Simulator::CallVoid(byte* entry, CallArgument* args) {
stack += sizeof(*it);
}
- ASSERT(reinterpret_cast<uintptr_t>(stack) <= original_stack);
+ DCHECK(reinterpret_cast<uintptr_t>(stack) <= original_stack);
set_sp(entry_stack);
// Call the generated code.
@@ -255,7 +256,7 @@ void Simulator::CheckPCSComplianceAndRun() {
CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
}
for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
- ASSERT(saved_fpregisters[i] ==
+ DCHECK(saved_fpregisters[i] ==
dreg_bits(fpregister_list.PopLowestIndex().code()));
}
@@ -288,7 +289,7 @@ void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
set_xreg(code, value | code);
}
} else {
- ASSERT(list->type() == CPURegister::kFPRegister);
+ DCHECK(list->type() == CPURegister::kFPRegister);
while (!list->IsEmpty()) {
unsigned code = list->PopLowestIndex().code();
set_dreg_bits(code, value | code);
@@ -310,7 +311,7 @@ void Simulator::CorruptAllCallerSavedCPURegisters() {
// Extending the stack by 2 * 64 bits is required for stack alignment purposes.
uintptr_t Simulator::PushAddress(uintptr_t address) {
- ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
+ DCHECK(sizeof(uintptr_t) < 2 * kXRegSize);
intptr_t new_sp = sp() - 2 * kXRegSize;
uintptr_t* alignment_slot =
reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
@@ -326,7 +327,7 @@ uintptr_t Simulator::PopAddress() {
intptr_t current_sp = sp();
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
uintptr_t address = *stack_slot;
- ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
+ DCHECK(sizeof(uintptr_t) < 2 * kXRegSize);
set_sp(current_sp + 2 * kXRegSize);
return address;
}
@@ -480,7 +481,7 @@ class Redirection {
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
- ASSERT_EQ(current->type(), type);
+ DCHECK_EQ(current->type(), type);
return current;
}
}
@@ -764,7 +765,7 @@ const char* Simulator::vreg_names[] = {
const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
- ASSERT(code < kNumberOfRegisters);
+ DCHECK(code < kNumberOfRegisters);
// If the code represents the stack pointer, index the name after zr.
if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
code = kZeroRegCode + 1;
@@ -774,7 +775,7 @@ const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
- ASSERT(code < kNumberOfRegisters);
+ DCHECK(code < kNumberOfRegisters);
// If the code represents the stack pointer, index the name after zr.
if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
code = kZeroRegCode + 1;
@@ -784,19 +785,19 @@ const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
const char* Simulator::SRegNameForCode(unsigned code) {
- ASSERT(code < kNumberOfFPRegisters);
+ DCHECK(code < kNumberOfFPRegisters);
return sreg_names[code];
}
const char* Simulator::DRegNameForCode(unsigned code) {
- ASSERT(code < kNumberOfFPRegisters);
+ DCHECK(code < kNumberOfFPRegisters);
return dreg_names[code];
}
const char* Simulator::VRegNameForCode(unsigned code) {
- ASSERT(code < kNumberOfFPRegisters);
+ DCHECK(code < kNumberOfFPRegisters);
return vreg_names[code];
}
@@ -823,49 +824,30 @@ int Simulator::CodeFromName(const char* name) {
// Helpers ---------------------------------------------------------------------
-int64_t Simulator::AddWithCarry(unsigned reg_size,
- bool set_flags,
- int64_t src1,
- int64_t src2,
- int64_t carry_in) {
- ASSERT((carry_in == 0) || (carry_in == 1));
- ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
-
- uint64_t u1, u2;
- int64_t result;
- int64_t signed_sum = src1 + src2 + carry_in;
+template <typename T>
+T Simulator::AddWithCarry(bool set_flags,
+ T src1,
+ T src2,
+ T carry_in) {
+ typedef typename make_unsigned<T>::type unsignedT;
+ DCHECK((carry_in == 0) || (carry_in == 1));
+
+ T signed_sum = src1 + src2 + carry_in;
+ T result = signed_sum;
bool N, Z, C, V;
- if (reg_size == kWRegSizeInBits) {
- u1 = static_cast<uint64_t>(src1) & kWRegMask;
- u2 = static_cast<uint64_t>(src2) & kWRegMask;
-
- result = signed_sum & kWRegMask;
- // Compute the C flag by comparing the sum to the max unsigned integer.
- C = ((kWMaxUInt - u1) < (u2 + carry_in)) ||
- ((kWMaxUInt - u1 - carry_in) < u2);
- // Overflow iff the sign bit is the same for the two inputs and different
- // for the result.
- int64_t s_src1 = src1 << (kXRegSizeInBits - kWRegSizeInBits);
- int64_t s_src2 = src2 << (kXRegSizeInBits - kWRegSizeInBits);
- int64_t s_result = result << (kXRegSizeInBits - kWRegSizeInBits);
- V = ((s_src1 ^ s_src2) >= 0) && ((s_src1 ^ s_result) < 0);
+ // Compute the C flag
+ unsignedT u1 = static_cast<unsignedT>(src1);
+ unsignedT u2 = static_cast<unsignedT>(src2);
+ unsignedT urest = std::numeric_limits<unsignedT>::max() - u1;
+ C = (u2 > urest) || (carry_in && (((u2 + 1) > urest) || (u2 > (urest - 1))));
- } else {
- u1 = static_cast<uint64_t>(src1);
- u2 = static_cast<uint64_t>(src2);
-
- result = signed_sum;
- // Compute the C flag by comparing the sum to the max unsigned integer.
- C = ((kXMaxUInt - u1) < (u2 + carry_in)) ||
- ((kXMaxUInt - u1 - carry_in) < u2);
- // Overflow iff the sign bit is the same for the two inputs and different
- // for the result.
- V = ((src1 ^ src2) >= 0) && ((src1 ^ result) < 0);
- }
+ // Overflow iff the sign bit is the same for the two inputs and different
+ // for the result.
+ V = ((src1 ^ src2) >= 0) && ((src1 ^ result) < 0);
- N = CalcNFlag(result, reg_size);
+ N = CalcNFlag(result);
Z = CalcZFlag(result);
if (set_flags) {
@@ -878,33 +860,42 @@ int64_t Simulator::AddWithCarry(unsigned reg_size,
}
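The templated AddWithCarry derives the C flag from urest = max - u1 instead of widening the sum to 64 bits. As a sanity check of why that expression is equivalent to "the unsigned sum overflowed", here is a stand-alone, exhaustive comparison against a widened reference for 8-bit operands; it is an illustration, not code from the simulator.

#include <cassert>
#include <cstdint>
#include <limits>

bool CarryFromFormula(uint8_t u1, uint8_t u2, uint8_t carry_in) {
  uint8_t urest = std::numeric_limits<uint8_t>::max() - u1;
  return (u2 > urest) ||
         (carry_in && (((u2 + 1) > urest) || (u2 > (urest - 1))));
}

bool CarryFromWideSum(uint8_t u1, uint8_t u2, uint8_t carry_in) {
  unsigned wide = unsigned{u1} + unsigned{u2} + carry_in;
  return wide > std::numeric_limits<uint8_t>::max();
}

int main() {
  for (int a = 0; a <= 255; a++) {
    for (int b = 0; b <= 255; b++) {
      for (int c = 0; c <= 1; c++) {
        assert(CarryFromFormula(a, b, c) == CarryFromWideSum(a, b, c));
      }
    }
  }
  return 0;
}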
-int64_t Simulator::ShiftOperand(unsigned reg_size,
- int64_t value,
- Shift shift_type,
- unsigned amount) {
+template<typename T>
+void Simulator::AddSubWithCarry(Instruction* instr) {
+ T op2 = reg<T>(instr->Rm());
+ T new_val;
+
+ if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) {
+ op2 = ~op2;
+ }
+
+ new_val = AddWithCarry<T>(instr->FlagsUpdate(),
+ reg<T>(instr->Rn()),
+ op2,
+ nzcv().C());
+
+ set_reg<T>(instr->Rd(), new_val);
+}
+
+template <typename T>
+T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
+ typedef typename make_unsigned<T>::type unsignedT;
+
if (amount == 0) {
return value;
}
- int64_t mask = reg_size == kXRegSizeInBits ? kXRegMask : kWRegMask;
+
switch (shift_type) {
case LSL:
- return (value << amount) & mask;
+ return value << amount;
case LSR:
- return static_cast<uint64_t>(value) >> amount;
- case ASR: {
- // Shift used to restore the sign.
- unsigned s_shift = kXRegSizeInBits - reg_size;
- // Value with its sign restored.
- int64_t s_value = (value << s_shift) >> s_shift;
- return (s_value >> amount) & mask;
- }
- case ROR: {
- if (reg_size == kWRegSizeInBits) {
- value &= kWRegMask;
- }
- return (static_cast<uint64_t>(value) >> amount) |
- ((value & ((1L << amount) - 1L)) << (reg_size - amount));
- }
+ return static_cast<unsignedT>(value) >> amount;
+ case ASR:
+ return value >> amount;
+ case ROR:
+ return (static_cast<unsignedT>(value) >> amount) |
+ ((value & ((1L << amount) - 1L)) <<
+ (sizeof(unsignedT) * 8 - amount));
default:
UNIMPLEMENTED();
return 0;
@@ -912,10 +903,12 @@ int64_t Simulator::ShiftOperand(unsigned reg_size,
}
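Of the shift types, ROR is the only one that needs bit surgery: the low 'amount' bits are moved to the top of the word. A 32-bit sketch of that case, with an amount of zero excluded to match the early return above:

#include <cassert>
#include <cstdint>

// Rotate right by a non-zero amount: low 'amount' bits become the high bits.
uint32_t RotateRight32(uint32_t value, unsigned amount) {
  return (value >> amount) | ((value & ((1u << amount) - 1)) << (32 - amount));
}

int main() {
  assert(RotateRight32(0x12345678u, 8) == 0x78123456u);
  assert(RotateRight32(0x00000001u, 1) == 0x80000000u);
  return 0;
}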
-int64_t Simulator::ExtendValue(unsigned reg_size,
- int64_t value,
- Extend extend_type,
- unsigned left_shift) {
+template <typename T>
+T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
+ const unsigned kSignExtendBShift = (sizeof(T) - 1) * 8;
+ const unsigned kSignExtendHShift = (sizeof(T) - 2) * 8;
+ const unsigned kSignExtendWShift = (sizeof(T) - 4) * 8;
+
switch (extend_type) {
case UXTB:
value &= kByteMask;
@@ -927,13 +920,13 @@ int64_t Simulator::ExtendValue(unsigned reg_size,
value &= kWordMask;
break;
case SXTB:
- value = (value << 56) >> 56;
+ value = (value << kSignExtendBShift) >> kSignExtendBShift;
break;
case SXTH:
- value = (value << 48) >> 48;
+ value = (value << kSignExtendHShift) >> kSignExtendHShift;
break;
case SXTW:
- value = (value << 32) >> 32;
+ value = (value << kSignExtendWShift) >> kSignExtendWShift;
break;
case UXTX:
case SXTX:
@@ -941,8 +934,21 @@ int64_t Simulator::ExtendValue(unsigned reg_size,
default:
UNREACHABLE();
}
- int64_t mask = (reg_size == kXRegSizeInBits) ? kXRegMask : kWRegMask;
- return (value << left_shift) & mask;
+ return value << left_shift;
+}
+
+
+template <typename T>
+void Simulator::Extract(Instruction* instr) {
+ unsigned lsb = instr->ImmS();
+ T op2 = reg<T>(instr->Rm());
+ T result = op2;
+
+ if (lsb) {
+ T op1 = reg<T>(instr->Rn());
+ result = op2 >> lsb | (op1 << ((sizeof(T) * 8) - lsb));
+ }
+ set_reg<T>(instr->Rd(), result);
}
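// Illustrative sketch, not part of this diff: the shift-pair idiom used by the
// templated ExtendValue above for SXTB/SXTH/SXTW, shown for SXTB on a 64-bit
// value. The name is hypothetical; the arithmetic right shift of a negative
// value relies on the same two's-complement behaviour as the simulator itself.
static int64_t ExampleSignExtendByte64(int64_t value) {
  const unsigned kShift = (sizeof(int64_t) - 1) * 8;  // 56
  // Shift the low byte up to the top, then shift back down arithmetically so
  // that bit 7 is replicated into bits 8..63.
  return static_cast<int64_t>(static_cast<uint64_t>(value) << kShift) >> kShift;
}
// e.g. ExampleSignExtendByte64(0x80) == -128, ExampleSignExtendByte64(0x7f) == 127.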
@@ -1059,7 +1065,7 @@ void Simulator::PrintSystemRegisters(bool print_all) {
"0b10 (Round towards Minus Infinity)",
"0b11 (Round towards Zero)"
};
- ASSERT(fpcr().RMode() <= (sizeof(rmode) / sizeof(rmode[0])));
+ DCHECK(fpcr().RMode() < ARRAY_SIZE(rmode));
fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
clr_flag_name,
clr_flag_value,
@@ -1199,7 +1205,7 @@ void Simulator::VisitUnconditionalBranch(Instruction* instr) {
void Simulator::VisitConditionalBranch(Instruction* instr) {
- ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
+ DCHECK(instr->Mask(ConditionalBranchMask) == B_cond);
if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) {
set_pc(instr->ImmPCOffsetTarget());
}
@@ -1256,110 +1262,110 @@ void Simulator::VisitCompareBranch(Instruction* instr) {
}
-void Simulator::AddSubHelper(Instruction* instr, int64_t op2) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
- : kWRegSizeInBits;
+template<typename T>
+void Simulator::AddSubHelper(Instruction* instr, T op2) {
bool set_flags = instr->FlagsUpdate();
- int64_t new_val = 0;
+ T new_val = 0;
Instr operation = instr->Mask(AddSubOpMask);
switch (operation) {
case ADD:
case ADDS: {
- new_val = AddWithCarry(reg_size,
- set_flags,
- reg(reg_size, instr->Rn(), instr->RnMode()),
- op2);
+ new_val = AddWithCarry<T>(set_flags,
+ reg<T>(instr->Rn(), instr->RnMode()),
+ op2);
break;
}
case SUB:
case SUBS: {
- new_val = AddWithCarry(reg_size,
- set_flags,
- reg(reg_size, instr->Rn(), instr->RnMode()),
- ~op2,
- 1);
+ new_val = AddWithCarry<T>(set_flags,
+ reg<T>(instr->Rn(), instr->RnMode()),
+ ~op2,
+ 1);
break;
}
default: UNREACHABLE();
}
- set_reg(reg_size, instr->Rd(), new_val, instr->RdMode());
+ set_reg<T>(instr->Rd(), new_val, instr->RdMode());
}
void Simulator::VisitAddSubShifted(Instruction* instr) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
- : kWRegSizeInBits;
- int64_t op2 = ShiftOperand(reg_size,
- reg(reg_size, instr->Rm()),
- static_cast<Shift>(instr->ShiftDP()),
- instr->ImmDPShift());
- AddSubHelper(instr, op2);
+ Shift shift_type = static_cast<Shift>(instr->ShiftDP());
+ unsigned shift_amount = instr->ImmDPShift();
+
+ if (instr->SixtyFourBits()) {
+ int64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
+ AddSubHelper(instr, op2);
+ } else {
+ int32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
+ AddSubHelper(instr, op2);
+ }
}
void Simulator::VisitAddSubImmediate(Instruction* instr) {
int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
- AddSubHelper(instr, op2);
+ if (instr->SixtyFourBits()) {
+ AddSubHelper<int64_t>(instr, op2);
+ } else {
+ AddSubHelper<int32_t>(instr, op2);
+ }
}
void Simulator::VisitAddSubExtended(Instruction* instr) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
- : kWRegSizeInBits;
- int64_t op2 = ExtendValue(reg_size,
- reg(reg_size, instr->Rm()),
- static_cast<Extend>(instr->ExtendMode()),
- instr->ImmExtendShift());
- AddSubHelper(instr, op2);
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ unsigned left_shift = instr->ImmExtendShift();
+ if (instr->SixtyFourBits()) {
+ int64_t op2 = ExtendValue(xreg(instr->Rm()), ext, left_shift);
+ AddSubHelper(instr, op2);
+ } else {
+ int32_t op2 = ExtendValue(wreg(instr->Rm()), ext, left_shift);
+ AddSubHelper(instr, op2);
+ }
}
void Simulator::VisitAddSubWithCarry(Instruction* instr) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
- : kWRegSizeInBits;
- int64_t op2 = reg(reg_size, instr->Rm());
- int64_t new_val;
-
- if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) {
- op2 = ~op2;
+ if (instr->SixtyFourBits()) {
+ AddSubWithCarry<int64_t>(instr);
+ } else {
+ AddSubWithCarry<int32_t>(instr);
}
-
- new_val = AddWithCarry(reg_size,
- instr->FlagsUpdate(),
- reg(reg_size, instr->Rn()),
- op2,
- nzcv().C());
-
- set_reg(reg_size, instr->Rd(), new_val);
}
void Simulator::VisitLogicalShifted(Instruction* instr) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
- : kWRegSizeInBits;
Shift shift_type = static_cast<Shift>(instr->ShiftDP());
unsigned shift_amount = instr->ImmDPShift();
- int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type,
- shift_amount);
- if (instr->Mask(NOT) == NOT) {
- op2 = ~op2;
+
+ if (instr->SixtyFourBits()) {
+ int64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
+ op2 = (instr->Mask(NOT) == NOT) ? ~op2 : op2;
+ LogicalHelper<int64_t>(instr, op2);
+ } else {
+ int32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
+ op2 = (instr->Mask(NOT) == NOT) ? ~op2 : op2;
+ LogicalHelper<int32_t>(instr, op2);
}
- LogicalHelper(instr, op2);
}
void Simulator::VisitLogicalImmediate(Instruction* instr) {
- LogicalHelper(instr, instr->ImmLogical());
+ if (instr->SixtyFourBits()) {
+ LogicalHelper<int64_t>(instr, instr->ImmLogical());
+ } else {
+ LogicalHelper<int32_t>(instr, instr->ImmLogical());
+ }
}
-void Simulator::LogicalHelper(Instruction* instr, int64_t op2) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
- : kWRegSizeInBits;
- int64_t op1 = reg(reg_size, instr->Rn());
- int64_t result = 0;
+template<typename T>
+void Simulator::LogicalHelper(Instruction* instr, T op2) {
+ T op1 = reg<T>(instr->Rn());
+ T result = 0;
bool update_flags = false;
// Switch on the logical operation, stripping out the NOT bit, as it has a
@@ -1374,41 +1380,46 @@ void Simulator::LogicalHelper(Instruction* instr, int64_t op2) {
}
if (update_flags) {
- nzcv().SetN(CalcNFlag(result, reg_size));
+ nzcv().SetN(CalcNFlag(result));
nzcv().SetZ(CalcZFlag(result));
nzcv().SetC(0);
nzcv().SetV(0);
}
- set_reg(reg_size, instr->Rd(), result, instr->RdMode());
+ set_reg<T>(instr->Rd(), result, instr->RdMode());
}
void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
- : kWRegSizeInBits;
- ConditionalCompareHelper(instr, reg(reg_size, instr->Rm()));
+ if (instr->SixtyFourBits()) {
+ ConditionalCompareHelper(instr, xreg(instr->Rm()));
+ } else {
+ ConditionalCompareHelper(instr, wreg(instr->Rm()));
+ }
}
void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
- ConditionalCompareHelper(instr, instr->ImmCondCmp());
+ if (instr->SixtyFourBits()) {
+ ConditionalCompareHelper<int64_t>(instr, instr->ImmCondCmp());
+ } else {
+ ConditionalCompareHelper<int32_t>(instr, instr->ImmCondCmp());
+ }
}
-void Simulator::ConditionalCompareHelper(Instruction* instr, int64_t op2) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
- : kWRegSizeInBits;
- int64_t op1 = reg(reg_size, instr->Rn());
+template<typename T>
+void Simulator::ConditionalCompareHelper(Instruction* instr, T op2) {
+ T op1 = reg<T>(instr->Rn());
if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
// If the condition passes, set the status flags to the result of comparing
// the operands.
if (instr->Mask(ConditionalCompareMask) == CCMP) {
- AddWithCarry(reg_size, true, op1, ~op2, 1);
+ AddWithCarry<T>(true, op1, ~op2, 1);
} else {
- ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
- AddWithCarry(reg_size, true, op1, op2, 0);
+ DCHECK(instr->Mask(ConditionalCompareMask) == CCMN);
+ AddWithCarry<T>(true, op1, op2, 0);
}
} else {
// If the condition fails, set the status flags to the nzcv immediate.
@@ -1440,11 +1451,10 @@ void Simulator::VisitLoadStorePostIndex(Instruction* instr) {
void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
Extend ext = static_cast<Extend>(instr->ExtendMode());
- ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
+ DCHECK((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();
- int64_t offset = ExtendValue(kXRegSizeInBits, xreg(instr->Rm()), ext,
- shift_amount);
+ int64_t offset = ExtendValue(xreg(instr->Rm()), ext, shift_amount);
LoadStoreHelper(instr, offset, Offset);
}
@@ -1484,28 +1494,23 @@ void Simulator::LoadStoreHelper(Instruction* instr,
case STR_w:
case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break;
case LDRSB_w: {
- set_wreg(srcdst,
- ExtendValue(kWRegSizeInBits, MemoryRead8(address), SXTB));
+ set_wreg(srcdst, ExtendValue<int32_t>(MemoryRead8(address), SXTB));
break;
}
case LDRSB_x: {
- set_xreg(srcdst,
- ExtendValue(kXRegSizeInBits, MemoryRead8(address), SXTB));
+ set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead8(address), SXTB));
break;
}
case LDRSH_w: {
- set_wreg(srcdst,
- ExtendValue(kWRegSizeInBits, MemoryRead16(address), SXTH));
+ set_wreg(srcdst, ExtendValue<int32_t>(MemoryRead16(address), SXTH));
break;
}
case LDRSH_x: {
- set_xreg(srcdst,
- ExtendValue(kXRegSizeInBits, MemoryRead16(address), SXTH));
+ set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead16(address), SXTH));
break;
}
case LDRSW_x: {
- set_xreg(srcdst,
- ExtendValue(kXRegSizeInBits, MemoryRead32(address), SXTW));
+ set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead32(address), SXTW));
break;
}
case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break;
@@ -1581,7 +1586,7 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
// 'rt' and 'rt2' can only be aliased for stores.
- ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));
+ DCHECK(((op & LoadStorePairLBit) == 0) || (rt != rt2));
switch (op) {
case LDP_w: {
@@ -1605,8 +1610,8 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
break;
}
case LDPSW_x: {
- set_xreg(rt, ExtendValue(kXRegSizeInBits, MemoryRead32(address), SXTW));
- set_xreg(rt2, ExtendValue(kXRegSizeInBits,
+ set_xreg(rt, ExtendValue<int64_t>(MemoryRead32(address), SXTW));
+ set_xreg(rt2, ExtendValue<int64_t>(
MemoryRead32(address + kWRegSize), SXTW));
break;
}
@@ -1689,7 +1694,7 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
int64_t offset,
AddrMode addrmode) {
if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
- ASSERT(offset != 0);
+ DCHECK(offset != 0);
uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
set_reg(addr_reg, address + offset, Reg31IsStackPointer);
}
@@ -1709,8 +1714,8 @@ void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) {
uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) {
- ASSERT(address != NULL);
- ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
+ DCHECK(address != NULL);
+ DCHECK((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
uint64_t read = 0;
memcpy(&read, address, num_bytes);
return read;
@@ -1750,8 +1755,8 @@ double Simulator::MemoryReadFP64(uint8_t* address) {
void Simulator::MemoryWrite(uint8_t* address,
uint64_t value,
unsigned num_bytes) {
- ASSERT(address != NULL);
- ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
+ DCHECK(address != NULL);
+ DCHECK((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
LogWrite(address, value, num_bytes);
memcpy(address, &value, num_bytes);
@@ -1785,7 +1790,7 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
bool is_64_bits = instr->SixtyFourBits() == 1;
// Shift is limited for W operations.
- ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2));
+ DCHECK(is_64_bits || (instr->ShiftMoveWide() < 2));
// Get the shifted immediate.
int64_t shift = instr->ShiftMoveWide() * 16;
@@ -1822,25 +1827,26 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
void Simulator::VisitConditionalSelect(Instruction* instr) {
- uint64_t new_val = xreg(instr->Rn());
-
if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
- new_val = xreg(instr->Rm());
+ uint64_t new_val = xreg(instr->Rm());
switch (instr->Mask(ConditionalSelectMask)) {
- case CSEL_w:
- case CSEL_x: break;
- case CSINC_w:
- case CSINC_x: new_val++; break;
- case CSINV_w:
- case CSINV_x: new_val = ~new_val; break;
- case CSNEG_w:
- case CSNEG_x: new_val = -new_val; break;
+ case CSEL_w: set_wreg(instr->Rd(), new_val); break;
+ case CSEL_x: set_xreg(instr->Rd(), new_val); break;
+ case CSINC_w: set_wreg(instr->Rd(), new_val + 1); break;
+ case CSINC_x: set_xreg(instr->Rd(), new_val + 1); break;
+ case CSINV_w: set_wreg(instr->Rd(), ~new_val); break;
+ case CSINV_x: set_xreg(instr->Rd(), ~new_val); break;
+ case CSNEG_w: set_wreg(instr->Rd(), -new_val); break;
+ case CSNEG_x: set_xreg(instr->Rd(), -new_val); break;
default: UNIMPLEMENTED();
}
+ } else {
+ if (instr->SixtyFourBits()) {
+ set_xreg(instr->Rd(), xreg(instr->Rn()));
+ } else {
+ set_wreg(instr->Rd(), wreg(instr->Rn()));
+ }
}
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
- : kWRegSizeInBits;
- set_reg(reg_size, instr->Rd(), new_val);
}
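// Illustrative sketch, not part of this diff: the value selected by the
// conditional-select family handled in VisitConditionalSelect above, shown
// for the 64-bit forms. All names here are hypothetical.
enum ExampleCselOp { kExampleCsel, kExampleCsinc, kExampleCsinv, kExampleCsneg };

static uint64_t ExampleConditionalSelect64(bool condition_passed, uint64_t rn,
                                           uint64_t rm, ExampleCselOp op) {
  if (condition_passed) return rn;        // All four forms pass Rn through.
  switch (op) {
    case kExampleCsel:  return rm;        // Rm unchanged.
    case kExampleCsinc: return rm + 1;    // Rm incremented.
    case kExampleCsinv: return ~rm;       // Rm inverted.
    case kExampleCsneg: return 0 - rm;    // Rm negated (two's complement).
  }
  return 0;  // Unreachable.
}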
@@ -1874,7 +1880,7 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
- ASSERT((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
+ DCHECK((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
uint64_t result = 0;
for (unsigned i = 0; i < num_bits; i++) {
result = (result << 1) | (value & 1);
@@ -1898,7 +1904,7 @@ uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
// permute_table[Reverse16] is used by REV16_x, REV16_w
// permute_table[Reverse32] is used by REV32_x, REV_w
// permute_table[Reverse64] is used by REV_x
- ASSERT((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
+ DCHECK((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
{4, 5, 6, 7, 0, 1, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7} };
@@ -1911,28 +1917,17 @@ uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
}
-void Simulator::VisitDataProcessing2Source(Instruction* instr) {
+template <typename T>
+void Simulator::DataProcessing2Source(Instruction* instr) {
Shift shift_op = NO_SHIFT;
- int64_t result = 0;
+ T result = 0;
switch (instr->Mask(DataProcessing2SourceMask)) {
- case SDIV_w: {
- int32_t rn = wreg(instr->Rn());
- int32_t rm = wreg(instr->Rm());
- if ((rn == kWMinInt) && (rm == -1)) {
- result = kWMinInt;
- } else if (rm == 0) {
- // Division by zero can be trapped, but not on A-class processors.
- result = 0;
- } else {
- result = rn / rm;
- }
- break;
- }
+ case SDIV_w:
case SDIV_x: {
- int64_t rn = xreg(instr->Rn());
- int64_t rm = xreg(instr->Rm());
- if ((rn == kXMinInt) && (rm == -1)) {
- result = kXMinInt;
+ T rn = reg<T>(instr->Rn());
+ T rm = reg<T>(instr->Rm());
+ if ((rn == std::numeric_limits<T>::min()) && (rm == -1)) {
+ result = std::numeric_limits<T>::min();
} else if (rm == 0) {
// Division by zero can be trapped, but not on A-class processors.
result = 0;
@@ -1941,20 +1936,11 @@ void Simulator::VisitDataProcessing2Source(Instruction* instr) {
}
break;
}
- case UDIV_w: {
- uint32_t rn = static_cast<uint32_t>(wreg(instr->Rn()));
- uint32_t rm = static_cast<uint32_t>(wreg(instr->Rm()));
- if (rm == 0) {
- // Division by zero can be trapped, but not on A-class processors.
- result = 0;
- } else {
- result = rn / rm;
- }
- break;
- }
+ case UDIV_w:
case UDIV_x: {
- uint64_t rn = static_cast<uint64_t>(xreg(instr->Rn()));
- uint64_t rm = static_cast<uint64_t>(xreg(instr->Rm()));
+ typedef typename make_unsigned<T>::type unsignedT;
+ unsignedT rn = static_cast<unsignedT>(reg<T>(instr->Rn()));
+ unsignedT rm = static_cast<unsignedT>(reg<T>(instr->Rm()));
if (rm == 0) {
// Division by zero can be trapped, but not on A-class processors.
result = 0;
@@ -1974,18 +1960,27 @@ void Simulator::VisitDataProcessing2Source(Instruction* instr) {
default: UNIMPLEMENTED();
}
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
- : kWRegSizeInBits;
if (shift_op != NO_SHIFT) {
// Shift distance encoded in the least-significant five/six bits of the
// register.
- int mask = (instr->SixtyFourBits() == 1) ? kShiftAmountXRegMask
- : kShiftAmountWRegMask;
- unsigned shift = wreg(instr->Rm()) & mask;
- result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op,
- shift);
+ unsigned shift = wreg(instr->Rm());
+ if (sizeof(T) == kWRegSize) {
+ shift &= kShiftAmountWRegMask;
+ } else {
+ shift &= kShiftAmountXRegMask;
+ }
+ result = ShiftOperand(reg<T>(instr->Rn()), shift_op, shift);
+ }
+ set_reg<T>(instr->Rd(), result);
+}
+
+
+void Simulator::VisitDataProcessing2Source(Instruction* instr) {
+ if (instr->SixtyFourBits()) {
+ DataProcessing2Source<int64_t>(instr);
+ } else {
+ DataProcessing2Source<int32_t>(instr);
}
- set_reg(reg_size, instr->Rd(), result);
}
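// Illustrative sketch, not part of this diff: the two SDIV edge cases handled
// in DataProcessing2Source above, shown for the 32-bit form. The name is
// hypothetical; assumes <cstdint> and <limits>.
static int32_t ExampleSdiv32(int32_t rn, int32_t rm) {
  if (rm == 0) {
    // AArch64 integer division by zero yields zero rather than trapping.
    return 0;
  }
  if ((rn == std::numeric_limits<int32_t>::min()) && (rm == -1)) {
    // INT_MIN / -1 overflows; the result is defined to be INT_MIN.
    return std::numeric_limits<int32_t>::min();
  }
  return rn / rm;
}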
@@ -2012,9 +2007,6 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
void Simulator::VisitDataProcessing3Source(Instruction* instr) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
- : kWRegSizeInBits;
-
int64_t result = 0;
// Extract and sign- or zero-extend 32-bit arguments for widening operations.
uint64_t rn_u32 = reg<uint32_t>(instr->Rn());
@@ -2035,26 +2027,31 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
case SMULH_x:
- ASSERT(instr->Ra() == kZeroRegCode);
+ DCHECK(instr->Ra() == kZeroRegCode);
result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
break;
default: UNIMPLEMENTED();
}
- set_reg(reg_size, instr->Rd(), result);
+
+ if (instr->SixtyFourBits()) {
+ set_xreg(instr->Rd(), result);
+ } else {
+ set_wreg(instr->Rd(), result);
+ }
}
-void Simulator::VisitBitfield(Instruction* instr) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
- : kWRegSizeInBits;
- int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask;
- int64_t R = instr->ImmR();
- int64_t S = instr->ImmS();
- int64_t diff = S - R;
- int64_t mask;
+template <typename T>
+void Simulator::BitfieldHelper(Instruction* instr) {
+ typedef typename make_unsigned<T>::type unsignedT;
+ T reg_size = sizeof(T) * 8;
+ T R = instr->ImmR();
+ T S = instr->ImmS();
+ T diff = S - R;
+ T mask;
if (diff >= 0) {
- mask = diff < reg_size - 1 ? (1L << (diff + 1)) - 1
- : reg_mask;
+ mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1
+ : static_cast<T>(-1);
} else {
mask = ((1L << (S + 1)) - 1);
mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
@@ -2083,30 +2080,37 @@ void Simulator::VisitBitfield(Instruction* instr) {
UNIMPLEMENTED();
}
- int64_t dst = inzero ? 0 : reg(reg_size, instr->Rd());
- int64_t src = reg(reg_size, instr->Rn());
+ T dst = inzero ? 0 : reg<T>(instr->Rd());
+ T src = reg<T>(instr->Rn());
// Rotate source bitfield into place.
- int64_t result = (static_cast<uint64_t>(src) >> R) | (src << (reg_size - R));
+ T result = (static_cast<unsignedT>(src) >> R) | (src << (reg_size - R));
// Determine the sign extension.
- int64_t topbits_preshift = (1L << (reg_size - diff - 1)) - 1;
- int64_t signbits = (extend && ((src >> S) & 1) ? topbits_preshift : 0)
- << (diff + 1);
+ T topbits_preshift = (static_cast<T>(1) << (reg_size - diff - 1)) - 1;
+ T signbits = (extend && ((src >> S) & 1) ? topbits_preshift : 0)
+ << (diff + 1);
// Merge sign extension, dest/zero and bitfield.
result = signbits | (result & mask) | (dst & ~mask);
- set_reg(reg_size, instr->Rd(), result);
+ set_reg<T>(instr->Rd(), result);
+}
+
+
+void Simulator::VisitBitfield(Instruction* instr) {
+ if (instr->SixtyFourBits()) {
+ BitfieldHelper<int64_t>(instr);
+ } else {
+ BitfieldHelper<int32_t>(instr);
+ }
}
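// Illustrative sketch, not part of this diff: the net effect of BitfieldHelper
// above for the UBFX case (extract 'width' bits starting at bit 'lsb' and
// zero-extend). The name is hypothetical; assumes <cstdint>.
static uint64_t ExampleUbfx64(uint64_t src, unsigned lsb, unsigned width) {
  // Valid encodings satisfy 1 <= width and lsb + width <= 64.
  uint64_t mask =
      (width == 64) ? ~uint64_t{0} : ((uint64_t{1} << width) - 1);
  return (src >> lsb) & mask;
}
// e.g. ExampleUbfx64(0xABCD0000, 16, 16) == 0xABCD.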
void Simulator::VisitExtract(Instruction* instr) {
- unsigned lsb = instr->ImmS();
- unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
- : kWRegSizeInBits;
- set_reg(reg_size,
- instr->Rd(),
- (static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb) |
- (reg(reg_size, instr->Rn()) << (reg_size - lsb)));
+ if (instr->SixtyFourBits()) {
+ Extract<uint64_t>(instr);
+ } else {
+ Extract<uint32_t>(instr);
+ }
}
@@ -2403,10 +2407,10 @@ void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
template <class T, int ebits, int mbits>
static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
FPRounding round_mode) {
- ASSERT((sign == 0) || (sign == 1));
+ DCHECK((sign == 0) || (sign == 1));
// Only the FPTieEven rounding mode is implemented.
- ASSERT(round_mode == FPTieEven);
+ DCHECK(round_mode == FPTieEven);
USE(round_mode);
// Rounding can promote subnormals to normals, and normals to infinities. For
@@ -2723,7 +2727,7 @@ double Simulator::FPToDouble(float value) {
float Simulator::FPToFloat(double value, FPRounding round_mode) {
// Only the FPTieEven rounding mode is implemented.
- ASSERT(round_mode == FPTieEven);
+ DCHECK(round_mode == FPTieEven);
USE(round_mode);
switch (std::fpclassify(value)) {
@@ -2852,7 +2856,7 @@ void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
template <typename T>
T Simulator::FPAdd(T op1, T op2) {
// NaNs should be handled elsewhere.
- ASSERT(!std::isnan(op1) && !std::isnan(op2));
+ DCHECK(!std::isnan(op1) && !std::isnan(op2));
if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) {
// inf + -inf returns the default NaN.
@@ -2867,7 +2871,7 @@ T Simulator::FPAdd(T op1, T op2) {
template <typename T>
T Simulator::FPDiv(T op1, T op2) {
// NaNs should be handled elsewhere.
- ASSERT(!std::isnan(op1) && !std::isnan(op2));
+ DCHECK(!std::isnan(op1) && !std::isnan(op2));
if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) {
// inf / inf and 0.0 / 0.0 return the default NaN.
@@ -2882,7 +2886,7 @@ T Simulator::FPDiv(T op1, T op2) {
template <typename T>
T Simulator::FPMax(T a, T b) {
// NaNs should be handled elsewhere.
- ASSERT(!std::isnan(a) && !std::isnan(b));
+ DCHECK(!std::isnan(a) && !std::isnan(b));
if ((a == 0.0) && (b == 0.0) &&
(copysign(1.0, a) != copysign(1.0, b))) {
@@ -2909,7 +2913,7 @@ T Simulator::FPMaxNM(T a, T b) {
template <typename T>
T Simulator::FPMin(T a, T b) {
// NaNs should be handled elsewhere.
- ASSERT(!isnan(a) && !isnan(b));
+ DCHECK(!std::isnan(a) && !std::isnan(b));
if ((a == 0.0) && (b == 0.0) &&
(copysign(1.0, a) != copysign(1.0, b))) {
@@ -2937,7 +2941,7 @@ T Simulator::FPMinNM(T a, T b) {
template <typename T>
T Simulator::FPMul(T op1, T op2) {
// NaNs should be handled elsewhere.
- ASSERT(!std::isnan(op1) && !std::isnan(op2));
+ DCHECK(!std::isnan(op1) && !std::isnan(op2));
if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
// inf * 0.0 returns the default NaN.
@@ -2982,7 +2986,7 @@ T Simulator::FPMulAdd(T a, T op1, T op2) {
}
result = FusedMultiplyAdd(op1, op2, a);
- ASSERT(!std::isnan(result));
+ DCHECK(!std::isnan(result));
// Work around broken fma implementations for rounded zero results: If a is
// 0.0, the sign of the result is the sign of op1 * op2 before rounding.
@@ -3009,7 +3013,7 @@ T Simulator::FPSqrt(T op) {
template <typename T>
T Simulator::FPSub(T op1, T op2) {
// NaNs should be handled elsewhere.
- ASSERT(!std::isnan(op1) && !std::isnan(op2));
+ DCHECK(!std::isnan(op1) && !std::isnan(op2));
if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) {
// inf - inf returns the default NaN.
@@ -3023,7 +3027,7 @@ T Simulator::FPSub(T op1, T op2) {
template <typename T>
T Simulator::FPProcessNaN(T op) {
- ASSERT(std::isnan(op));
+ DCHECK(std::isnan(op));
return fpcr().DN() ? FPDefaultNaN<T>() : ToQuietNaN(op);
}
@@ -3035,10 +3039,10 @@ T Simulator::FPProcessNaNs(T op1, T op2) {
} else if (IsSignallingNaN(op2)) {
return FPProcessNaN(op2);
} else if (std::isnan(op1)) {
- ASSERT(IsQuietNaN(op1));
+ DCHECK(IsQuietNaN(op1));
return FPProcessNaN(op1);
} else if (std::isnan(op2)) {
- ASSERT(IsQuietNaN(op2));
+ DCHECK(IsQuietNaN(op2));
return FPProcessNaN(op2);
} else {
return 0.0;
@@ -3055,13 +3059,13 @@ T Simulator::FPProcessNaNs3(T op1, T op2, T op3) {
} else if (IsSignallingNaN(op3)) {
return FPProcessNaN(op3);
} else if (std::isnan(op1)) {
- ASSERT(IsQuietNaN(op1));
+ DCHECK(IsQuietNaN(op1));
return FPProcessNaN(op1);
} else if (std::isnan(op2)) {
- ASSERT(IsQuietNaN(op2));
+ DCHECK(IsQuietNaN(op2));
return FPProcessNaN(op2);
} else if (std::isnan(op3)) {
- ASSERT(IsQuietNaN(op3));
+ DCHECK(IsQuietNaN(op3));
return FPProcessNaN(op3);
} else {
return 0.0;
@@ -3117,7 +3121,7 @@ void Simulator::VisitSystem(Instruction* instr) {
}
}
} else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
- ASSERT(instr->Mask(SystemHintMask) == HINT);
+ DCHECK(instr->Mask(SystemHintMask) == HINT);
switch (instr->ImmHint()) {
case NOP: break;
default: UNIMPLEMENTED();
@@ -3160,12 +3164,12 @@ bool Simulator::GetValue(const char* desc, int64_t* value) {
bool Simulator::PrintValue(const char* desc) {
if (strcmp(desc, "csp") == 0) {
- ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
+ DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
PrintF(stream_, "%s csp:%s 0x%016" PRIx64 "%s\n",
clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
return true;
} else if (strcmp(desc, "wcsp") == 0) {
- ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
+ DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
PrintF(stream_, "%s wcsp:%s 0x%08" PRIx32 "%s\n",
clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
return true;
@@ -3341,17 +3345,18 @@ void Simulator::Debug() {
(strcmp(cmd, "po") == 0)) {
if (argc == 2) {
int64_t value;
+ OFStream os(stdout);
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
- PrintF("%s: \n", arg1);
+ os << arg1 << ": \n";
#ifdef DEBUG
- obj->PrintLn();
+ obj->Print(os);
+ os << "\n";
#else
- obj->ShortPrint();
- PrintF("\n");
+ os << Brief(obj) << "\n";
#endif
} else {
- PrintF("%s unrecognized\n", arg1);
+ os << arg1 << " unrecognized\n";
}
} else {
PrintF("printobject <value>\n"
@@ -3441,7 +3446,7 @@ void Simulator::Debug() {
// gdb -------------------------------------------------------------------
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("Relinquishing control to gdb.\n");
- OS::DebugBreak();
+ base::OS::DebugBreak();
PrintF("Regaining control from gdb.\n");
// sysregs ---------------------------------------------------------------
@@ -3556,7 +3561,7 @@ void Simulator::VisitException(Instruction* instr) {
break;
default:
// We don't support a one-shot LOG_DISASM.
- ASSERT((parameters & LOG_DISASM) == 0);
+ DCHECK((parameters & LOG_DISASM) == 0);
// Don't print information that is already being traced.
parameters &= ~log_parameters();
// Print the requested information.
@@ -3570,8 +3575,8 @@ void Simulator::VisitException(Instruction* instr) {
size_t size = kDebugMessageOffset + strlen(message) + 1;
pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize));
// - Verify that the unreachable marker is present.
- ASSERT(pc_->Mask(ExceptionMask) == HLT);
- ASSERT(pc_->ImmException() == kImmExceptionIsUnreachable);
+ DCHECK(pc_->Mask(ExceptionMask) == HLT);
+ DCHECK(pc_->ImmException() == kImmExceptionIsUnreachable);
// - Skip past the unreachable marker.
set_pc(pc_->following());
@@ -3581,43 +3586,7 @@ void Simulator::VisitException(Instruction* instr) {
} else if (instr->ImmException() == kImmExceptionIsRedirectedCall) {
DoRuntimeCall(instr);
} else if (instr->ImmException() == kImmExceptionIsPrintf) {
- // Read the argument encoded inline in the instruction stream.
- uint32_t type;
- memcpy(&type,
- pc_->InstructionAtOffset(kPrintfTypeOffset),
- sizeof(type));
-
- const char* format = reg<const char*>(0);
-
- // Pass all of the relevant PCS registers onto printf. It doesn't
- // matter if we pass too many as the extra ones won't be read.
- int result;
- fputs(clr_printf, stream_);
- if (type == CPURegister::kRegister) {
- result = fprintf(stream_, format,
- xreg(1), xreg(2), xreg(3), xreg(4),
- xreg(5), xreg(6), xreg(7));
- } else if (type == CPURegister::kFPRegister) {
- result = fprintf(stream_, format,
- dreg(0), dreg(1), dreg(2), dreg(3),
- dreg(4), dreg(5), dreg(6), dreg(7));
- } else {
- ASSERT(type == CPURegister::kNoRegister);
- result = fprintf(stream_, "%s", format);
- }
- fputs(clr_normal, stream_);
-
-#ifdef DEBUG
- CorruptAllCallerSavedCPURegisters();
-#endif
-
- set_xreg(0, result);
-
- // The printf parameters are inlined in the code, so skip them.
- set_pc(pc_->InstructionAtOffset(kPrintfLength));
-
- // Set LR as if we'd just called a native printf function.
- set_lr(pc());
+ DoPrintf(instr);
} else if (instr->ImmException() == kImmExceptionIsUnreachable) {
fprintf(stream_, "Hit UNREACHABLE marker at PC=%p.\n",
@@ -3625,7 +3594,7 @@ void Simulator::VisitException(Instruction* instr) {
abort();
} else {
- OS::DebugBreak();
+ base::OS::DebugBreak();
}
break;
}
@@ -3635,6 +3604,133 @@ void Simulator::VisitException(Instruction* instr) {
}
}
+
+void Simulator::DoPrintf(Instruction* instr) {
+ DCHECK((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kImmExceptionIsPrintf));
+
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t arg_count;
+ uint32_t arg_pattern_list;
+ STATIC_ASSERT(sizeof(*instr) == 1);
+ memcpy(&arg_count,
+ instr + kPrintfArgCountOffset,
+ sizeof(arg_count));
+ memcpy(&arg_pattern_list,
+ instr + kPrintfArgPatternListOffset,
+ sizeof(arg_pattern_list));
+
+ DCHECK(arg_count <= kPrintfMaxArgCount);
+ DCHECK((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);
+
+ // We need to call the host printf function with a set of arguments defined by
+ // arg_pattern_list. Because we don't know the types and sizes of the
+ // arguments, this is very difficult to do in a robust and portable way. To
+ // work around the problem, we pick apart the format string, and print one
+ // format placeholder at a time.
+
+ // Allocate space for the format string. We take a copy, so we can modify it.
+ // Leave enough space for one extra character per expected argument (plus the
+ // '\0' termination).
+ const char * format_base = reg<const char *>(0);
+ DCHECK(format_base != NULL);
+ size_t length = strlen(format_base) + 1;
+ char * const format = new char[length + arg_count];
+
+ // A list of chunks, each with exactly one format placeholder.
+ const char * chunks[kPrintfMaxArgCount];
+
+ // Copy the format string and search for format placeholders.
+ uint32_t placeholder_count = 0;
+ char * format_scratch = format;
+ for (size_t i = 0; i < length; i++) {
+ if (format_base[i] != '%') {
+ *format_scratch++ = format_base[i];
+ } else {
+ if (format_base[i + 1] == '%') {
+ // Ignore explicit "%%" sequences.
+ *format_scratch++ = format_base[i];
+
+ if (placeholder_count == 0) {
+ // The first chunk is passed to printf using "%s", so we need to
+ // unescape "%%" sequences in this chunk. (Just skip the next '%'.)
+ i++;
+ } else {
+ // Otherwise, pass through "%%" unchanged.
+ *format_scratch++ = format_base[++i];
+ }
+ } else {
+ CHECK(placeholder_count < arg_count);
+ // Insert '\0' before placeholders, and store their locations.
+ *format_scratch++ = '\0';
+ chunks[placeholder_count++] = format_scratch;
+ *format_scratch++ = format_base[i];
+ }
+ }
+ }
+ DCHECK(format_scratch <= (format + length + arg_count));
+ CHECK(placeholder_count == arg_count);
+
+ // Finally, call printf with each chunk, passing the appropriate register
+ // argument. Normally, printf returns the number of bytes transmitted, so we
+ // can emulate a single printf call by adding the result from each chunk. If
+ // any call returns a negative (error) value, though, just return that value.
+
+ fprintf(stream_, "%s", clr_printf);
+
+ // Because '\0' is inserted before each placeholder, the first string in
+ // 'format' contains no format placeholders and should be printed literally.
+ int result = fprintf(stream_, "%s", format);
+ int pcs_r = 1; // Start at x1. x0 holds the format string.
+ int pcs_f = 0; // Start at d0.
+ if (result >= 0) {
+ for (uint32_t i = 0; i < placeholder_count; i++) {
+ int part_result = -1;
+
+ uint32_t arg_pattern = arg_pattern_list >> (i * kPrintfArgPatternBits);
+ arg_pattern &= (1 << kPrintfArgPatternBits) - 1;
+ switch (arg_pattern) {
+ case kPrintfArgW:
+ part_result = fprintf(stream_, chunks[i], wreg(pcs_r++));
+ break;
+ case kPrintfArgX:
+ part_result = fprintf(stream_, chunks[i], xreg(pcs_r++));
+ break;
+ case kPrintfArgD:
+ part_result = fprintf(stream_, chunks[i], dreg(pcs_f++));
+ break;
+ default: UNREACHABLE();
+ }
+
+ if (part_result < 0) {
+ // Handle error values.
+ result = part_result;
+ break;
+ }
+
+ result += part_result;
+ }
+ }
+
+ fprintf(stream_, "%s", clr_normal);
+
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+
+ // Printf returns its result in x0 (just like the C library's printf).
+ set_xreg(0, result);
+
+ // The printf parameters are inlined in the code, so skip them.
+ set_pc(instr->InstructionAtOffset(kPrintfLength));
+
+ // Set LR as if we'd just called a native printf function.
+ set_lr(pc());
+
+ delete[] format;
+}
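// Illustrative sketch, not part of this diff: how DoPrintf above splits a
// format string into one chunk per placeholder. For "x=%d, y=%d\n" with two
// kPrintfArgW arguments, the copied buffer becomes
//   "x=" '\0' "%d, y=" '\0' "%d\n"
// and is emitted as three host fprintf calls:
//   fprintf(stream, "%s", "x=");          // literal prefix, no placeholders
//   fprintf(stream, "%d, y=", wreg(1));   // chunks[0], first argument in w1
//   fprintf(stream, "%d\n", wreg(2));     // chunks[1], second argument in w2
// Decoding one entry of the packed pattern list looks like this (the helper
// name is hypothetical):
static uint32_t ExamplePrintfArgPattern(uint32_t arg_pattern_list, uint32_t i) {
  uint32_t pattern = arg_pattern_list >> (i * kPrintfArgPatternBits);
  return pattern & ((1 << kPrintfArgPatternBits) - 1);  // kPrintfArgW/X/D.
}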
+
+
#endif // USE_SIMULATOR
} } // namespace v8::internal
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 543385b37..6b0211816 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -8,16 +8,16 @@
#include <stdarg.h>
#include <vector>
-#include "v8.h"
+#include "src/v8.h"
-#include "globals.h"
-#include "utils.h"
-#include "allocation.h"
-#include "assembler.h"
-#include "arm64/assembler-arm64.h"
-#include "arm64/decoder-arm64.h"
-#include "arm64/disasm-arm64.h"
-#include "arm64/instrument-arm64.h"
+#include "src/allocation.h"
+#include "src/arm64/assembler-arm64.h"
+#include "src/arm64/decoder-arm64.h"
+#include "src/arm64/disasm-arm64.h"
+#include "src/arm64/instrument-arm64.h"
+#include "src/assembler.h"
+#include "src/globals.h"
+#include "src/utils.h"
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
@@ -54,9 +54,6 @@ typedef int (*arm64_regexp_matcher)(String* input,
(FUNCTION_CAST<arm64_regexp_matcher>(entry)( \
p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- reinterpret_cast<TryCatch*>(try_catch_address)
-
// Running without a simulator there is nothing to do.
class SimulatorStack : public v8::internal::AllStatic {
public:
@@ -136,35 +133,28 @@ class SimSystemRegister {
// Represent a register (r0-r31, v0-v31).
-template<int kSizeInBytes>
class SimRegisterBase {
public:
template<typename T>
- void Set(T new_value, unsigned size = sizeof(T)) {
- ASSERT(size <= kSizeInBytes);
- ASSERT(size <= sizeof(new_value));
- // All AArch64 registers are zero-extending; Writing a W register clears the
- // top bits of the corresponding X register.
- memset(value_, 0, kSizeInBytes);
- memcpy(value_, &new_value, size);
+ void Set(T new_value) {
+ value_ = 0;
+ memcpy(&value_, &new_value, sizeof(T));
}
- // Copy 'size' bytes of the register to the result, and zero-extend to fill
- // the result.
template<typename T>
- T Get(unsigned size = sizeof(T)) const {
- ASSERT(size <= kSizeInBytes);
+ T Get() const {
T result;
- memset(&result, 0, sizeof(result));
- memcpy(&result, value_, size);
+ memcpy(&result, &value_, sizeof(T));
return result;
}
protected:
- uint8_t value_[kSizeInBytes];
+ int64_t value_;
};
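// Illustrative sketch, not part of this diff: the zero-extension behaviour of
// SimRegisterBase above. Set() clears value_ before copying sizeof(T) bytes,
// so on a little-endian host a W-sized write leaves the upper half of the X
// view zero:
//
//   SimRegister r;
//   r.Set<int64_t>(int64_t{-1});   // X view reads back 0xffffffffffffffff.
//   r.Set<int32_t>(int32_t{-1});   // W write zeroes the register first...
//   r.Get<int64_t>();              // ...so this yields 0x00000000ffffffff.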
-typedef SimRegisterBase<kXRegSize> SimRegister; // r0-r31
-typedef SimRegisterBase<kDRegSize> SimFPRegister; // v0-v31
+
+
+typedef SimRegisterBase SimRegister; // r0-r31
+typedef SimRegisterBase SimFPRegister; // v0-v31
class Simulator : public DecoderVisitor {
@@ -221,13 +211,14 @@ class Simulator : public DecoderVisitor {
public:
template<typename T>
explicit CallArgument(T argument) {
- ASSERT(sizeof(argument) <= sizeof(bits_));
+ bits_ = 0;
+ DCHECK(sizeof(argument) <= sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = X_ARG;
}
explicit CallArgument(double argument) {
- ASSERT(sizeof(argument) == sizeof(bits_));
+ DCHECK(sizeof(argument) == sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = D_ARG;
}
@@ -238,10 +229,10 @@ class Simulator : public DecoderVisitor {
UNIMPLEMENTED();
// Make the D register a NaN to try to trap errors if the callee expects a
// double. If it expects a float, the callee should ignore the top word.
- ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_));
+ DCHECK(sizeof(kFP64SignallingNaN) == sizeof(bits_));
memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN));
// Write the float payload to the S register.
- ASSERT(sizeof(argument) <= sizeof(bits_));
+ DCHECK(sizeof(argument) <= sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = D_ARG;
}
@@ -299,7 +290,7 @@ class Simulator : public DecoderVisitor {
// Simulation helpers.
template <typename T>
void set_pc(T new_pc) {
- ASSERT(sizeof(T) == sizeof(pc_));
+ DCHECK(sizeof(T) == sizeof(pc_));
memcpy(&pc_, &new_pc, sizeof(T));
pc_modified_ = true;
}
@@ -318,7 +309,7 @@ class Simulator : public DecoderVisitor {
}
void ExecuteInstruction() {
- ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
+ DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
CheckBreakNext();
Decode(pc_);
LogProcessorState();
@@ -331,98 +322,65 @@ class Simulator : public DecoderVisitor {
VISITOR_LIST(DECLARE)
#undef DECLARE
- // Register accessors.
+ bool IsZeroRegister(unsigned code, Reg31Mode r31mode) const {
+ return ((code == 31) && (r31mode == Reg31IsZeroRegister));
+ }
+ // Register accessors.
   // Return the value of an integer register, as the specified type. The
   // value is zero-extended to fill the result.
- // The only supported values of 'size' are kXRegSizeInBits and
- // kWRegSizeInBits.
- template<typename T>
- T reg(unsigned size, unsigned code,
- Reg31Mode r31mode = Reg31IsZeroRegister) const {
- unsigned size_in_bytes = size / 8;
- ASSERT(size_in_bytes <= sizeof(T));
- ASSERT((size == kXRegSizeInBits) || (size == kWRegSizeInBits));
- ASSERT(code < kNumberOfRegisters);
-
- if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
- T result;
- memset(&result, 0, sizeof(result));
- return result;
- }
- return registers_[code].Get<T>(size_in_bytes);
- }
-
- // Like reg(), but infer the access size from the template type.
template<typename T>
T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
- return reg<T>(sizeof(T) * 8, code, r31mode);
+ DCHECK(code < kNumberOfRegisters);
+ if (IsZeroRegister(code, r31mode)) {
+ return 0;
+ }
+ return registers_[code].Get<T>();
}
// Common specialized accessors for the reg() template.
- int32_t wreg(unsigned code,
- Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ int32_t wreg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
return reg<int32_t>(code, r31mode);
}
- int64_t xreg(unsigned code,
- Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ int64_t xreg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
return reg<int64_t>(code, r31mode);
}
- int64_t reg(unsigned size, unsigned code,
- Reg31Mode r31mode = Reg31IsZeroRegister) const {
- return reg<int64_t>(size, code, r31mode);
- }
-
   // Write 'value' into an integer register. The value is zero-extended.
   // This behaviour matches AArch64 register writes.
- //
- // The only supported values of 'size' are kXRegSizeInBits and
- // kWRegSizeInBits.
- template<typename T>
- void set_reg(unsigned size, unsigned code, T value,
- Reg31Mode r31mode = Reg31IsZeroRegister) {
- unsigned size_in_bytes = size / 8;
- ASSERT(size_in_bytes <= sizeof(T));
- ASSERT((size == kXRegSizeInBits) || (size == kWRegSizeInBits));
- ASSERT(code < kNumberOfRegisters);
-
- if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
- return;
- }
- return registers_[code].Set(value, size_in_bytes);
- }
// Like set_reg(), but infer the access size from the template type.
template<typename T>
void set_reg(unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
- set_reg(sizeof(value) * 8, code, value, r31mode);
+ DCHECK(code < kNumberOfRegisters);
+ if (!IsZeroRegister(code, r31mode))
+ registers_[code].Set(value);
}
// Common specialized accessors for the set_reg() template.
void set_wreg(unsigned code, int32_t value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
- set_reg(kWRegSizeInBits, code, value, r31mode);
+ set_reg(code, value, r31mode);
}
void set_xreg(unsigned code, int64_t value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
- set_reg(kXRegSizeInBits, code, value, r31mode);
+ set_reg(code, value, r31mode);
}
// Commonly-used special cases.
template<typename T>
void set_lr(T value) {
- ASSERT(sizeof(T) == kPointerSize);
+ DCHECK(sizeof(T) == kPointerSize);
set_reg(kLinkRegCode, value);
}
template<typename T>
void set_sp(T value) {
- ASSERT(sizeof(T) == kPointerSize);
+ DCHECK(sizeof(T) == kPointerSize);
set_reg(31, value, Reg31IsStackPointer);
}
@@ -435,24 +393,10 @@ class Simulator : public DecoderVisitor {
Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); }
- // Return 'size' bits of the value of a floating-point register, as the
- // specified type. The value is zero-extended to fill the result.
- //
- // The only supported values of 'size' are kDRegSizeInBits and
- // kSRegSizeInBits.
- template<typename T>
- T fpreg(unsigned size, unsigned code) const {
- unsigned size_in_bytes = size / 8;
- ASSERT(size_in_bytes <= sizeof(T));
- ASSERT((size == kDRegSizeInBits) || (size == kSRegSizeInBits));
- ASSERT(code < kNumberOfFPRegisters);
- return fpregisters_[code].Get<T>(size_in_bytes);
- }
-
- // Like fpreg(), but infer the access size from the template type.
template<typename T>
T fpreg(unsigned code) const {
- return fpreg<T>(sizeof(T) * 8, code);
+ DCHECK(code < kNumberOfRegisters);
+ return fpregisters_[code].Get<T>();
}
// Common specialized accessors for the fpreg() template.
@@ -486,9 +430,9 @@ class Simulator : public DecoderVisitor {
// This behaviour matches AArch64 register writes.
template<typename T>
void set_fpreg(unsigned code, T value) {
- ASSERT((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
- ASSERT(code < kNumberOfFPRegisters);
- fpregisters_[code].Set(value, sizeof(value));
+ DCHECK((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
+ DCHECK(code < kNumberOfFPRegisters);
+ fpregisters_[code].Set(value);
}
// Common specialized accessors for the set_fpreg() template.
@@ -628,14 +572,19 @@ class Simulator : public DecoderVisitor {
return !ConditionPassed(cond);
}
- void AddSubHelper(Instruction* instr, int64_t op2);
- int64_t AddWithCarry(unsigned reg_size,
- bool set_flags,
- int64_t src1,
- int64_t src2,
- int64_t carry_in = 0);
- void LogicalHelper(Instruction* instr, int64_t op2);
- void ConditionalCompareHelper(Instruction* instr, int64_t op2);
+ template<typename T>
+ void AddSubHelper(Instruction* instr, T op2);
+ template<typename T>
+ T AddWithCarry(bool set_flags,
+ T src1,
+ T src2,
+ T carry_in = 0);
+ template<typename T>
+ void AddSubWithCarry(Instruction* instr);
+ template<typename T>
+ void LogicalHelper(Instruction* instr, T op2);
+ template<typename T>
+ void ConditionalCompareHelper(Instruction* instr, T op2);
void LoadStoreHelper(Instruction* instr,
int64_t offset,
AddrMode addrmode);
@@ -662,18 +611,21 @@ class Simulator : public DecoderVisitor {
void MemoryWrite64(uint8_t* address, uint64_t value);
void MemoryWriteFP64(uint8_t* address, double value);
- int64_t ShiftOperand(unsigned reg_size,
- int64_t value,
- Shift shift_type,
- unsigned amount);
- int64_t Rotate(unsigned reg_width,
- int64_t value,
+
+ template <typename T>
+ T ShiftOperand(T value,
Shift shift_type,
unsigned amount);
- int64_t ExtendValue(unsigned reg_width,
- int64_t value,
- Extend extend_type,
- unsigned left_shift = 0);
+ template <typename T>
+ T ExtendValue(T value,
+ Extend extend_type,
+ unsigned left_shift = 0);
+ template <typename T>
+ void Extract(Instruction* instr);
+ template <typename T>
+ void DataProcessing2Source(Instruction* instr);
+ template <typename T>
+ void BitfieldHelper(Instruction* instr);
uint64_t ReverseBits(uint64_t value, unsigned num_bits);
uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
@@ -757,6 +709,9 @@ class Simulator : public DecoderVisitor {
void CorruptAllCallerSavedCPURegisters();
#endif
+ // Pseudo Printf instruction
+ void DoPrintf(Instruction* instr);
+
// Processor state ---------------------------------------
// Output stream.
@@ -789,15 +744,16 @@ class Simulator : public DecoderVisitor {
// functions, or to save and restore it when entering and leaving generated
// code.
void AssertSupportedFPCR() {
- ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
- ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
+ DCHECK(fpcr().FZ() == 0); // No flush-to-zero support.
+ DCHECK(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
// The simulator does not support half-precision operations so fpcr().AHP()
// is irrelevant, and is not checked here.
}
- static int CalcNFlag(uint64_t result, unsigned reg_size) {
- return (result >> (reg_size - 1)) & 1;
+ template <typename T>
+ static int CalcNFlag(T result) {
+ return (result >> (sizeof(T) * 8 - 1)) & 1;
}
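  // Illustrative note, not part of this diff: CalcNFlag above extracts the
  // sign bit of the result at its actual width, e.g. CalcNFlag(int32_t{-1})
  // == 1 (bit 31 set) while CalcNFlag(int64_t{1}) == 0 (bit 63 clear).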
static int CalcZFlag(uint64_t result) {
@@ -854,10 +810,6 @@ class Simulator : public DecoderVisitor {
entry, \
p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
- NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
-
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code.
diff --git a/deps/v8/src/arm64/stub-cache-arm64.cc b/deps/v8/src/arm64/stub-cache-arm64.cc
index 760fbb354..b7d43a477 100644
--- a/deps/v8/src/arm64/stub-cache-arm64.cc
+++ b/deps/v8/src/arm64/stub-cache-arm64.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
+#include "src/codegen.h"
+#include "src/ic-inl.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -17,14 +17,11 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<Name> name,
- Register scratch0,
- Register scratch1) {
- ASSERT(!AreAliased(receiver, scratch0, scratch1));
- ASSERT(name->IsUniqueName());
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+ MacroAssembler* masm, Label* miss_label, Register receiver,
+ Handle<Name> name, Register scratch0, Register scratch1) {
+ DCHECK(!AreAliased(receiver, scratch0, scratch1));
+ DCHECK(name->IsUniqueName());
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@@ -96,7 +93,7 @@ static void ProbeTable(Isolate* isolate,
Label miss;
- ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3));
+ DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3));
// Multiply by 3 because there are 3 fields per entry.
__ Add(scratch3, offset, Operand(offset, LSL, 1));
@@ -154,15 +151,15 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Label miss;
   // Make sure the flags do not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
// Make sure that there are no register conflicts.
- ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
+ DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
// Make sure extra and extra2 registers are valid.
- ASSERT(!extra.is(no_reg));
- ASSERT(!extra2.is(no_reg));
- ASSERT(!extra3.is(no_reg));
+ DCHECK(!extra.is(no_reg));
+ DCHECK(!extra2.is(no_reg));
+ DCHECK(!extra3.is(no_reg));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
@@ -177,7 +174,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ Add(scratch, scratch, extra);
__ Eor(scratch, scratch, flags);
// We shift out the last two bits because they are not part of the hash.
- __ Ubfx(scratch, scratch, kHeapObjectTagSize,
+ __ Ubfx(scratch, scratch, kCacheIndexShift,
CountTrailingZeros(kPrimaryTableSize, 64));
// Probe the primary table.
@@ -185,8 +182,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
scratch, extra, extra2, extra3);
// Primary miss: Compute hash for secondary table.
- __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
- __ Add(scratch, scratch, flags >> kHeapObjectTagSize);
+ __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
+ __ Add(scratch, scratch, flags >> kCacheIndexShift);
__ And(scratch, scratch, kSecondaryTableSize - 1);
// Probe the secondary table.
@@ -201,29 +198,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
}
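// Illustrative sketch, not part of this diff: the index arithmetic performed
// by the probe code in GenerateProbe above, as plain C++. 'name_field' and
// 'map_field' stand for the word values combined in the elided earlier part
// of the function; all names are hypothetical, and the real constants are
// StubCache::kCacheIndexShift, kPrimaryTableSize and kSecondaryTableSize.
static uint32_t ExamplePrimaryIndex(uint32_t name_field, uint32_t map_field,
                                    uint32_t flags, uint32_t shift,
                                    uint32_t primary_table_size) {
  uint32_t hash = (name_field + map_field) ^ flags;
  // The Ubfx drops the low 'shift' bits and keeps log2(table size) bits.
  return (hash >> shift) & (primary_table_size - 1);
}

static uint32_t ExampleSecondaryIndex(uint32_t primary_index,
                                      uint32_t name_field, uint32_t flags,
                                      uint32_t shift,
                                      uint32_t secondary_table_size) {
  uint32_t hash = primary_index - (name_field >> shift) + (flags >> shift);
  return hash & (secondary_table_size - 1);
}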
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- // Load the global or builtins object from the current context.
- __ Ldr(prototype, GlobalObjectMemOperand());
- // Load the native context from the global or builtins object.
- __ Ldr(prototype,
- FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- __ Ldr(prototype, ContextMemOperand(prototype, index));
- // Load the initial map. The global functions all have initial maps.
- __ Ldr(prototype,
- FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss) {
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
Isolate* isolate = masm->isolate();
// Get the global function with the given index.
Handle<JSFunction> function(
@@ -244,50 +220,9 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
}
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index,
- Representation representation) {
- ASSERT(!representation.IsDouble());
- USE(representation);
- if (inobject) {
- int offset = index * kPointerSize;
- __ Ldr(dst, FieldMemOperand(src, offset));
- } else {
- // Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ Ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- __ Ldr(dst, FieldMemOperand(dst, offset));
- }
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- ASSERT(!AreAliased(receiver, scratch));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss_label);
-
- // Check that the object is a JS array.
- __ JumpIfNotObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE,
- miss_label);
-
- // Load length directly from the JS array.
- __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Ret();
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+ MacroAssembler* masm, Register receiver, Register scratch1,
+ Register scratch2, Label* miss_label) {
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
// TryGetFunctionPrototype can't put the result directly in x0 because the
// 3 inputs registers can't alias and we call this function from
@@ -301,31 +236,134 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
-void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<JSGlobalObject> global,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+ MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+ Register scratch, Label* miss) {
Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
+ DCHECK(cell->value()->IsTheHole());
__ Mov(scratch, Operand(cell));
__ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
}
-void StoreStubCompiler::GenerateNegativeHolderLookup(
- MacroAssembler* masm,
- Handle<JSObject> holder,
- Register holder_reg,
- Handle<Name> name,
- Label* miss) {
- if (holder->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(
- masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
- } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
- GenerateDictionaryNegativeLookup(
- masm, miss, holder_reg, name, scratch1(), scratch2());
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+ Register holder, Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+
+ __ Push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ Register scratch = name;
+ __ Mov(scratch, Operand(interceptor));
+ __ Push(scratch, receiver, holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+ NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+// Generate call to api function.
+void PropertyHandlerCompiler::GenerateFastApiCall(
+ MacroAssembler* masm, const CallOptimization& optimization,
+ Handle<Map> receiver_map, Register receiver, Register scratch,
+ bool is_store, int argc, Register* values) {
+ DCHECK(!AreAliased(receiver, scratch));
+
+ MacroAssembler::PushPopQueue queue(masm);
+ queue.Queue(receiver);
+ // Write the arguments to the stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc - 1 - i];
+ DCHECK(!AreAliased(receiver, scratch, arg));
+ queue.Queue(arg);
+ }
+ queue.PushQueued();
+
+ DCHECK(optimization.is_simple_api_call());
+
+  // ABI for CallApiFunctionStub.
+ Register callee = x0;
+ Register call_data = x4;
+ Register holder = x2;
+ Register api_function_address = x1;
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Mov(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ LoadObject(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+
+ Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ LoadObject(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ LoadObject(call_data, api_call_info);
+ __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ } else {
+ __ LoadObject(call_data, call_data_obj);
+ }
+
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
+ ExternalReference ref = ExternalReference(
+ &fun, ExternalReference::DIRECT_API_CALL, masm->isolate());
+ __ Mov(api_function_address, ref);
+
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ Bind(label);
+ __ Mov(this->name(), Operand(name));
}
}
@@ -334,22 +372,13 @@ void StoreStubCompiler::GenerateNegativeHolderLookup(
// When leaving generated code after success, the receiver_reg and storage_reg
// may be clobbered. Upon branch to miss_label, the receiver and name registers
// have their original values.
-void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register storage_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss_label,
- Label* slow) {
+void NamedStoreHandlerCompiler::GenerateStoreTransition(
+ Handle<Map> transition, Handle<Name> name, Register receiver_reg,
+ Register storage_reg, Register value_reg, Register scratch1,
+ Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
Label exit;
- ASSERT(!AreAliased(receiver_reg, storage_reg, value_reg,
+ DCHECK(!AreAliased(receiver_reg, storage_reg, value_reg,
scratch1, scratch2, scratch3));
// We don't need scratch3.
@@ -359,10 +388,10 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
- ASSERT(!representation.IsNone());
+ DCHECK(!representation.IsNone());
if (details.type() == CONSTANT) {
- Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
__ LoadObject(scratch1, constant);
__ Cmp(value_reg, scratch1);
__ B(ne, miss_label);
@@ -387,7 +416,7 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ Bind(&do_store);
}
} else if (representation.IsDouble()) {
- UseScratchRegisterScope temps(masm);
+ UseScratchRegisterScope temps(masm());
DoubleRegister temp_double = temps.AcquireD();
__ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
@@ -399,24 +428,24 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ Bind(&do_store);
- __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2, temp_double);
+ __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2, temp_double,
+ NoReg, MUTABLE);
}
- // Stub never generated for non-global objects that require access checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+ // Stub never generated for objects that require access checks.
+ DCHECK(!transition->is_access_check_needed());
// Perform map transition for the receiver if necessary.
- if ((details.type() == FIELD) &&
- (object->map()->unused_property_fields() == 0)) {
+ if (details.type() == FIELD &&
+ Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ Mov(scratch1, Operand(transition));
__ Push(receiver_reg, scratch1, value_reg);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- masm->isolate()),
- 3,
- 1);
+ isolate()),
+ 3, 1);
return;
}
@@ -435,7 +464,7 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
- ASSERT(value_reg.is(x0));
+ DCHECK(value_reg.is(x0));
__ Ret();
return;
}
@@ -446,7 +475,7 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
+ index -= transition->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check = representation.IsTagged()
@@ -454,7 +483,7 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Register prop_reg = representation.IsDouble() ? storage_reg : value_reg;
if (index < 0) {
// Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
+ int offset = transition->instance_size() + (index * kPointerSize);
__ Str(prop_reg, FieldMemOperand(receiver_reg, offset));
if (!representation.IsSmi()) {
@@ -497,311 +526,57 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ Bind(&exit);
// Return the value (register x0).
- ASSERT(value_reg.is(x0));
+ DCHECK(value_reg.is(x0));
__ Ret();
}
-// Generate StoreField code, value is passed in x0 register.
-// When leaving generated code after success, the receiver_reg and name_reg may
-// be clobbered. Upon branch to miss_label, the receiver and name registers have
-// their original values.
-void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- // x0 : value
- Label exit;
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- int index = lookup->GetFieldIndex().field_index();
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
-
- Representation representation = lookup->representation();
- ASSERT(!representation.IsNone());
- if (representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_label);
- } else if (representation.IsHeapObject()) {
- __ JumpIfSmi(value_reg, miss_label);
- HeapType* field_type = lookup->GetFieldType();
- HeapType::Iterator<Map> it = field_type->Classes();
- if (!it.Done()) {
- __ Ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- Label do_store;
- while (true) {
- __ CompareMap(scratch1, it.Current());
- it.Advance();
- if (it.Done()) {
- __ B(ne, miss_label);
- break;
- }
- __ B(eq, &do_store);
- }
- __ Bind(&do_store);
- }
- } else if (representation.IsDouble()) {
- UseScratchRegisterScope temps(masm);
- DoubleRegister temp_double = temps.AcquireD();
-
- __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
-
- // Load the double storage.
- if (index < 0) {
- int offset = (index * kPointerSize) + object->map()->instance_size();
- __ Ldr(scratch1, FieldMemOperand(receiver_reg, offset));
- } else {
- int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
- __ Ldr(scratch1,
- FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ Ldr(scratch1, FieldMemOperand(scratch1, offset));
- }
-
- // Store the value into the storage.
- Label do_store, heap_number;
-
- __ JumpIfSmi(value_reg, &do_store);
-
- __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
- miss_label, DONT_DO_SMI_CHECK);
- __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-
- __ Bind(&do_store);
- __ Str(temp_double, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
-
- // Return the value (register x0).
- ASSERT(value_reg.is(x0));
- __ Ret();
- return;
- }
-
- // TODO(verwaest): Share this code as a code stub.
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ Str(value_reg, FieldMemOperand(receiver_reg, offset));
-
- if (!representation.IsSmi()) {
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ Mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
- }
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array
- __ Ldr(scratch1,
- FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ Str(value_reg, FieldMemOperand(scratch1, offset));
-
- if (!representation.IsSmi()) {
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ Mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
- }
- }
-
- __ Bind(&exit);
- // Return the value (register x0).
- ASSERT(value_reg.is(x0));
- __ Ret();
-}
-
-
-void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
- Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ Bind(label);
- __ Mov(this->name(), Operand(name));
- }
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
-
- __ Push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ Mov(scratch, Operand(interceptor));
- __ Push(scratch, receiver, holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj,
- IC::UtilityId id) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(id), masm->isolate()),
- StubCache::kInterceptorArgsLength);
-}
-
-
-// Generate call to api function.
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch,
- bool is_store,
- int argc,
- Register* values) {
- ASSERT(!AreAliased(receiver, scratch));
-
- MacroAssembler::PushPopQueue queue(masm);
- queue.Queue(receiver);
- // Write the arguments to the stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc-1-i];
- ASSERT(!AreAliased(receiver, scratch, arg));
- queue.Queue(arg);
- }
- queue.PushQueued();
-
- ASSERT(optimization.is_simple_api_call());
-
- // Abi for CallApiFunctionStub.
- Register callee = x0;
- Register call_data = x4;
- Register holder = x2;
- Register api_function_address = x1;
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Mov(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ LoadObject(holder, api_holder);
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup,
+ Register value_reg,
+ Label* miss_label) {
+ DCHECK(lookup->representation().IsHeapObject());
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
+ __ Ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ while (true) {
+ __ CompareMap(scratch1(), it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ B(ne, miss_label);
break;
+ }
+ __ B(eq, &do_store);
}
+ __ Bind(&do_store);
- Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ LoadObject(callee, function);
-
- bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ LoadObject(call_data, api_call_info);
- __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
- call_data_undefined = true;
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
- } else {
- __ LoadObject(call_data, call_data_obj);
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference ref = ExternalReference(&fun,
- ExternalReference::DIRECT_API_CALL,
- masm->isolate());
- __ Mov(api_function_address, ref);
-
- // Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
- __ TailCallStub(&stub);
-}
-
-
-void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
+ StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+ lookup->representation());
+ GenerateTailCall(masm(), stub.GetCode());
}
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<Name> name,
- Label* miss,
- PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
+Register PropertyHandlerCompiler::CheckPrototypes(
+ Register object_reg, Register holder_reg, Register scratch1,
+ Register scratch2, Handle<Name> name, Label* miss,
+ PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
// object_reg and holder_reg registers can alias.
- ASSERT(!AreAliased(object_reg, scratch1, scratch2));
- ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
+ DCHECK(!AreAliased(object_reg, scratch1, scratch2));
+ DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
Handle<JSObject> current = Handle<JSObject>::null();
- if (type->IsConstant()) {
- current = Handle<JSObject>::cast(type->AsConstant()->Value());
+ if (type()->IsConstant()) {
+ current = Handle<JSObject>::cast(type()->AsConstant()->Value());
}
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
- Handle<Map> holder_map(holder->map());
+ Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
@@ -809,18 +584,18 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
// Only global objects and objects that do not require access
// checks are allowed in stubs.
- ASSERT(current_map->IsJSGlobalProxyMap() ||
+ DCHECK(current_map->IsJSGlobalProxyMap() ||
!current_map->is_access_check_needed());
prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap() &&
- !current_map->IsJSGlobalProxyMap()) {
+ !current_map->IsJSGlobalObjectMap()) {
+ DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
if (!name->IsUniqueName()) {
- ASSERT(name->IsString());
+ DCHECK(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
- ASSERT(current.is_null() ||
+ DCHECK(current.is_null() ||
(current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound));
@@ -831,13 +606,14 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
reg = holder_reg; // From now on the object will be in holder_reg.
__ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
- bool need_map = (depth != 1 || check == CHECK_ALL_MAPS) ||
- heap()->InNewSpace(*prototype);
- Register map_reg = NoReg;
- if (need_map) {
- map_reg = scratch1;
- __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
+ // Two possible reasons for loading the prototype from the map:
+ // (1) Can't store references to new space in code.
+ // (2) Handler is shared for all receivers with the same prototype
+ // map (but not necessarily the same prototype instance).
+ bool load_prototype_from_map =
+ heap()->InNewSpace(*prototype) || depth == 1;
+ Register map_reg = scratch1;
+ __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
if (depth != 1 || check == CHECK_ALL_MAPS) {
__ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
@@ -846,6 +622,9 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
if (current_map->IsJSGlobalProxyMap()) {
UseScratchRegisterScope temps(masm());
__ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
@@ -857,12 +636,9 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
reg = holder_reg; // From now on the object will be in holder_reg.
- if (heap()->InNewSpace(*prototype)) {
- // The prototype is in new space; we cannot store a reference to it
- // in the code. Load it from the map.
+ if (load_prototype_from_map) {
__ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
} else {
- // The prototype is in old space; load it directly.
__ Mov(reg, Operand(prototype));
}
}
@@ -882,7 +658,7 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
}
// Perform security check for access to the global object.
- ASSERT(current_map->IsJSGlobalProxyMap() ||
+ DCHECK(current_map->IsJSGlobalProxyMap() ||
!current_map->is_access_check_needed());
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
@@ -893,7 +669,7 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
}
-void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
Label success;
__ B(&success);
@@ -906,12 +682,12 @@ void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
}
-void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
Label success;
__ B(&success);
- GenerateRestoreName(masm(), miss, name);
+ GenerateRestoreName(miss, name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ Bind(&success);
@@ -919,84 +695,16 @@ void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
}
-Register LoadStubCompiler::CallbackHandlerFrontend(Handle<HeapType> type,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<Name> name,
- Handle<Object> callback) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
- // HandlerFrontendHeader can return its result into scratch1() so do not
- // use it.
- Register scratch2 = this->scratch2();
- Register scratch3 = this->scratch3();
- Register dictionary = this->scratch4();
- ASSERT(!AreAliased(reg, scratch2, scratch3, dictionary));
-
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- // Load the properties dictionary.
- __ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done;
- NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
- &miss,
- &probe_done,
- dictionary,
- this->name(),
- scratch2,
- scratch3);
- __ Bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch3 contains the
- // pointer into the dictionary. Check that the value is the callback.
- Register pointer = scratch3;
- const int kElementsStartOffset = NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ Ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
- __ Cmp(scratch2, Operand(callback));
- __ B(ne, &miss);
- }
-
- HandlerFrontendFooter(name, &miss);
- return reg;
-}
-
-
-void LoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
- __ Mov(receiver(), reg);
- if (kind() == Code::LOAD_IC) {
- LoadFieldStub stub(isolate(),
- field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode());
- } else {
- KeyedLoadFieldStub stub(isolate(),
- field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode());
- }
-}
-
-
-void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
__ LoadObject(x0, value);
__ Ret();
}
-void LoadStubCompiler::GenerateLoadCallback(
- Register reg,
- Handle<ExecutableAccessorInfo> callback) {
- ASSERT(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+ Register reg, Handle<ExecutableAccessorInfo> callback) {
+ DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
// Build ExecutableAccessorInfo::args_ list on the stack and push property
// name below the exit frame to make GC aware of them and store pointers to
@@ -1048,16 +756,13 @@ void LoadStubCompiler::GenerateLoadCallback(
}
-void LoadStubCompiler::GenerateLoadInterceptor(
- Register holder_reg,
- Handle<Object> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<Name> name) {
- ASSERT(!AreAliased(receiver(), this->name(),
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg,
+ LookupResult* lookup,
+ Handle<Name> name) {
+ DCHECK(!AreAliased(receiver(), this->name(),
scratch1(), scratch2(), scratch3()));
- ASSERT(interceptor_holder->HasNamedInterceptor());
- ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added later.
@@ -1067,10 +772,12 @@ void LoadStubCompiler::GenerateLoadInterceptor(
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- ExecutableAccessorInfo* callback =
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
- compile_followup_inline = callback->getter() != NULL &&
- callback->IsCompatibleReceiver(*object);
+ Handle<ExecutableAccessorInfo> callback(
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
+ compile_followup_inline =
+ callback->getter() != NULL &&
+ ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), callback,
+ type());
}
}
@@ -1078,13 +785,13 @@ void LoadStubCompiler::GenerateLoadInterceptor(
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
- ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
// Preserve the receiver register explicitly whenever it is different from
// the holder and it is needed should the interceptor return without any
// result. The CALLBACKS case needs the receiver to be passed into C++ code,
// the FIELD case might cause a miss during the prototype check.
- bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+ bool must_perform_prototype_check = *holder() != lookup->holder();
 bool must_preserve_receiver_reg = !receiver().Is(holder_reg) &&
 (lookup->type() == CALLBACKS || must_perform_prototype_check);
@@ -1101,7 +808,7 @@ void LoadStubCompiler::GenerateLoadInterceptor(
// interceptor's holder has been compiled before (see a caller
// of this method.)
CompileCallLoadPropertyWithInterceptor(
- masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ masm(), receiver(), holder_reg, this->name(), holder(),
IC::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
@@ -1121,36 +828,34 @@ void LoadStubCompiler::GenerateLoadInterceptor(
}
// Leave the internal frame.
}
- GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
+ GenerateLoadPostInterceptor(holder_reg, name, lookup);
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
- PushInterceptorArguments(
- masm(), receiver(), holder_reg, this->name(), interceptor_holder);
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+ holder());
ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor),
isolate());
- __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
+ __ TailCallExternalReference(
+ ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
}
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+ Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- ASM_LOCATION("StoreStubCompiler::CompileStoreCallback");
- Register holder_reg = HandlerFrontend(
- IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
+ ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreCallback");
+ Register holder_reg = Frontend(receiver(), name);
// Stub never generated for non-global objects that require access checks.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+ DCHECK(holder()->IsJSGlobalProxy() || !holder()->IsAccessCheckNeeded());
// receiver() and holder_reg can alias.
- ASSERT(!AreAliased(receiver(), scratch1(), scratch2(), value()));
- ASSERT(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
+ DCHECK(!AreAliased(receiver(), scratch1(), scratch2(), value()));
+ DCHECK(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
__ Mov(scratch1(), Operand(callback));
__ Mov(scratch2(), Operand(name));
__ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
@@ -1169,10 +874,8 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
#define __ ACCESS_MASM(masm)
-void StoreStubCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm,
- Handle<HeapType> type,
- Register receiver,
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
// -- lr : return address
@@ -1190,8 +893,7 @@ void StoreStubCompiler::GenerateStoreViaSetter(
if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
__ Ldr(receiver,
- FieldMemOperand(
- receiver, JSGlobalObject::kGlobalReceiverOffset));
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
}
__ Push(receiver, value());
ParameterCount actual(1);
@@ -1218,18 +920,17 @@ void StoreStubCompiler::GenerateStoreViaSetter(
#define __ ACCESS_MASM(masm())
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> object,
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
Handle<Name> name) {
Label miss;
- ASM_LOCATION("StoreStubCompiler::CompileStoreInterceptor");
+ ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreInterceptor");
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
+ ExternalReference store_ic_property = ExternalReference(
+ IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
__ TailCallExternalReference(store_ic_property, 3, 1);
// Return the generated code.
@@ -1237,67 +938,41 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
- Handle<JSObject> last,
- Handle<Name> name) {
- NonexistentHandlerFrontend(type, last, name);
-
- // Return undefined if maps of the full prototype chain are still the
- // same and no global property with this name contains a value.
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- __ Ret();
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
// TODO(all): The so-called scratch registers are significant in some cases. For
-// example, KeyedStoreStubCompiler::registers()[3] (x3) is actually used for
-// KeyedStoreCompiler::transition_map(). We should verify which registers are
-// actually scratch registers, and which are important. For now, we use the same
-// assignments as ARM to remain on the safe side.
+// example, PropertyAccessCompiler::keyed_store_calling_convention()[3] (x3) is
+// actually used for KeyedStoreCompiler::transition_map(). We should verify
+// which registers are actually scratch registers, and which are important.
+// For now, we use the same assignments as ARM to remain on the safe side.
-Register* LoadStubCompiler::registers() {
+Register* PropertyAccessCompiler::load_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { x0, x2, x3, x1, x4, x5 };
- return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
- // receiver, name/key, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { x1, x0, x2, x3, x4, x5 };
+ Register receiver = LoadIC::ReceiverRegister();
+ Register name = LoadIC::NameRegister();
+ static Register registers[] = { receiver, name, x3, x0, x4, x5 };
return registers;
}
-Register StoreStubCompiler::value() {
- return x0;
-}
-
-
-Register* StoreStubCompiler::registers() {
+Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, value, scratch1, scratch2, scratch3.
- static Register registers[] = { x1, x2, x3, x4, x5 };
+ Register receiver = StoreIC::ReceiverRegister();
+ Register name = StoreIC::NameRegister();
+ DCHECK(x3.is(KeyedStoreIC::MapRegister()));
+ static Register registers[] = { receiver, name, x3, x4, x5 };
return registers;
}
-Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3.
- static Register registers[] = { x2, x1, x3, x4, x5 };
- return registers;
-}
+Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
#undef __
#define __ ACCESS_MASM(masm)
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<HeapType> type,
- Register receiver,
- Handle<JSFunction> getter) {
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> getter) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1306,8 +981,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
__ Ldr(receiver,
- FieldMemOperand(
- receiver, JSGlobalObject::kGlobalReceiverOffset));
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
}
__ Push(receiver);
ParameterCount actual(0);
@@ -1331,54 +1005,58 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<HeapType> type,
- Handle<GlobalObject> global,
- Handle<PropertyCell> cell,
- Handle<Name> name,
- bool is_dont_delete) {
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+ Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
- HandlerFrontendHeader(type, receiver(), global, name, &miss);
+ FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
- __ Mov(x3, Operand(cell));
- __ Ldr(x4, FieldMemOperand(x3, Cell::kValueOffset));
+ Register result = StoreIC::ValueRegister();
+ __ Mov(result, Operand(cell));
+ __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
// Check for deleted property if property can actually be deleted.
- if (!is_dont_delete) {
- __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &miss);
+ if (is_configurable) {
+ __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
}
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
- __ Mov(x0, x4);
__ Ret();
- HandlerFrontendFooter(name, &miss);
+ FrontendFooter(name, &miss);
// Return the generated code.
return GetCode(kind(), Code::NORMAL, name);
}
-Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
- TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
Label miss;
if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
+ // In case we are compiling an IC for dictionary loads and stores, just
+ // check whether the name is unique.
+ if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ __ JumpIfNotUniqueName(this->name(), &miss);
+ } else {
+ __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
+ }
}
Label number_case;
Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
__ JumpIfSmi(receiver(), smi_target);
+ // Polymorphic keyed stores may use the map register
Register map_reg = scratch1();
+ DCHECK(kind() != Code::KEYED_STORE_IC ||
+ map_reg.is(KeyedStoreIC::MapRegister()));
__ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = types->length();
int number_of_handled_maps = 0;
@@ -1391,14 +1069,14 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
__ Cmp(map_reg, Operand(map));
__ B(ne, &try_next);
if (type->Is(HeapType::Number())) {
- ASSERT(!number_case.is_unused());
+ DCHECK(!number_case.is_unused());
__ Bind(&number_case);
}
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
__ Bind(&try_next);
}
}
- ASSERT(number_of_handled_maps != 0);
+ DCHECK(number_of_handled_maps != 0);
__ Bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
@@ -1406,28 +1084,16 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
// Return the generated code.
InlineCacheState state =
(number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
- return GetICCode(kind(), type, name, state);
+ return GetCode(kind(), type, name, state);
}
-void StoreStubCompiler::GenerateStoreArrayLength() {
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver(), value());
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
- masm()->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps) {
Label miss;
- ASM_LOCATION("KeyedStoreStubCompiler::CompileStorePolymorphic");
+ ASM_LOCATION("PropertyICCompiler::CompileStorePolymorphic");
__ JumpIfSmi(receiver(), &miss);
@@ -1450,35 +1116,32 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ Bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
- return GetICCode(
- kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
}
#undef __
#define __ ACCESS_MASM(masm)
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+void ElementHandlerCompiler::GenerateLoadDictionaryElement(
MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key
- // -- x1 : receiver
- // -----------------------------------
+ // The return address is in lr.
Label slow, miss;
Register result = x0;
- Register key = x0;
- Register receiver = x1;
+ Register key = LoadIC::NameRegister();
+ Register receiver = LoadIC::ReceiverRegister();
+ DCHECK(receiver.is(x1));
+ DCHECK(key.is(x2));
__ JumpIfNotSmi(key, &miss);
__ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadFromNumberDictionary(&slow, x4, key, result, x2, x3, x5, x6);
+ __ LoadFromNumberDictionary(&slow, x4, key, result, x7, x3, x5, x6);
__ Ret();
__ Bind(&slow);
__ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x2, x3);
+ masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x4, x3);
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
// Miss case, call the runtime.
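
The CheckPrototypes hunks above walk the receiver's prototype chain, comparing each object's map against the map recorded at compile time and loading the next prototype either from that map or as an embedded constant. A minimal C++ sketch of the idea, using hypothetical SimpleObject/SimpleMap types rather than V8's real classes and leaving out the dictionary-map negative lookups and global-proxy access checks:

#include <vector>

struct SimpleMap;
struct SimpleObject {
  SimpleMap* map;
};
struct SimpleMap {
  SimpleObject* prototype;  // stands in for what Map::kPrototypeOffset holds
};

// True when every object from the receiver up to (but excluding) the holder
// still has the map recorded when the handler was compiled; a mismatch plays
// the role of the branch to the miss label in the generated code.
static bool CheckPrototypeChain(SimpleObject* receiver, SimpleObject* holder,
                                const std::vector<SimpleMap*>& expected_maps) {
  SimpleObject* current = receiver;
  for (SimpleMap* expected : expected_maps) {
    if (current == holder) return true;
    if (current->map != expected) return false;  // miss
    current = current->map->prototype;           // load prototype via the map
  }
  return current == holder;
}
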
diff --git a/deps/v8/src/arm64/utils-arm64.cc b/deps/v8/src/arm64/utils-arm64.cc
index 53a2957e9..dbfb87638 100644
--- a/deps/v8/src/arm64/utils-arm64.cc
+++ b/deps/v8/src/arm64/utils-arm64.cc
@@ -4,7 +4,7 @@
#if V8_TARGET_ARCH_ARM64
-#include "arm64/utils-arm64.h"
+#include "src/arm64/utils-arm64.h"
namespace v8 {
@@ -15,7 +15,7 @@ namespace internal {
int CountLeadingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for ARM64 hosts.
- ASSERT((width == 32) || (width == 64));
+ DCHECK((width == 32) || (width == 64));
int count = 0;
uint64_t bit_test = 1UL << (width - 1);
while ((count < width) && ((bit_test & value) == 0)) {
@@ -28,7 +28,7 @@ int CountLeadingZeros(uint64_t value, int width) {
int CountLeadingSignBits(int64_t value, int width) {
// TODO(jbramley): Optimize this for ARM64 hosts.
- ASSERT((width == 32) || (width == 64));
+ DCHECK((width == 32) || (width == 64));
if (value >= 0) {
return CountLeadingZeros(value, width) - 1;
} else {
@@ -39,7 +39,7 @@ int CountLeadingSignBits(int64_t value, int width) {
int CountTrailingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for ARM64 hosts.
- ASSERT((width == 32) || (width == 64));
+ DCHECK((width == 32) || (width == 64));
int count = 0;
while ((count < width) && (((value >> count) & 1) == 0)) {
count++;
@@ -51,7 +51,7 @@ int CountTrailingZeros(uint64_t value, int width) {
int CountSetBits(uint64_t value, int width) {
// TODO(jbramley): Would it be useful to allow other widths? The
// implementation already supports them.
- ASSERT((width == 32) || (width == 64));
+ DCHECK((width == 32) || (width == 64));
// Mask out unused bits to ensure that they are not counted.
value &= (0xffffffffffffffffUL >> (64-width));
@@ -78,8 +78,13 @@ int CountSetBits(uint64_t value, int width) {
}
+uint64_t LargestPowerOf2Divisor(uint64_t value) {
+ return value & -value;
+}
+
+
int MaskToBit(uint64_t mask) {
- ASSERT(CountSetBits(mask, 64) == 1);
+ DCHECK(CountSetBits(mask, 64) == 1);
return CountTrailingZeros(mask, 64);
}
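
The new LargestPowerOf2Divisor helper relies on the two's-complement identity that value & -value isolates the lowest set bit, which for a non-zero value is exactly the largest power of two dividing it (and 0 when value is 0). A standalone C++ sketch of the same trick, outside V8:

#include <cassert>
#include <cstdint>

// Lowest set bit == largest power-of-two divisor; 0 maps to 0.
static uint64_t LargestPowerOf2Divisor(uint64_t value) {
  return value & (0 - value);  // same as value & -value, written unsigned
}

int main() {
  assert(LargestPowerOf2Divisor(12) == 4);      // 12 = 4 * 3
  assert(LargestPowerOf2Divisor(0x30) == 0x10);
  assert(LargestPowerOf2Divisor(7) == 1);
  assert(LargestPowerOf2Divisor(0) == 0);
  // For non-zero values this equals 1 shifted left by the trailing-zero count,
  // i.e. 1 << CountTrailingZeros(value, 64) in this file's terms.
  return 0;
}
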
diff --git a/deps/v8/src/arm64/utils-arm64.h b/deps/v8/src/arm64/utils-arm64.h
index c739e50f2..c22ed9aed 100644
--- a/deps/v8/src/arm64/utils-arm64.h
+++ b/deps/v8/src/arm64/utils-arm64.h
@@ -6,8 +6,9 @@
#define V8_ARM64_UTILS_ARM64_H_
#include <cmath>
-#include "v8.h"
-#include "arm64/constants-arm64.h"
+#include "src/v8.h"
+
+#include "src/arm64/constants-arm64.h"
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
@@ -56,6 +57,7 @@ int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width);
+uint64_t LargestPowerOf2Divisor(uint64_t value);
int MaskToBit(uint64_t mask);
@@ -86,13 +88,13 @@ inline bool IsQuietNaN(T num) {
// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
- ASSERT(isnan(num));
+ DCHECK(std::isnan(num));
return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask);
}
inline float ToQuietNaN(float num) {
- ASSERT(isnan(num));
+ DCHECK(std::isnan(num));
return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask);
}
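
ToQuietNaN works by OR-ing the quiet bit into the NaN's raw bit pattern. The masks kDQuietNanMask and kSQuietNanMask are not shown in this excerpt; for IEEE 754 they presumably select bit 51 of a double and bit 22 of a float (the top mantissa bit). A rough standalone equivalent, using memcpy in place of V8's rawbits helpers and with the mask values stated as assumptions:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Assumed quiet-NaN bit masks (top bit of the mantissa).
constexpr uint64_t kAssumedDQuietNanMask = 0x0008000000000000ULL;
constexpr uint32_t kAssumedSQuietNanMask = 0x00400000U;

static double ToQuietNaN(double num) {
  assert(std::isnan(num));
  uint64_t raw;
  std::memcpy(&raw, &num, sizeof(raw));  // double_to_rawbits
  raw |= kAssumedDQuietNanMask;          // set the quiet bit
  std::memcpy(&num, &raw, sizeof(num));  // rawbits_to_double
  return num;
}

static float ToQuietNaN(float num) {
  assert(std::isnan(num));
  uint32_t raw;
  std::memcpy(&raw, &num, sizeof(raw));
  raw |= kAssumedSQuietNanMask;
  std::memcpy(&num, &raw, sizeof(num));
  return num;
}
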