author     Trevor Norris <trev.norris@gmail.com>    2013-03-18 13:49:34 -0700
committer  Ben Noordhuis <info@bnoordhuis.nl>       2013-03-20 01:11:01 +0100
commit     83261e789eb903da39f279cb5a161611482e7df5 (patch)
tree       4133b5ca9f53bed4365e1a94544a227d68a0cf12 /deps/v8/src/mips
parent     a05f973f82d2be8527aad4c371d40d3c7e4c564e (diff)
download   node-new-83261e789eb903da39f279cb5a161611482e7df5.tar.gz
deps: update v8 to 3.17.13
Diffstat (limited to 'deps/v8/src/mips')
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h              77
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc                 172
-rw-r--r--  deps/v8/src/mips/assembler-mips.h                  164
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc                  191
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc               2088
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.h                 189
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc                   292
-rw-r--r--  deps/v8/src/mips/codegen-mips.h                     22
-rw-r--r--  deps/v8/src/mips/constants-mips.cc                   2
-rw-r--r--  deps/v8/src/mips/constants-mips.h                   22
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc               477
-rw-r--r--  deps/v8/src/mips/disasm-mips.cc                     13
-rw-r--r--  deps/v8/src/mips/frames-mips.cc                      9
-rw-r--r--  deps/v8/src/mips/frames-mips.h                      40
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc              332
-rw-r--r--  deps/v8/src/mips/ic-mips.cc                        312
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc          1687
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.h             44
-rw-r--r--  deps/v8/src/mips/lithium-gap-resolver-mips.cc        7
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc                   422
-rw-r--r--  deps/v8/src/mips/lithium-mips.h                    334
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc           299
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h             77
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.cc     31
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc                 134
-rw-r--r--  deps/v8/src/mips/simulator-mips.h                    5
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc               1915
27 files changed, 4980 insertions, 4377 deletions
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 3e726a7545..9c9f611ed0 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -1,3 +1,4 @@
+
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
@@ -65,7 +66,7 @@ Operand::Operand(const ExternalReference& f) {
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
@@ -79,9 +80,36 @@ bool Operand::is_reg() const {
}
+int Register::NumAllocatableRegisters() {
+ if (CpuFeatures::IsSupported(FPU)) {
+ return kMaxNumAllocatableRegisters;
+ } else {
+ return kMaxNumAllocatableRegisters - kGPRsPerNonFPUDouble;
+ }
+}
+
+
+int DoubleRegister::NumRegisters() {
+ if (CpuFeatures::IsSupported(FPU)) {
+ return FPURegister::kMaxNumRegisters;
+ } else {
+ return 1;
+ }
+}
+
+
+int DoubleRegister::NumAllocatableRegisters() {
+ if (CpuFeatures::IsSupported(FPU)) {
+ return FPURegister::kMaxNumAllocatableRegisters;
+ } else {
+ return 1;
+ }
+}
+
+
int FPURegister::ToAllocationIndex(FPURegister reg) {
ASSERT(reg.code() % 2 == 0);
- ASSERT(reg.code() / 2 < kNumAllocatableRegisters);
+ ASSERT(reg.code() / 2 < kMaxNumAllocatableRegisters);
ASSERT(reg.is_valid());
ASSERT(!reg.is(kDoubleRegZero));
ASSERT(!reg.is(kLithiumScratchDouble));
@@ -111,14 +139,14 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
return Assembler::target_address_at(pc_);
}
Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) ||
- rmode_ == RUNTIME_ENTRY ||
+ IsRuntimeEntry(rmode_) ||
rmode_ == EMBEDDED_OBJECT ||
rmode_ == EXTERNAL_REFERENCE);
// Read the address of the word containing the target_address in an
@@ -146,7 +174,7 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
Assembler::set_target_address_at(pc_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -203,6 +231,19 @@ Address* RelocInfo::target_reference_address() {
}
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode mode) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ if (target_address() != target) set_target_address(target, mode);
+}
+
+
Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = Memory::Address_at(pc_);
@@ -231,6 +272,24 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
}
+static const int kNoCodeAgeSequenceLength = 7;
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ return Code::GetCodeFromTargetAddress(
+ Memory::Address_at(pc_ + Assembler::kInstrSize *
+ (kNoCodeAgeSequenceLength - 1)));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Memory::Address_at(pc_ + Assembler::kInstrSize *
+ (kNoCodeAgeSequenceLength - 1)) =
+ stub->instruction_start();
+}
+
+
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -302,6 +361,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -311,7 +372,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
}
@@ -328,6 +389,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@@ -336,7 +399,7 @@ void RelocInfo::Visit(Heap* heap) {
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index a4563a64f3..4c11c7f549 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -47,7 +47,13 @@ namespace internal {
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
+
+
+ExternalReference ExternalReference::cpu_features() {
+ ASSERT(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
// Get the CPU features enabled by the build. For cross compilation the
@@ -57,7 +63,7 @@ unsigned CpuFeatures::found_by_runtime_probing_ = 0;
static uint64_t CpuFeaturesImpliedByCompiler() {
uint64_t answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
- answer |= 1u << FPU;
+ answer |= static_cast<uint64_t>(1) << FPU;
#endif // def CAN_USE_FPU_INSTRUCTIONS
#ifdef __mips__
@@ -65,7 +71,7 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
// generation even when generating snapshots. This won't work for cross
// compilation.
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
- answer |= 1u << FPU;
+ answer |= static_cast<uint64_t>(1) << FPU;
#endif // defined(__mips_hard_float) && __mips_hard_float != 0
#endif // def __mips__
@@ -73,6 +79,33 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
}
+const char* DoubleRegister::AllocationIndexToString(int index) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ const char* const names[] = {
+ "f0",
+ "f2",
+ "f4",
+ "f6",
+ "f8",
+ "f10",
+ "f12",
+ "f14",
+ "f16",
+ "f18",
+ "f20",
+ "f22",
+ "f24",
+ "f26"
+ };
+ return names[index];
+ } else {
+ ASSERT(index == 0);
+ return "sfpd0";
+ }
+}
+
+
void CpuFeatures::Probe() {
unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
CpuFeaturesImpliedByCompiler());
@@ -96,15 +129,15 @@ void CpuFeatures::Probe() {
#if !defined(__mips__)
// For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
if (FLAG_enable_fpu) {
- supported_ |= 1u << FPU;
+ supported_ |= static_cast<uint64_t>(1) << FPU;
}
#else
// Probe for additional features not already known to be available.
if (OS::MipsCpuHasFeature(FPU)) {
// This implementation also sets the FPU flags if
// runtime detection of FPU returns true.
- supported_ |= 1u << FPU;
- found_by_runtime_probing_ |= 1u << FPU;
+ supported_ |= static_cast<uint64_t>(1) << FPU;
+ found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << FPU;
}
#endif
}
@@ -221,7 +254,7 @@ Operand::Operand(Handle<Object> handle) {
} else {
// No relocation needed.
imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
}
@@ -267,45 +300,11 @@ const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
-// Spare buffer.
-static const int kMinimalBufferSize = 4 * KB;
-
-
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
- : AssemblerBase(arg_isolate),
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
- positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code) {
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
-
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
- // Set up buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ positions_recorder_(this) {
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
last_trampoline_pool_end_ = 0;
no_trampoline_pool_before_ = 0;
@@ -324,18 +323,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
}
-Assembler::~Assembler() {
- if (own_buffer_) {
- if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
void Assembler::GetCode(CodeDesc* desc) {
ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
@@ -602,7 +589,7 @@ bool Assembler::IsNop(Instr instr, unsigned int type) {
int32_t Assembler::GetBranchOffset(Instr instr) {
ASSERT(IsBranch(instr));
- return ((int16_t)(instr & kImm16Mask)) << 2;
+ return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
@@ -735,7 +722,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
ASSERT(IsOri(instr_ori));
- uint32_t imm = (uint32_t)buffer_ + target_pos;
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
ASSERT((imm & 3) == 0);
instr_lui &= ~kImm16Mask;
@@ -746,7 +733,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
instr_at_put(pos + 1 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
} else {
- uint32_t imm28 = (uint32_t)buffer_ + target_pos;
+ uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
imm28 &= kImm28Mask;
ASSERT((imm28 & 3) == 0);
@@ -851,7 +838,7 @@ bool Assembler::is_near(Label* L) {
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
- return rmode != RelocInfo::NONE;
+ return !RelocInfo::IsNone(rmode);
}
void Assembler::GenInstrRegister(Opcode opcode,
@@ -887,7 +874,7 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
- ASSERT(CpuFeatures::IsEnabled(FPU));
+ ASSERT(IsEnabled(FPU));
Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
| (fd.code() << kFdShift) | func;
emit(instr);
@@ -895,13 +882,27 @@ void Assembler::GenInstrRegister(Opcode opcode,
void Assembler::GenInstrRegister(Opcode opcode,
+ FPURegister fr,
+ FPURegister ft,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func) {
+ ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
+ ASSERT(IsEnabled(FPU));
+ Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
+ | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
SecondaryField fmt,
Register rt,
FPURegister fs,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
- ASSERT(CpuFeatures::IsEnabled(FPU));
+ ASSERT(IsEnabled(FPU));
Instr instr = opcode | fmt | (rt.code() << kRtShift)
| (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
@@ -914,7 +915,7 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPUControlRegister fs,
SecondaryField func) {
ASSERT(fs.is_valid() && rt.is_valid());
- ASSERT(CpuFeatures::IsEnabled(FPU));
+ ASSERT(IsEnabled(FPU));
Instr instr =
opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
emit(instr);
@@ -949,7 +950,7 @@ void Assembler::GenInstrImmediate(Opcode opcode,
FPURegister ft,
int32_t j) {
ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
- ASSERT(CpuFeatures::IsEnabled(FPU));
+ ASSERT(IsEnabled(FPU));
Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
| (j & kImm16Mask);
emit(instr);
@@ -998,7 +999,7 @@ uint32_t Assembler::jump_address(Label* L) {
}
}
- uint32_t imm = (uint32_t)buffer_ + target_pos;
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
ASSERT((imm & 3) == 0);
return imm;
@@ -1133,7 +1134,8 @@ void Assembler::j(int32_t target) {
#if DEBUG
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+ bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
ASSERT(in_range && ((target & 3) == 0));
#endif
GenInstrJump(J, target >> 2);
@@ -1154,7 +1156,8 @@ void Assembler::jal(int32_t target) {
#ifdef DEBUG
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+ bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
ASSERT(in_range && ((target & 3) == 0));
#endif
positions_recorder()->WriteRecordedPositions();
@@ -1173,8 +1176,8 @@ void Assembler::jalr(Register rs, Register rd) {
void Assembler::j_or_jr(int32_t target, Register rs) {
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
-
+ bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
if (in_range) {
j(target);
} else {
@@ -1186,8 +1189,8 @@ void Assembler::j_or_jr(int32_t target, Register rs) {
void Assembler::jal_or_jalr(int32_t target, Register rs) {
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
-
+ bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
+ (kImm26Bits+kImmFieldShift)) == 0;
if (in_range) {
jal(target);
} else {
@@ -1697,6 +1700,12 @@ void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
}
+void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft) {
+ GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
+}
+
+
void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}
@@ -1863,7 +1872,7 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
// Conditions.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
+ ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc));
ASSERT((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
@@ -1874,7 +1883,7 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt,
void Assembler::fcmp(FPURegister src1, const double src2,
FPUCondition cond) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
+ ASSERT(IsEnabled(FPU));
ASSERT(src2 == 0.0);
mtc1(zero_reg, f14);
cvt_d_w(f14, f14);
@@ -1883,7 +1892,7 @@ void Assembler::fcmp(FPURegister src1, const double src2,
void Assembler::bc1f(int16_t offset, uint16_t cc) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
+ ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
emit(instr);
@@ -1891,7 +1900,7 @@ void Assembler::bc1f(int16_t offset, uint16_t cc) {
void Assembler::bc1t(int16_t offset, uint16_t cc) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
+ ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
emit(instr);
@@ -1946,7 +1955,7 @@ int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
return 2; // Number of instructions patched.
} else {
uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
- if ((int32_t)imm28 == kEndOfJumpChain) {
+ if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
return 0; // Number of instructions patched.
}
imm28 += pc_delta;
@@ -2036,7 +2045,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
|| RelocInfo::IsPosition(rmode));
// These modes do not need an entry in the constant pool.
}
- if (rinfo.rmode() != RelocInfo::NONE) {
+ if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
@@ -2196,9 +2205,10 @@ void Assembler::set_target_address_at(Address pc, Address target) {
Instr instr3 = instr_at(pc + 2 * kInstrSize);
uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
- bool in_range =
- ((uint32_t)(ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
- uint32_t target_field = (uint32_t)(itarget & kJumpAddrMask) >> kImmFieldShift;
+ bool in_range = (ipc ^ static_cast<uint32_t>(itarget) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
+ uint32_t target_field =
+ static_cast<uint32_t>(itarget & kJumpAddrMask) >>kImmFieldShift;
bool patched_jump = false;
#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 59c45c927a..e6c9e76c78 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -72,20 +72,23 @@ namespace internal {
// Core register.
struct Register {
static const int kNumRegisters = v8::internal::kNumRegisters;
- static const int kNumAllocatableRegisters = 14; // v0 through t7.
+ static const int kMaxNumAllocatableRegisters = 14; // v0 through t7.
static const int kSizeInBytes = 4;
+ static const int kGPRsPerNonFPUDouble = 2;
+
+ inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
return reg.code() - 2; // zero_reg and 'at' are skipped.
}
static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index + 2); // zero_reg and 'at' are skipped.
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"v0",
"v1",
@@ -186,7 +189,7 @@ Register ToRegister(int num);
// Coprocessor register.
struct FPURegister {
- static const int kNumRegisters = v8::internal::kNumFPURegisters;
+ static const int kMaxNumRegisters = v8::internal::kNumFPURegisters;
// TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
// to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
@@ -197,44 +200,25 @@ struct FPURegister {
// f28: 0.0
// f30: scratch register.
static const int kNumReservedRegisters = 2;
- static const int kNumAllocatableRegisters = kNumRegisters / 2 -
+ static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 -
kNumReservedRegisters;
-
+ inline static int NumRegisters();
+ inline static int NumAllocatableRegisters();
inline static int ToAllocationIndex(FPURegister reg);
+ static const char* AllocationIndexToString(int index);
static FPURegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index * 2);
}
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "f0",
- "f2",
- "f4",
- "f6",
- "f8",
- "f10",
- "f12",
- "f14",
- "f16",
- "f18",
- "f20",
- "f22",
- "f24",
- "f26"
- };
- return names[index];
- }
-
static FPURegister from_code(int code) {
FPURegister r = { code };
return r;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters ; }
+ bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters ; }
bool is(FPURegister creg) const { return code_ == creg.code_; }
FPURegister low() const {
// Find low reg of a Double-reg pair, which is the reg itself.
@@ -316,6 +300,9 @@ const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };
+const Register sfpd_lo = { kRegister_t6_Code };
+const Register sfpd_hi = { kRegister_t7_Code };
+
// Register aliases.
// cp is assumed to be a callee saved register.
// Defined using #define instead of "static const Register&" because Clang
@@ -361,7 +348,7 @@ class Operand BASE_EMBEDDED {
public:
// Immediate.
INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE));
+ RelocInfo::Mode rmode = RelocInfo::NONE32));
INLINE(explicit Operand(const ExternalReference& f));
INLINE(explicit Operand(const char* s));
INLINE(explicit Operand(Object** opp));
@@ -406,7 +393,7 @@ class MemOperand : public Operand {
// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
+// Supported features must be enabled by a CpuFeatureScope before use.
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
@@ -420,89 +407,25 @@ class CpuFeatures : public AllStatic {
return (supported_ & (1u << f)) != 0;
}
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
+ static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
- return (enabled & (1u << f)) != 0;
+ return (found_by_runtime_probing_only_ &
+ (static_cast<uint64_t>(1) << f)) != 0;
}
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- unsigned mask = 1u << f;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
-
- private:
- Isolate* isolate_;
- unsigned old_enabled_;
-#else
-
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
- class TryForceFeatureScope BASE_EMBEDDED {
- public:
- explicit TryForceFeatureScope(CpuFeature f)
- : old_supported_(CpuFeatures::supported_) {
- if (CanForce()) {
- CpuFeatures::supported_ |= (1u << f);
- }
- }
-
- ~TryForceFeatureScope() {
- if (CanForce()) {
- CpuFeatures::supported_ = old_supported_;
- }
- }
-
- private:
- static bool CanForce() {
- // It's only safe to temporarily force support of CPU features
- // when there's only a single isolate, which is guaranteed when
- // the serializer is enabled.
- return Serializer::enabled();
- }
-
- const unsigned old_supported_;
- };
+ static bool IsSafeForSnapshot(CpuFeature f) {
+ return (IsSupported(f) &&
+ (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ }
private:
#ifdef DEBUG
static bool initialized_;
#endif
static unsigned supported_;
- static unsigned found_by_runtime_probing_;
+ static unsigned found_by_runtime_probing_only_;
+ friend class ExternalReference;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@@ -523,13 +446,7 @@ class Assembler : public AssemblerBase {
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- ~Assembler();
-
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
- // Dummy for cross platform compatibility.
- void set_predictable_code_size(bool value) { }
+ virtual ~Assembler() { }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -669,7 +586,9 @@ class Assembler : public AssemblerBase {
PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
// Helper values.
LAST_CODE_MARKER,
- FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
+ // Code aging
+ CODE_AGE_MARKER_NOP = 6
};
// Type == 0 is the default non-marking nop. For mips this is a
@@ -822,6 +741,7 @@ class Assembler : public AssemblerBase {
void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
void abs_d(FPURegister fd, FPURegister fs);
void mov_d(FPURegister fd, FPURegister fs);
@@ -947,8 +867,6 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
- int32_t pc_offset() const { return pc_ - buffer_; }
-
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Postpone the generation of the trampoline pool for the specified number of
@@ -1033,8 +951,6 @@ class Assembler : public AssemblerBase {
// the relocation info.
TypeFeedbackId recorded_ast_id_;
- bool emit_debug_code() const { return emit_debug_code_; }
-
int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos.
@@ -1093,13 +1009,6 @@ class Assembler : public AssemblerBase {
}
private:
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
// Buffer size and constant pool distance are checked together at regular
// intervals of kBufferCheckInterval emitted bytes.
static const int kBufferCheckInterval = 1*KB/2;
@@ -1110,7 +1019,6 @@ class Assembler : public AssemblerBase {
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
- byte* pc_; // The program counter - moves forward.
// Repeated checking whether the trampoline pool should be emitted is rather
@@ -1175,6 +1083,13 @@ class Assembler : public AssemblerBase {
SecondaryField func = NULLSF);
void GenInstrRegister(Opcode opcode,
+ FPURegister fr,
+ FPURegister ft,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func = NULLSF);
+
+ void GenInstrRegister(Opcode opcode,
SecondaryField fmt,
Register rt,
FPURegister fs,
@@ -1285,7 +1200,6 @@ class Assembler : public AssemblerBase {
friend class BlockTrampolinePoolScope;
PositionsRecorder positions_recorder_;
- bool emit_debug_code_;
friend class PositionsRecorder;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 0342e6505d..54efd94913 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -128,12 +128,8 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
if (initial_capacity > 0) {
size += FixedArray::SizeFor(initial_capacity);
}
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
// result: JSObject
@@ -555,34 +551,64 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
+ // -- a2 : type info cell
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
- Label generic_constructor;
if (FLAG_debug_code) {
// The array construct code is only set for the builtin and internal
// Array functions which always have a map.
// Initial map for the builtin Array function should be a map.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a3, Operand(kSmiTagMask));
__ Assert(ne, "Unexpected initial map for Array function (3)",
t0, Operand(zero_reg));
- __ GetObjectType(a2, a3, t0);
+ __ GetObjectType(a3, a3, t0);
__ Assert(eq, "Unexpected initial map for Array function (4)",
t0, Operand(MAP_TYPE));
+
+ if (FLAG_optimize_constructed_arrays) {
+ // We should either have undefined in a2 or a valid jsglobalpropertycell
+ Label okay_here;
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ Handle<Map> global_property_cell_map(
+ masm->isolate()->heap()->global_property_cell_map());
+ __ Branch(&okay_here, eq, a2, Operand(undefined_sentinel));
+ __ lw(a3, FieldMemOperand(a2, 0));
+ __ Assert(eq, "Expected property cell in register a3",
+ a3, Operand(global_property_cell_map));
+ __ bind(&okay_here);
+ }
}
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
+ if (FLAG_optimize_constructed_arrays) {
+ Label not_zero_case, not_one_case;
+ __ Branch(&not_zero_case, ne, a0, Operand(zero_reg));
+ ArrayNoArgumentConstructorStub no_argument_stub;
+ __ TailCallStub(&no_argument_stub);
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
+ __ bind(&not_zero_case);
+ __ Branch(&not_one_case, gt, a0, Operand(1));
+ ArraySingleArgumentConstructorStub single_argument_stub;
+ __ TailCallStub(&single_argument_stub);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub n_argument_stub;
+ __ TailCallStub(&n_argument_stub);
+ } else {
+ Label generic_constructor;
+ // Run the native code for the Array function called as a constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ }
}
@@ -635,12 +661,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -----------------------------------
Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- v0, // Result.
- a3, // Scratch.
- t0, // Scratch.
- &gc_required,
- TAG_OBJECT);
+ __ Allocate(JSValue::kSize,
+ v0, // Result.
+ a3, // Scratch.
+ t0, // Scratch.
+ &gc_required,
+ TAG_OBJECT);
// Initialising the String Object.
Register map = a3;
@@ -698,7 +724,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the empty string into a2, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(argument, Heap::kempty_stringRootIndex);
__ Drop(1);
__ Branch(&argument_is_string);
@@ -728,6 +754,35 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
}
+void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve the function.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
+
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(a1);
+ __ CallRuntime(Runtime::kInstallRecompiledCode, 1);
+ // Calculate the entry point.
+ __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore saved function.
+ __ pop(a1);
+
+ // Tear down temporary frame.
+ }
+
+ // Do a tail-call of the compiled function.
+ __ Jump(t9);
+}
+
+
void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1072,9 +1127,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the type of the result (stored in its map) is less than
// FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ GetObjectType(v0, a3, a3);
+ __ GetObjectType(v0, a1, a3);
__ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Symbols are "objects".
+ __ lbu(a3, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+ __ Branch(&exit, eq, a3, Operand(SYMBOL_TYPE));
+
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
@@ -1171,6 +1230,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code and pass argc as a0.
__ mov(a0, a3);
if (is_construct) {
+ // No type feedback cell is available
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ __ li(a2, Operand(undefined_sentinel));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@@ -1255,6 +1318,66 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ __ mov(a0, ra);
+ // Adjust a0 to point to the head of the PlatformCodeAge sequence
+ __ Subu(a0, a0,
+ Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
+ // Restore the original return address of the function
+ __ mov(ra, at);
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // a0 - contains return address (beginning of patch sequence)
+ // a1 - function object
+ RegList saved_regs =
+ (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ MultiPush(saved_regs);
+ __ PrepareCallCFunction(1, 0, a1);
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ __ MultiPop(saved_regs);
+ __ Jump(a0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ MultiPop(kJSCallerSaved | kCalleeSaved);
+ }
+
+ __ Addu(sp, sp, Operand(kPointerSize)); // Ignore state
+ __ Jump(ra); // Jump to miss handler
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -1315,12 +1438,6 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- CpuFeatures::TryForceFeatureScope scope(VFP3);
- if (!CpuFeatures::IsSupported(FPU)) {
- __ Abort("Unreachable code: Cannot optimize without FPU support.");
- return;
- }
-
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1371,7 +1488,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// a0: actual number of arguments
// a1: function
Label shift_arguments;
- __ li(t0, Operand(0, RelocInfo::NONE)); // Indicate regular JS_FUNCTION.
+ __ li(t0, Operand(0, RelocInfo::NONE32)); // Indicate regular JS_FUNCTION.
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -1425,7 +1542,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ sll(at, a0, kPointerSizeLog2);
__ addu(at, sp, at);
__ lw(a1, MemOperand(at));
- __ li(t0, Operand(0, RelocInfo::NONE));
+ __ li(t0, Operand(0, RelocInfo::NONE32));
__ Branch(&patch_receiver);
// Use the global receiver object from the called function as the
@@ -1448,11 +1565,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 3b. Check for function proxy.
__ bind(&slow);
- __ li(t0, Operand(1, RelocInfo::NONE)); // Indicate function proxy.
+ __ li(t0, Operand(1, RelocInfo::NONE32)); // Indicate function proxy.
__ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
__ bind(&non_function);
- __ li(t0, Operand(2, RelocInfo::NONE)); // Indicate non-function.
+ __ li(t0, Operand(2, RelocInfo::NONE32)); // Indicate non-function.
// 3c. Patch the first argument when calling a non-function. The
// CALL_NON_FUNCTION builtin expects the non-function callee as
@@ -1683,7 +1800,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ bind(&call_proxy);
__ push(a1); // Add function proxy as last argument.
__ Addu(a0, a0, Operand(1));
- __ li(a2, Operand(0, RelocInfo::NONE));
+ __ li(a2, Operand(0, RelocInfo::NONE32));
__ SetCallKind(t1, CALL_AS_METHOD);
__ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index ca31826454..f5908d37bd 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -33,17 +33,90 @@
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"
+#include "stub-cache.h"
namespace v8 {
namespace internal {
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a3, a2, a1, a0 };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+}
+
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0, a1 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ Address entry =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+}
+
+
+static void InitializeArrayConstructorDescriptor(Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // register state
+ // a1 -- constructor function
+ // a2 -- type info cell with elements kind
+ // a0 -- number of arguments to the constructor function
+ static Register registers[] = { a1, a2 };
+ descriptor->register_param_count_ = 2;
+ // stack param count needs (constructor pointer, and single argument)
+ descriptor->stack_parameter_count_ = &a0;
+ descriptor->register_params_ = registers;
+ descriptor->extra_expression_stack_count_ = 1;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ArrayConstructor_StubFailure);
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc,
- bool never_nan_nan);
+ Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -95,12 +168,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ pop(a3);
// Attempt to allocate new JSFunction in new space.
- __ AllocateInNewSpace(JSFunction::kSize,
- v0,
- a1,
- a2,
- &gc,
- TAG_OBJECT);
+ __ Allocate(JSFunction::kSize, v0, a1, a2, &gc, TAG_OBJECT);
__ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);
@@ -227,12 +295,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
// Attempt to allocate the context in new space.
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- v0,
- a1,
- a2,
- &gc,
- TAG_OBJECT);
+ __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
// Load the function from the stack.
__ lw(a3, MemOperand(sp, 0));
@@ -276,8 +339,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- v0, a1, a2, &gc, TAG_OBJECT);
+ __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
// Load the function from the stack.
__ lw(a3, MemOperand(sp, 0));
@@ -333,6 +395,7 @@ static void GenerateFastCloneShallowArrayCommon(
MacroAssembler* masm,
int length,
FastCloneShallowArrayStub::Mode mode,
+ AllocationSiteMode allocation_site_mode,
Label* fail) {
// Registers on entry:
// a3: boilerplate literal array.
@@ -345,16 +408,24 @@ static void GenerateFastCloneShallowArrayCommon(
? FixedDoubleArray::SizeFor(length)
: FixedArray::SizeFor(length);
}
- int size = JSArray::kSize + elements_size;
+
+ int size = JSArray::kSize;
+ int allocation_info_start = size;
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ size += AllocationSiteInfo::kSize;
+ }
+ size += elements_size;
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size,
- v0,
- a1,
- a2,
- fail,
- TAG_OBJECT);
+ __ Allocate(size, v0, a1, a2, fail, TAG_OBJECT);
+
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ __ li(a2, Operand(Handle<Map>(masm->isolate()->heap()->
+ allocation_site_info_map())));
+ __ sw(a2, FieldMemOperand(v0, allocation_info_start));
+ __ sw(a3, FieldMemOperand(v0, allocation_info_start + kPointerSize));
+ }
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
@@ -368,7 +439,11 @@ static void GenerateFastCloneShallowArrayCommon(
// Get hold of the elements array of the boilerplate and setup the
// elements pointer in the resulting object.
__ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
- __ Addu(a2, v0, Operand(JSArray::kSize));
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ __ Addu(a2, v0, Operand(JSArray::kSize + AllocationSiteInfo::kSize));
+ } else {
+ __ Addu(a2, v0, Operand(JSArray::kSize));
+ }
__ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
// Copy the elements array.
@@ -403,16 +478,18 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
__ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
__ Branch(&check_fast_elements, ne, v0, Operand(t1));
- GenerateFastCloneShallowArrayCommon(masm, 0,
- COPY_ON_WRITE_ELEMENTS, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
+ allocation_site_mode_,
+ &slow_case);
// Return and remove the on-stack parameters.
__ DropAndRet(3);
__ bind(&check_fast_elements);
__ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
__ Branch(&double_elements, ne, v0, Operand(t1));
- GenerateFastCloneShallowArrayCommon(masm, length_,
- CLONE_ELEMENTS, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
+ allocation_site_mode_,
+ &slow_case);
// Return and remove the on-stack parameters.
__ DropAndRet(3);
@@ -443,7 +520,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ pop(a3);
}
- GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, length_, mode,
+ allocation_site_mode_,
+ &slow_case);
// Return and remove the on-stack parameters.
__ DropAndRet(3);
@@ -453,55 +532,12 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
}
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: object literal flags.
- // [sp + kPointerSize]: constant properties.
- // [sp + (2 * kPointerSize)]: literal index.
- // [sp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into a3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ lw(a3, MemOperand(sp, 3 * kPointerSize));
- __ lw(a0, MemOperand(sp, 2 * kPointerSize));
- __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a3, t0, a3);
- __ lw(a3, MemOperand(a3));
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&slow_case, eq, a3, Operand(t0));
-
- // Check that the boilerplate contains only fast properties and we can
- // statically determine the instance size.
- int size = JSObject::kHeaderSize + length_ * kPointerSize;
- __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
- __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
-
- // Allocate the JS object and copy header together with all in-object
- // properties from the boilerplate.
- __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
- for (int i = 0; i < size; i += kPointerSize) {
- __ lw(a1, FieldMemOperand(a3, i));
- __ sw(a1, FieldMemOperand(v0, i));
- }
-
- // Return and remove the on-stack parameters.
- __ DropAndRet(4);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public CodeStub {
+class ConvertToDoubleStub : public PlatformCodeStub {
public:
ConvertToDoubleStub(Register result_reg_1,
Register result_reg_2,
@@ -600,7 +636,7 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
Register scratch1,
Register scratch2) {
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
__ sra(scratch1, a0, kSmiTagSize);
__ mtc1(scratch1, f14);
__ cvt_d_w(f14, f14);
@@ -617,34 +653,16 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
__ mov(scratch1, a0);
ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
__ push(ra);
- __ Call(stub1.GetCode());
+ __ Call(stub1.GetCode(masm->isolate()));
// Write Smi from a1 to a1 and a0 in double format.
__ mov(scratch1, a1);
ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
- __ Call(stub2.GetCode());
+ __ Call(stub2.GetCode(masm->isolate()));
__ pop(ra);
}
}
-void FloatingPointHelper::LoadOperands(
- MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* slow) {
-
- // Load right operand (a0) to f12 or a2/a3.
- LoadNumber(masm, destination,
- a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
-
- // Load left operand (a1) to f14 or a0/a1.
- LoadNumber(masm, destination,
- a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
-}
-
-
void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Destination destination,
Register object,
@@ -669,7 +687,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
// Handle loading a double from a heap number.
if (CpuFeatures::IsSupported(FPU) &&
destination == kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
// Load the double from tagged HeapNumber to double register.
// ARM uses a workaround here because of the unaligned HeapNumber
@@ -688,7 +706,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
// Handle loading a double from a smi.
__ bind(&is_smi);
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
// Convert smi to double using FPU instructions.
__ mtc1(scratch1, dst);
__ cvt_d_w(dst, dst);
@@ -702,7 +720,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
__ mov(scratch1, object);
ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
__ push(ra);
- __ Call(stub.GetCode());
+ __ Call(stub.GetCode(masm->isolate()));
__ pop(ra);
}
@@ -753,79 +771,80 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
Register int_scratch,
Destination destination,
FPURegister double_dst,
- Register dst1,
- Register dst2,
+ Register dst_mantissa,
+ Register dst_exponent,
Register scratch2,
FPURegister single_scratch) {
ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst1));
- ASSERT(!int_scratch.is(dst2));
+ ASSERT(!int_scratch.is(dst_mantissa));
+ ASSERT(!int_scratch.is(dst_exponent));
Label done;
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
__ mtc1(int_scratch, single_scratch);
__ cvt_d_w(double_dst, single_scratch);
if (destination == kCoreRegisters) {
- __ Move(dst1, dst2, double_dst);
+ __ Move(dst_mantissa, dst_exponent, double_dst);
}
} else {
Label fewer_than_20_useful_bits;
// Expected output:
- // | dst2 | dst1 |
+ // | dst_exponent | dst_mantissa |
// | s | exp | mantissa |
// Check for zero.
- __ mov(dst2, int_scratch);
- __ mov(dst1, int_scratch);
+ __ mov(dst_exponent, int_scratch);
+ __ mov(dst_mantissa, int_scratch);
__ Branch(&done, eq, int_scratch, Operand(zero_reg));
// Preload the sign of the value.
- __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
+ __ And(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask));
// Get the absolute value of the object (as an unsigned integer).
Label skip_sub;
- __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
+ __ Branch(&skip_sub, ge, dst_exponent, Operand(zero_reg));
__ Subu(int_scratch, zero_reg, int_scratch);
__ bind(&skip_sub);
// Get mantissa[51:20].
// Get the position of the first set bit.
- __ Clz(dst1, int_scratch);
+ __ Clz(dst_mantissa, int_scratch);
__ li(scratch2, 31);
- __ Subu(dst1, scratch2, dst1);
+ __ Subu(dst_mantissa, scratch2, dst_mantissa);
// Set the exponent.
- __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
- __ Ins(dst2, scratch2,
+ __ Addu(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
+ __ Ins(dst_exponent, scratch2,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// Clear the first non null bit.
__ li(scratch2, Operand(1));
- __ sllv(scratch2, scratch2, dst1);
+ __ sllv(scratch2, scratch2, dst_mantissa);
__ li(at, -1);
__ Xor(scratch2, scratch2, at);
__ And(int_scratch, int_scratch, scratch2);
// Get the number of bits to set in the lower part of the mantissa.
- __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ Subu(scratch2, dst_mantissa,
+ Operand(HeapNumber::kMantissaBitsInTopWord));
__ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
// Set the higher 20 bits of the mantissa.
__ srlv(at, int_scratch, scratch2);
- __ or_(dst2, dst2, at);
+ __ or_(dst_exponent, dst_exponent, at);
__ li(at, 32);
__ subu(scratch2, at, scratch2);
- __ sllv(dst1, int_scratch, scratch2);
+ __ sllv(dst_mantissa, int_scratch, scratch2);
__ Branch(&done);
__ bind(&fewer_than_20_useful_bits);
__ li(at, HeapNumber::kMantissaBitsInTopWord);
- __ subu(scratch2, at, dst1);
+ __ subu(scratch2, at, dst_mantissa);
__ sllv(scratch2, int_scratch, scratch2);
- __ Or(dst2, dst2, scratch2);
- // Set dst1 to 0.
- __ mov(dst1, zero_reg);
+ __ Or(dst_exponent, dst_exponent, scratch2);
+ // Set dst_mantissa to 0.
+ __ mov(dst_mantissa, zero_reg);
}
__ bind(&done);
}
@@ -835,8 +854,9 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
Register object,
Destination destination,
DoubleRegister double_dst,
- Register dst1,
- Register dst2,
+ DoubleRegister double_scratch,
+ Register dst_mantissa,
+ Register dst_exponent,
Register heap_number_map,
Register scratch1,
Register scratch2,
@@ -852,8 +872,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ JumpIfNotSmi(object, &obj_is_not_smi);
__ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
- scratch2, single_scratch);
+ ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
+ dst_exponent, scratch2, single_scratch);
__ Branch(&done);
__ bind(&obj_is_not_smi);
@@ -864,15 +884,16 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
// Load the number.
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
// Load the double value.
__ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- single_scratch,
- double_dst,
scratch1,
+ double_dst,
+ at,
+ double_scratch,
except_flag,
kCheckForInexactConversion);
@@ -880,27 +901,51 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ Branch(not_int32, ne, except_flag, Operand(zero_reg));
if (destination == kCoreRegisters) {
- __ Move(dst1, dst2, double_dst);
+ __ Move(dst_mantissa, dst_exponent, double_dst);
}
} else {
ASSERT(!scratch1.is(object) && !scratch2.is(object));
// Load the double value in the destination registers.
- __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+ bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
+ if (save_registers) {
+ // Save both output registers, because the other one probably holds
+ // an important value too.
+ __ Push(dst_exponent, dst_mantissa);
+ }
+ __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
// Check for 0 and -0.
- __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
- __ Or(scratch1, scratch1, Operand(dst2));
- __ Branch(&done, eq, scratch1, Operand(zero_reg));
+ Label zero;
+ __ And(scratch1, dst_exponent, Operand(~HeapNumber::kSignMask));
+ __ Or(scratch1, scratch1, Operand(dst_mantissa));
+ __ Branch(&zero, eq, scratch1, Operand(zero_reg));
// Check that the value can be exactly represented by a 32-bit integer.
// Jump to not_int32 if that's not the case.
- DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+ Label restore_input_and_miss;
+ DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
+ &restore_input_and_miss);
- // dst1 and dst2 were trashed. Reload the double value.
- __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+ // dst_* were trashed. Reload the double value.
+ if (save_registers) {
+ __ Pop(dst_exponent, dst_mantissa);
+ }
+ __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+ __ Branch(&done);
+
+ __ bind(&restore_input_and_miss);
+ if (save_registers) {
+ __ Pop(dst_exponent, dst_mantissa);
+ }
+ __ Branch(not_int32);
+
+ __ bind(&zero);
+ if (save_registers) {
+ __ Drop(2);
+ }
}
__ bind(&done);
@@ -914,7 +959,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- DoubleRegister double_scratch,
+ DoubleRegister double_scratch0,
+ DoubleRegister double_scratch1,
Label* not_int32) {
ASSERT(!dst.is(object));
ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -922,36 +968,34 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
!scratch1.is(scratch3) &&
!scratch2.is(scratch3));
- Label done;
+ Label done, maybe_undefined;
__ UntagAndJumpIfSmi(dst, object, &done);
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
// Load the double value.
- __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
- FPURegister single_scratch = double_scratch.low();
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- single_scratch,
- double_scratch,
+ dst,
+ double_scratch0,
scratch1,
+ double_scratch1,
except_flag,
kCheckForInexactConversion);
// Jump to not_int32 if the operation did not succeed.
__ Branch(not_int32, ne, except_flag, Operand(zero_reg));
- // Get the result in the destination register.
- __ mfc1(dst, single_scratch);
-
} else {
// Load the double value in the destination registers.
__ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
@@ -983,20 +1027,28 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
__ Subu(dst, zero_reg, dst);
__ bind(&skip_sub);
}
+ __ Branch(&done);
+
+ __ bind(&maybe_undefined);
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(not_int32, ne, object, Operand(at));
+ // |undefined| is truncated to 0.
+ __ li(dst, Operand(Smi::FromInt(0)));
+ // Fall through.
__ bind(&done);
}
void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
+ Register src_exponent,
+ Register src_mantissa,
Register dst,
Register scratch,
Label* not_int32) {
// Get exponent alone in scratch.
__ Ext(scratch,
- src1,
+ src_exponent,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
@@ -1016,11 +1068,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Another way to put it is that if (exponent - signbit) > 30 then the
// number cannot be represented as an int32.
Register tmp = dst;
- __ srl(at, src1, 31);
+ __ srl(at, src_exponent, 31);
__ subu(tmp, scratch, at);
__ Branch(not_int32, gt, tmp, Operand(30));
// - Bits [21:0] in the mantissa are not null.
- __ And(tmp, src2, 0x3fffff);
+ __ And(tmp, src_mantissa, 0x3fffff);
__ Branch(not_int32, ne, tmp, Operand(zero_reg));
// Otherwise the exponent needs to be big enough to shift left all the
@@ -1031,20 +1083,20 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Get the 32 higher bits of the mantissa in dst.
__ Ext(dst,
- src2,
+ src_mantissa,
HeapNumber::kMantissaBitsInTopWord,
32 - HeapNumber::kMantissaBitsInTopWord);
- __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
+ __ sll(at, src_exponent, HeapNumber::kNonMantissaBitsInTopWord);
__ or_(dst, dst, at);
// Create the mask and test the lower bits (of the higher bits).
__ li(at, 32);
__ subu(scratch, at, scratch);
- __ li(src2, 1);
- __ sllv(src1, src2, scratch);
- __ Subu(src1, src1, Operand(1));
- __ And(src1, dst, src1);
- __ Branch(not_int32, ne, src1, Operand(zero_reg));
+ __ li(src_mantissa, 1);
+ __ sllv(src_exponent, src_mantissa, scratch);
+ __ Subu(src_exponent, src_exponent, Operand(1));
+ __ And(src_exponent, dst, src_exponent);
+ __ Branch(not_int32, ne, src_exponent, Operand(zero_reg));
}
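
A minimal host-side C++ sketch (not part of this diff) of the predicate DoubleIs32BitInteger evaluates on the raw exponent/mantissa words: reject values whose exponent allows more than 31 bits of magnitude, then reject values that still have fractional mantissa bits. The -0 case is screened out earlier by the caller's zero check, as shown above in this diff.

#include <cstdint>

bool FitsInInt32(double d) {
  // Range test (the stub's "exponent - signbit > 30" rejection); NaN fails here too.
  if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;
  // Fraction test (the stub's check of the remaining mantissa bits).
  return d == static_cast<double>(static_cast<int32_t>(d));
}
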
@@ -1067,7 +1119,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ push(ra);
__ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
if (!IsMipsSoftFloatABI) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
     // We are not using MIPS FPU instructions, and parameters for the runtime
     // function call are prepared in a0-a3 registers, but the function we are
     // calling is compiled with the hard-float flag and expecting the hard-float ABI
@@ -1083,7 +1135,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
}
// Store answer in the overwritable heap number.
if (!IsMipsSoftFloatABI) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
// Double returned in register f0.
__ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
} else {
@@ -1119,11 +1171,12 @@ bool WriteInt32ToHeapNumberStub::IsPregenerated() {
}
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
- stub1.GetCode()->set_is_pregenerated(true);
- stub2.GetCode()->set_is_pregenerated(true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -1183,48 +1236,43 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc,
- bool never_nan_nan) {
+ Condition cc) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = t5;
__ Branch(&not_identical, ne, a0, Operand(a1));
- // The two objects are identical. If we know that one of them isn't NaN then
- // we now know they test equal.
- if (cc != eq || !never_nan_nan) {
- __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
-
- // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cc == less || cc == greater) {
- __ GetObjectType(a0, t4, t4);
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
- } else {
- __ GetObjectType(a0, t4, t4);
- __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
- // Comparing JS objects with <=, >= is complicated.
- if (cc != eq) {
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cc == less_equal || cc == greater_equal) {
- __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
- __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
- __ Branch(&return_equal, ne, a0, Operand(t2));
- if (cc == le) {
- // undefined <= undefined should fail.
- __ li(v0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ li(v0, Operand(LESS));
- }
- __ Ret();
+ __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+ // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cc == less || cc == greater) {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ } else {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cc == less_equal || cc == greater_equal) {
+ __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&return_equal, ne, a0, Operand(t2));
+ if (cc == le) {
+ // undefined <= undefined should fail.
+ __ li(v0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ li(v0, Operand(LESS));
}
+ __ Ret();
}
}
}
@@ -1240,46 +1288,44 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
__ Ret();
- if (cc != eq || !never_nan_nan) {
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cc != lt && cc != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ And(t3, t2, Operand(exp_mask_reg));
- // If all bits not set (ne cond), then not a NaN, objects are equal.
- __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
- // Or with all low-bits of mantissa.
- __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
- __ Or(v0, t3, Operand(t2));
- // For equal we already have the right value in v0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load v0 with the failing
- // value if it's a NaN.
- if (cc != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq, v0, Operand(zero_reg));
- if (cc == le) {
- __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
- }
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ And(t3, t2, Operand(exp_mask_reg));
+ // If all bits not set (ne cond), then not a NaN, objects are equal.
+ __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
+ // Or with all low-bits of mantissa.
+ __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ Or(v0, t3, Operand(t2));
+ // For equal we already have the right value in v0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load v0 with the failing
+ // value if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq, v0, Operand(zero_reg));
+ if (cc == le) {
+ __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
}
- __ Ret();
}
- // No fall through here.
+ __ Ret();
}
+ // No fall through here.
__ bind(&not_identical);
}
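
A minimal host-side C++ sketch (not part of this diff) of the NaN test the code above performs on the heap number's words: a double is a NaN exactly when all exponent bits (62..52) are set and the 52 mantissa bits are not all zero (an all-zero mantissa is +/-Infinity).

#include <cstdint>
#include <cstring>

bool IsNaNBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint64_t exponent = (bits >> 52) & 0x7FF;        // exponent field
  uint64_t mantissa = bits & 0xFFFFFFFFFFFFFull;   // 52 mantissa bits
  return exponent == 0x7FF && mantissa != 0;
}
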
@@ -1313,7 +1359,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Rhs is a smi, lhs is a number.
// Convert smi rhs to double.
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
__ sra(at, rhs, kSmiTagSize);
__ mtc1(at, f14);
__ cvt_d_w(f14, f14);
@@ -1327,7 +1373,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ mov(t6, rhs);
ConvertToDoubleStub stub1(a1, a0, t6, t5);
__ push(ra);
- __ Call(stub1.GetCode());
+ __ Call(stub1.GetCode(masm->isolate()));
__ pop(ra);
}
@@ -1352,7 +1398,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Lhs is a smi, rhs is a number.
// Convert smi lhs to double.
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
__ sra(at, lhs, kSmiTagSize);
__ mtc1(at, f12);
__ cvt_d_w(f12, f12);
@@ -1362,7 +1408,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ mov(t6, lhs);
ConvertToDoubleStub stub2(a3, a2, t6, t5);
__ push(ra);
- __ Call(stub2.GetCode());
+ __ Call(stub2.GetCode(masm->isolate()));
__ pop(ra);
// Load rhs to a double in a1, a0.
if (rhs.is(a0)) {
@@ -1380,7 +1426,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
void EmitNanCheck(MacroAssembler* masm, Condition cc) {
bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
// Lhs and rhs are already loaded to f12 and f14 register pairs.
__ Move(t0, t1, f14);
__ Move(t2, t3, f12);
@@ -1447,7 +1493,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
// Exception: 0 and -0.
bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
// Lhs and rhs are already loaded to f12 and f14 register pairs.
__ Move(t0, t1, f14);
__ Move(t2, t3, f12);
@@ -1503,7 +1549,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
__ pop(ra); // Because this function returns int, result is in v0.
__ Ret();
} else {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
Label equal, less_than;
__ BranchF(&equal, NULL, eq, f12, f14);
__ BranchF(&less_than, NULL, lt, f12, f14);
@@ -1553,12 +1599,13 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// Check for oddballs: true, false, null, undefined.
__ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
- // Now that we have the types we might as well check for symbol-symbol.
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
+ // Now that we have the types we might as well check for
+ // internalized-internalized.
+ // Ensure that no non-strings have the internalized bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ And(t2, a2, Operand(a3));
- __ And(t0, t2, Operand(kIsSymbolMask));
+ __ And(t0, t2, Operand(kIsInternalizedMask));
__ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
}
@@ -1578,7 +1625,7 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Both are heap numbers. Load them up then jump to the code we have
// for that.
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
__ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
__ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
} else {
@@ -1596,30 +1643,30 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
}
-// Fast negative check for symbol-to-symbol equality.
-static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* possible_strings,
- Label* not_both_strings) {
+// Fast negative check for internalized-to-internalized equality.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* possible_strings,
+ Label* not_both_strings) {
ASSERT((lhs.is(a0) && rhs.is(a1)) ||
(lhs.is(a1) && rhs.is(a0)));
// a2 is object type of lhs.
- // Ensure that no non-strings have the symbol bit set.
+ // Ensure that no non-strings have the internalized bit set.
Label object_test;
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ And(at, a2, Operand(kIsNotStringMask));
__ Branch(&object_test, ne, at, Operand(zero_reg));
- __ And(at, a2, Operand(kIsSymbolMask));
+ __ And(at, a2, Operand(kIsInternalizedMask));
__ Branch(possible_strings, eq, at, Operand(zero_reg));
__ GetObjectType(rhs, a3, a3);
__ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
- __ And(at, a3, Operand(kIsSymbolMask));
+ __ And(at, a3, Operand(kIsInternalizedMask));
__ Branch(possible_strings, eq, at, Operand(zero_reg));
- // Both are symbols. We already checked they weren't the same pointer
- // so they are not equal.
+ // Both are internalized strings. We already checked they weren't the same
+ // pointer so they are not equal.
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(1)); // Non-zero indicates not equal.
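
A minimal host-side C++ sketch (not part of this diff) of the property this fast negative check relies on: internalized (interned) strings are unique per value, so two distinct pointers can never hold equal contents. The Intern helper below is purely illustrative, not a V8 API.

#include <string>
#include <unordered_set>

const std::string* Intern(const std::string& s) {
  static std::unordered_set<std::string> table;
  return &*table.insert(s).first;   // one canonical object per distinct value
}

// Intern("foo") == Intern("foo") yields equal pointers; when two interned
// pointers differ, the strings are known to be unequal without comparing
// their characters, which is exactly the shortcut taken above.
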
@@ -1673,7 +1720,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
__ CheckMap(object,
scratch1,
Heap::kHeapNumberMapRootIndex,
@@ -1752,43 +1799,61 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
-// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
-// On exit, v0 is 0, positive, or negative (smi) to indicate the result
-// of the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
- Label slow; // Call builtin.
- Label not_smis, both_loaded_as_doubles;
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about internalized/string here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
- if (include_smi_compare_) {
- Label not_two_smis, smi_done;
- __ Or(a2, a1, a0);
- __ JumpIfNotSmi(a2, &not_two_smis);
- __ sra(a1, a1, 1);
- __ sra(a0, a0, 1);
- __ Ret(USE_DELAY_SLOT);
- __ subu(v0, a1, a0);
- __ bind(&not_two_smis);
- } else if (FLAG_debug_code) {
- __ Or(a2, a1, a0);
- __ And(a2, a2, kSmiTagMask);
- __ Assert(ne, "CompareStub: unexpected smi operands.",
- a2, Operand(zero_reg));
- }
+// On entry a1 (lhs) and a0 (rhs) are the values to be compared.
+// On exit v0 is 0, positive or negative to indicate the result of
+// the comparison.
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = a1;
+ Register rhs = a0;
+ Condition cc = GetCondition();
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
+
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles;
+
+ Label not_two_smis, smi_done;
+ __ Or(a2, a1, a0);
+ __ JumpIfNotSmi(a2, &not_two_smis);
+ __ sra(a1, a1, 1);
+ __ sra(a0, a0, 1);
+ __ Ret(USE_DELAY_SLOT);
+ __ subu(v0, a1, a0);
+ __ bind(&not_two_smis);
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
- __ And(t2, lhs_, Operand(rhs_));
+ __ And(t2, lhs, Operand(rhs));
__ JumpIfNotSmi(t2, &not_smis, t0);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
@@ -1798,8 +1863,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
// In cases 3 and 4 we have found out we were dealing with a number-number
// comparison and the numbers have been loaded into f12 and f14 as doubles,
// or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
- EmitSmiNonsmiComparison(masm, lhs_, rhs_,
- &both_loaded_as_doubles, &slow, strict_);
+ EmitSmiNonsmiComparison(masm, lhs, rhs,
+ &both_loaded_as_doubles, &slow, strict());
__ bind(&both_loaded_as_doubles);
// f12, f14 are the double representations of the left hand side
@@ -1808,7 +1873,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
Label nan;
__ li(t0, Operand(LESS));
__ li(t1, Operand(GREATER));
@@ -1835,7 +1900,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&nan);
// NaN comparisons always fail.
// Load whatever we need in v0 to make the comparison fail.
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
__ li(v0, Operand(GREATER));
} else {
__ li(v0, Operand(LESS));
@@ -1844,61 +1909,64 @@ void CompareStub::Generate(MacroAssembler* masm) {
} else {
// Checks for NaN in the doubles we have loaded. Can return the answer or
// fall through if neither is a NaN. Also binds rhs_not_nan.
- EmitNanCheck(masm, cc_);
+ EmitNanCheck(masm, cc);
// Compares two doubles that are not NaNs. Returns the answer.
// Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
+ EmitTwoNonNanDoubleComparison(masm, cc);
}
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
// and neither of them is a Smi. The objects are in lhs_ and rhs_.
- if (strict_) {
+ if (strict()) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
}
- Label check_for_symbols;
+ Label check_for_internalized_strings;
Label flat_string_check;
// Check for heap-number-heap-number comparison. Can jump to slow case,
// or load both doubles and jump to the code that handles
- // that case. If the inputs are not doubles then jumps to check_for_symbols.
+ // that case. If the inputs are not doubles then jumps to
+ // check_for_internalized_strings.
// In this case a2 will contain the type of lhs_.
EmitCheckForTwoHeapNumbers(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
&both_loaded_as_doubles,
- &check_for_symbols,
+ &check_for_internalized_strings,
&flat_string_check);
- __ bind(&check_for_symbols);
- if (cc_ == eq && !strict_) {
- // Returns an answer for two symbols or two detectable objects.
+ __ bind(&check_for_internalized_strings);
+ if (cc == eq && !strict()) {
+ // Returns an answer for two internalized strings or two
+ // detectable objects.
// Otherwise jumps to string case or not both strings case.
// Assumes that a2 is the type of lhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ EmitCheckForInternalizedStringsOrObjects(
+ masm, lhs, rhs, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
// case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
__ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
- if (cc_ == eq) {
+ if (cc == eq) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
a2,
a3,
t0);
} else {
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
a2,
a3,
t0,
@@ -1909,18 +1977,18 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
// Prepare for call to builtin. Push object pointers, a0 (lhs) first,
// a1 (rhs) second.
- __ Push(lhs_, rhs_);
+ __ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- if (cc_ == eq) {
- native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
int ncr; // NaN compare result.
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
ncr = GREATER;
} else {
- ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
+ ASSERT(cc == gt || cc == ge); // Remaining cases.
ncr = LESS;
}
__ li(a0, Operand(Smi::FromInt(ncr)));
@@ -1930,6 +1998,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
@@ -1937,7 +2008,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub uses FPU instructions.
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
Label patch;
const Register map = t5.is(tos_) ? t3 : t5;
@@ -2052,7 +2123,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// restore them.
__ MultiPush(kJSCallerSaved | ra.bit());
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
__ MultiPushFPU(kCallerSavedFPU);
}
const int argument_count = 1;
@@ -2066,7 +2137,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
__ MultiPopFPU(kCallerSavedFPU);
}
@@ -2098,8 +2169,8 @@ void UnaryOpStub::Generate(MacroAssembler* masm) {
case UnaryOpIC::SMI:
GenerateSmiStub(masm);
break;
- case UnaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
+ case UnaryOpIC::NUMBER:
+ GenerateNumberStub(masm);
break;
case UnaryOpIC::GENERIC:
GenerateGenericStub(masm);
@@ -2179,13 +2250,13 @@ void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
switch (op_) {
case Token::SUB:
- GenerateHeapNumberStubSub(masm);
+ GenerateNumberStubSub(masm);
break;
case Token::BIT_NOT:
- GenerateHeapNumberStubBitNot(masm);
+ GenerateNumberStubBitNot(masm);
break;
default:
UNREACHABLE();
@@ -2193,7 +2264,7 @@ void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
}
-void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
Label non_smi, slow, call_builtin;
GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
__ bind(&non_smi);
@@ -2205,7 +2276,7 @@ void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
}
-void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
+void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
Label non_smi, slow;
GenerateSmiCodeBitNot(masm, &non_smi);
__ bind(&non_smi);
@@ -2299,7 +2370,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
if (CpuFeatures::IsSupported(FPU)) {
// Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
__ mtc1(a1, f0);
__ cvt_d_w(f0, f0);
__ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
@@ -2308,7 +2379,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
// WriteInt32ToHeapNumberStub does not trigger GC, so we do not
// have to set up a frame.
WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
}
__ bind(&impossible);
@@ -2370,20 +2441,23 @@ void UnaryOpStub::GenerateGenericCodeFallback(
}
+void BinaryOpStub::Initialize() {
+ platform_specific_bit_ = CpuFeatures::IsSupported(FPU);
+}
+
+
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
__ Push(a1, a0);
__ li(a2, Operand(Smi::FromInt(MinorKey())));
- __ li(a1, Operand(Smi::FromInt(op_)));
- __ li(a0, Operand(Smi::FromInt(operands_type_)));
- __ Push(a2, a1, a0);
+ __ push(a2);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
@@ -2394,59 +2468,8 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-
-void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
+void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
+ Token::Value op) {
Register left = a1;
Register right = a0;
@@ -2457,7 +2480,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
Label not_smi_result;
- switch (op_) {
+ switch (op) {
case Token::ADD:
__ AdduAndCheckForOverflow(v0, left, right, scratch1);
__ RetOnNoOverflow(scratch1);
@@ -2600,10 +2623,24 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
}
-void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required) {
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode);
+
+
+void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required,
+ Label* miss,
+ Token::Value op,
+ OverwriteMode mode) {
Register left = a1;
Register right = a0;
Register scratch1 = t3;
@@ -2615,11 +2652,17 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
__ AssertSmi(left);
__ AssertSmi(right);
}
+ if (left_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, miss);
+ }
+ if (right_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, miss);
+ }
Register heap_number_map = t2;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- switch (op_) {
+ switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
@@ -2629,25 +2672,42 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// depending on whether FPU is available or not.
FloatingPointHelper::Destination destination =
CpuFeatures::IsSupported(FPU) &&
- op_ != Token::MOD ?
+ op != Token::MOD ?
FloatingPointHelper::kFPURegisters :
FloatingPointHelper::kCoreRegisters;
// Allocate new heap number for result.
Register result = s0;
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
// Load the operands.
if (smi_operands) {
FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
} else {
- FloatingPointHelper::LoadOperands(masm,
- destination,
- heap_number_map,
- scratch1,
- scratch2,
- not_numbers);
+ // Load right operand to f14 or a2/a3.
+ if (right_type == BinaryOpIC::INT32) {
+ FloatingPointHelper::LoadNumberAsInt32Double(
+ masm, right, destination, f14, f16, a2, a3, heap_number_map,
+ scratch1, scratch2, f2, miss);
+ } else {
+ Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
+ FloatingPointHelper::LoadNumber(
+ masm, destination, right, f14, a2, a3, heap_number_map,
+ scratch1, scratch2, fail);
+ }
+ // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
+ // jumps to |miss|.
+ if (left_type == BinaryOpIC::INT32) {
+ FloatingPointHelper::LoadNumberAsInt32Double(
+ masm, left, destination, f12, f16, a0, a1, heap_number_map,
+ scratch1, scratch2, f2, miss);
+ } else {
+ Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
+ FloatingPointHelper::LoadNumber(
+ masm, destination, left, f12, a0, a1, heap_number_map,
+ scratch1, scratch2, fail);
+ }
}
// Calculate the result.
@@ -2655,8 +2715,8 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Using FPU registers:
// f12: Left value.
// f14: Right value.
- CpuFeatures::Scope scope(FPU);
- switch (op_) {
+ CpuFeatureScope scope(masm, FPU);
+ switch (op) {
case Token::ADD:
__ add_d(f10, f12, f14);
break;
@@ -2682,7 +2742,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
} else {
// Call the C function to handle the double operation.
FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op_,
+ op,
result,
scratch1);
if (FLAG_debug_code) {
@@ -2722,7 +2782,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
not_numbers);
}
Label result_not_a_smi;
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
__ Or(a2, a3, Operand(a2));
break;
@@ -2772,8 +2832,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
} else {
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required,
+ mode);
}
// a2: Answer as signed int32.
@@ -2786,9 +2847,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
if (CpuFeatures::IsSupported(FPU)) {
// Convert the int32 in a2 to the heap number in a0. As
// mentioned above SHR needs to always produce a positive result.
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
__ mtc1(a2, f0);
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ Cvt_d_uw(f0, f0, f22);
} else {
__ cvt_d_w(f0, f0);
@@ -2815,12 +2876,14 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
 // Generate the smi code. If the operation on smis is successful, this return is
// generated. If the result is not a smi and heap number allocation is not
// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the lable gc_required.
-void BinaryOpStub::GenerateSmiCode(
+// heap number cannot be allocated the code jumps to the label gc_required.
+void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* use_runtime,
Label* gc_required,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ Token::Value op,
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ OverwriteMode mode) {
Label not_smis;
Register left = a1;
@@ -2833,12 +2896,14 @@ void BinaryOpStub::GenerateSmiCode(
__ JumpIfNotSmi(scratch1, &not_smis);
// If the smi-smi operation results in a smi return is generated.
- GenerateSmiSmiOperation(masm);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op);
// If heap number results are possible generate the result in an allocated
// heap number.
- if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
- GenerateFPOperation(masm, true, use_runtime, gc_required);
+ if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
+ BinaryOpStub_GenerateFPOperation(
+ masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
+ use_runtime, gc_required, &not_smis, op, mode);
}
__ bind(&not_smis);
}
@@ -2850,14 +2915,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- GenerateSmiCode(masm,
- &call_runtime,
- &call_runtime,
- ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
+ mode_);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -2865,22 +2930,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
GenerateTypeTransition(masm);
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -2909,7 +2966,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::INT32);
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
Register left = a1;
Register right = a0;
@@ -2932,7 +2989,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label skip;
__ Or(scratch1, left, right);
__ JumpIfNotSmi(scratch1, &skip);
- GenerateSmiSmiOperation(masm);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
// Fall through if the result is not a smi.
__ bind(&skip);
@@ -2942,6 +2999,15 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::MUL:
case Token::DIV:
case Token::MOD: {
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, &transition);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, &transition);
+ }
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers a0 and a1 (right
// and left) are preserved for the runtime call.
@@ -2954,6 +3020,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
right,
destination,
f14,
+ f16,
a2,
a3,
heap_number_map,
@@ -2965,6 +3032,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
left,
destination,
f12,
+ f16,
t0,
t1,
heap_number_map,
@@ -2974,7 +3042,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
&transition);
if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
Label return_heap_number;
switch (op_) {
case Token::ADD:
@@ -3001,9 +3069,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- single_scratch,
- f10,
scratch1,
+ f10,
+ at,
+ f16,
except_flag);
if (result_type_ <= BinaryOpIC::INT32) {
@@ -3012,7 +3081,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
// Check if the result fits in a smi.
- __ mfc1(scratch1, single_scratch);
__ Addu(scratch2, scratch1, Operand(0x40000000));
// If not try to return a heap number.
__ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
@@ -3034,16 +3102,17 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ bind(&return_heap_number);
// Return a heap number, or fall through to type transition or runtime
// call if we can't.
- if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
+ if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::NUMBER
: BinaryOpIC::INT32)) {
// We are using FPU registers so s0 is available.
heap_number_result = s0;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
__ mov(v0, heap_number_result);
__ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
@@ -3061,12 +3130,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Allocate a heap number to store the result.
heap_number_result = s0;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &pop_and_call_runtime,
+ mode_);
// Load the left value from the value saved on the stack.
__ Pop(a1, a0);
@@ -3105,6 +3175,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
scratch3,
f0,
+ f2,
&transition);
FloatingPointHelper::LoadNumberAsInt32(masm,
right,
@@ -3114,6 +3185,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
scratch3,
f0,
+ f2,
&transition);
// The ECMA-262 standard specifies that, for shift operations, only the
@@ -3175,15 +3247,16 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ bind(&return_heap_number);
heap_number_result = t1;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
if (op_ != Token::SHR) {
// Convert the result to a floating point value.
@@ -3224,6 +3297,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3257,25 +3331,37 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
}
__ bind(&done);
- GenerateHeapNumberStub(masm);
+ GenerateNumberStub(masm);
}
-void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime;
- GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
+void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
+ Label call_runtime, transition;
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &transition, &call_runtime, &transition, op_, mode_);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
+ Label call_runtime, call_string_add_or_runtime, transition;
- GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
- GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
@@ -3283,6 +3369,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3318,63 +3405,20 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
}
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
-
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode) {
// Code below will scratch result if allocation fails. To keep both arguments
   // intact for the runtime call, result cannot be one of these.
ASSERT(!result.is(a0) && !result.is(a1));
- if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+ if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
Label skip_allocation, allocated;
- Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
+ Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0;
// If the overwritable operand is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(overwritable_operand, &skip_allocation);
@@ -3387,7 +3431,7 @@ void BinaryOpStub::GenerateHeapResultAllocation(
__ mov(result, overwritable_operand);
__ bind(&allocated);
} else {
- ASSERT(mode_ == NO_OVERWRITE);
+ ASSERT(mode == NO_OVERWRITE);
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
}
@@ -3416,7 +3460,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
const bool tagged = (argument_type_ == TAGGED);
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
if (tagged) {
// Argument is a number and is on stack and in a0.
@@ -3526,7 +3570,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1);
} else {
ASSERT(CpuFeatures::IsSupported(FPU));
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
Label no_update;
Label skip_cache;
@@ -3654,7 +3698,7 @@ void InterruptStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatures::Scope fpu_scope(FPU);
+ CpuFeatureScope fpu_scope(masm, FPU);
const Register base = a1;
const Register exponent = a2;
const Register heapnumbermap = t1;
@@ -3708,9 +3752,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ EmitFPUTruncate(kRoundToMinusInf,
- single_scratch,
- double_exponent,
scratch,
+ double_exponent,
+ at,
+ double_scratch,
scratch2,
kCheckForInexactConversion);
// scratch2 == 0 means there was no conversion error.
@@ -3768,7 +3813,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(ra);
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
+ __ PrepareCallCFunction(0, 2, scratch2);
__ SetCallCDoubleArguments(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
@@ -3779,7 +3824,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&done);
__ bind(&int_exponent_convert);
- __ mfc1(scratch, single_scratch);
}
// Calculate power with integer exponent.
@@ -3880,31 +3924,53 @@ bool CEntryStub::IsPregenerated() {
}
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
}
-void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- StoreBufferOverflowStub stub(kSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ CEntryStub save_doubles(1, mode);
+ StoreBufferOverflowStub stub(mode);
+ // These stubs might already be in the snapshot; detect that and don't
+ // regenerate, since regenerating would leave the code stub initialization
+ // state inconsistent.
+ Code* save_doubles_code;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
+ save_doubles_code = *save_doubles.GetCode(isolate);
+ save_doubles_code->set_is_pregenerated(true);
+
+ Code* store_buffer_overflow_code = *stub.GetCode(isolate);
+ store_buffer_overflow_code->set_is_pregenerated(true);
+ }
+ isolate->set_fp_stubs_generated(true);
}
-void CEntryStub::GenerateAheadOfTime() {
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode();
+ Handle<Code> code = stub.GetCode(isolate);
code->set_is_pregenerated(true);
}
+static void JumpIfOOM(MacroAssembler* masm,
+ Register value,
+ Register scratch,
+ Label* oom_label) {
+ STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
+ STATIC_ASSERT(kFailureTag == 3);
+ __ andi(scratch, value, 0xf);
+ __ Branch(oom_label, eq, scratch, Operand(0xf));
+}
+
+
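
A minimal host-side C++ sketch (not part of this diff) of why masking with 0xf identifies the out-of-memory failure: kFailureTag == 3 fills the two low bits, and, assuming the failure type sits directly above the tag (which is what the 0xf mask implies), OUT_OF_MEMORY_EXCEPTION == 3 fills bits 2..3, so an OOM failure value always ends in 0xf.

#include <cstdint>
#include <cassert>

int main() {
  const uint32_t kFailureTag = 3;            // matches the STATIC_ASSERT above
  const uint32_t kFailureTagSize = 2;        // assumed: tag occupies the low two bits
  const uint32_t kOutOfMemoryException = 3;  // matches the STATIC_ASSERT above
  uint32_t oom_low_bits =
      (kOutOfMemoryException << kFailureTagSize) | kFailureTag;
  assert((oom_low_bits & 0xf) == 0xf);       // the very check JumpIfOOM emits
  return 0;
}
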
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -4011,14 +4077,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ Branch(&retry, eq, t0, Operand(zero_reg));
// Special handling of out of memory exceptions.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ Branch(USE_DELAY_SLOT,
- throw_out_of_memory_exception,
- eq,
- v0,
- Operand(reinterpret_cast<int32_t>(out_of_memory)));
- // If we throw the OOM exception, the value of a3 doesn't matter.
- // Any instruction can be in the delay slot that's not a jump.
+ JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
__ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
@@ -4105,13 +4164,16 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
isolate);
- __ li(a0, Operand(false, RelocInfo::NONE));
+ __ li(a0, Operand(false, RelocInfo::NONE32));
__ li(a2, Operand(external_caught));
__ sw(a0, MemOperand(a2));
// Set pending exception and v0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
+ Label already_have_failure;
+ JumpIfOOM(masm, v0, t0, &already_have_failure);
+ Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
__ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ bind(&already_have_failure);
__ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ sw(v0, MemOperand(a2));
@@ -4143,7 +4205,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ MultiPush(kCalleeSaved | ra.bit());
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
// Save callee-saved FPU registers.
__ MultiPushFPU(kCalleeSavedFPU);
// Set up the reserved register for 0.0.
@@ -4292,7 +4354,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
// Restore callee-saved fpu registers.
__ MultiPopFPU(kCalleeSavedFPU);
}
@@ -4482,12 +4544,177 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
+void ArrayLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a0,
+ Operand(masm->isolate()->factory()->length_string()));
+ receiver = a1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = a0;
+ }
+
+ StubCompiler::GenerateLoadArrayLength(masm, receiver, a3, &miss);
+ __ bind(&miss);
+ StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+}
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a0,
+ Operand(masm->isolate()->factory()->prototype_string()));
+ receiver = a1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = a0;
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
+ __ bind(&miss);
+ StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+}
+
+
+void StringLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a0,
+ Operand(masm->isolate()->factory()->length_string()));
+ receiver = a1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = a0;
+ }
+
+ StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss,
+ support_wrapper_);
+
+ __ bind(&miss);
+ StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+}
+
+
+void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
+ Label miss;
+
+ Register receiver;
+ Register value;
+ if (kind() == Code::KEYED_STORE_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a1,
+ Operand(masm->isolate()->factory()->length_string()));
+ receiver = a2;
+ value = a0;
+ } else {
+ ASSERT(kind() == Code::STORE_IC);
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : key
+ // -----------------------------------
+ receiver = a1;
+ value = a0;
+ }
+ Register scratch = a3;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ GetObjectType(receiver, scratch, scratch);
+ __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+ __ GetObjectType(scratch, scratch, scratch);
+ __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
+
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
+ __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&miss, eq, scratch, Operand(at));
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver, value);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+}
+
+
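The chain of guards in StoreArrayLengthStub::Generate above is easier to audit
as a single predicate. A hedged C++ sketch, where each boolean parameter stands
in for the corresponding assembler check (names are illustrative only, not V8
API):

    // Returns true when the stub may tail-call StoreIC_ArrayLength; any false
    // answer corresponds to a branch to the miss label above.
    bool CanStoreArrayLengthFast(bool receiver_is_smi,
                                 bool receiver_is_js_array,
                                 bool elements_are_fixed_array,
                                 bool properties_are_dictionary,
                                 bool value_is_smi) {
      if (receiver_is_smi) return false;            // __ JumpIfSmi(receiver, &miss)
      if (!receiver_is_js_array) return false;      // JS_ARRAY_TYPE check
      if (!elements_are_fixed_array) return false;  // FIXED_ARRAY_TYPE check (covers COW)
      if (properties_are_dictionary) return false;  // kHashTableMapRootIndex check
      return value_is_smi;                          // __ JumpIfNotSmi(value, &miss)
    }
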
Register InstanceofStub::left() { return a0; }
Register InstanceofStub::right() { return a1; }
+void LoadFieldStub::Generate(MacroAssembler* masm) {
+ StubCompiler::DoGenerateFastPropertyLoad(masm, v0, reg_, inobject_, index_);
+ __ Ret();
+}
+
+
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
@@ -4910,8 +5137,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
- Label runtime, invoke_regexp;
-
+ Label runtime;
// Allocation of registers for this function. These are in callee save
// registers and will be preserved by the call to the native RegExp code, as
// this code is called using the normal C calling convention. When calling
@@ -4963,149 +5189,111 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the number of captures fit in the static offsets vector buffer.
__ lw(a2,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the asumption that smis are 2 * their untagged value.
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // Multiplying by 2 comes for free since a2 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ Addu(a2, a2, Operand(2)); // a2 was a smi.
- // Check that the static offsets vector buffer is large enough.
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
__ Branch(
- &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
-
- // a2: Number of capture registers
- // regexp_data: RegExp data (FixedArray)
- // Check that the second argument is a string.
- __ lw(subject, MemOperand(sp, kSubjectOffset));
- __ JumpIfSmi(subject, &runtime);
- __ GetObjectType(subject, a0, a0);
- __ And(a0, a0, Operand(kIsNotStringMask));
- STATIC_ASSERT(kStringTag == 0);
- __ Branch(&runtime, ne, a0, Operand(zero_reg));
-
- // Get the length of the string to r3.
- __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
-
- // a2: Number of capture registers
- // a3: Length of subject string as a smi
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
- __ JumpIfNotSmi(a0, &runtime);
- __ Branch(&runtime, ls, a3, Operand(a0));
-
- // a2: Number of capture registers
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the fourth object is a JSArray object.
- __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(a0, &runtime);
- __ GetObjectType(a0, a1, a1);
- __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
- // Check that the JSArray is in fast case.
- __ lw(last_match_info_elements,
- FieldMemOperand(a0, JSArray::kElementsOffset));
- __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ Branch(&runtime, ne, a0, Operand(
- isolate->factory()->fixed_array_map()));
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ lw(a0,
- FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
- __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
- __ Branch(&runtime, gt, a2, Operand(at));
+ &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
// Reset offset for possibly sliced string.
__ mov(t0, zero_reg);
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_string;
+ __ lw(subject, MemOperand(sp, kSubjectOffset));
+ __ JumpIfSmi(subject, &runtime);
+ __ mov(a3, subject); // Make a copy of the original subject string.
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
- // First check for flat string. None of the following string type tests will
- // succeed if subject is not a string or a short external string.
+ // subject: subject string
+ // a3: subject string
+ // a0: subject string instance type
+ // regexp_data: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (5).
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ // (3) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bailout.
+ // (4) Is subject external? If yes, go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (6) Not a long external string? If yes, go to (8).
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (5).
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+
+ Label seq_string /* 5 */, external_string /* 7 */,
+ check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
+ not_long_external /* 8 */;
+
+ // (1) Sequential string? If yes, go to (5).
__ And(a1,
a0,
Operand(kIsNotStringMask |
kStringRepresentationMask |
kShortExternalStringMask));
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ Branch(&seq_string, eq, a1, Operand(zero_reg));
+ __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
- // subject: Subject string
- // a0: instance type if Subject string
- // regexp_data: RegExp data (FixedArray)
- // a1: whether subject is a string and if yes, its string representation
- // Check for flat cons string or sliced string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- // In the case of a sliced string its offset has to be taken into account.
- Label cons_string, external_string, check_encoding;
+ // (2) Anything but sequential or cons? If yes, go to (6).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
- __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
+ // Go to (6).
+ __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
- __ Branch(&runtime, ne, at, Operand(zero_reg));
-
- // String is sliced.
- __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
- __ sra(t0, t0, kSmiTagSize);
- __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
- // t5: offset of sliced string, smi-tagged.
- __ jmp(&check_encoding);
- // String is a cons string, check whether it is flat.
- __ bind(&cons_string);
+ // (3) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
__ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(a1, Heap::kempty_stringRootIndex);
__ Branch(&runtime, ne, a0, Operand(a1));
__ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
- // Is first part of cons or parent of slice a flat string?
- __ bind(&check_encoding);
+
+ // (4) Is subject external? If yes, go to (7).
+ __ bind(&check_underlying);
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ And(at, a0, Operand(kStringRepresentationMask));
- __ Branch(&external_string, ne, at, Operand(zero_reg));
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
__ bind(&seq_string);
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // a0: Instance type of subject string
+ // subject: sequential subject string (or look-alike, external string)
+ // a3: original subject string
+ // Load previous index and check range before a3 is overwritten. We have to
+ // use a3 instead of subject here because subject might have been only made
+ // to look like a sequential string when it actually is an external string.
+ __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(a1, &runtime);
+ __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
+ __ Branch(&runtime, ls, a3, Operand(a1));
+ __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
+
STATIC_ASSERT(kStringEncodingMask == 4);
- STATIC_ASSERT(kAsciiStringTag == 4);
+ STATIC_ASSERT(kOneByteStringTag == 4);
STATIC_ASSERT(kTwoByteStringTag == 0);
- // Find the code object based on the assumptions above.
__ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
__ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
__ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
__ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
__ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
+ // (E) Carry on. String handling is done.
+ // t9: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it contains
// a smi (code flushing support).
__ JumpIfSmi(t9, &runtime);
- // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // t9: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
- __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
-
// a1: previous index
// a3: encoding of subject string (1 if ASCII, 0 if two_byte);
// t9: code
@@ -5200,9 +5388,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: subject string (callee saved)
// regexp_data: RegExp data (callee saved)
// last_match_info_elements: Last match info elements (callee saved)
-
// Check the result.
-
Label success;
__ Branch(&success, eq, v0, Operand(1));
// We expect exactly one result since we force the called regexp to behave
@@ -5243,10 +5429,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(a1,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
+  // Multiplying by 2 comes for free since a1 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ Addu(a1, a1, Operand(2)); // a1 was a smi.
+ __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(a0, &runtime);
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
+ // Check that the JSArray is in fast case.
+ __ lw(last_match_info_elements,
+ FieldMemOperand(a0, JSArray::kElementsOffset));
+ __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&runtime, ne, a0, Operand(at));
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ lw(a0,
+ FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
+ __ sra(at, a0, kSmiTagSize);
+ __ Branch(&runtime, gt, a2, Operand(at));
+
// a1: number of capture registers
// subject: subject string
// Store the capture count.
@@ -5260,10 +5465,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(a2, subject);
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastSubjectOffset,
- a2,
+ subject,
t3,
kRAHasNotBeenSaved,
kDontSaveFPRegs);
+ __ mov(subject, a2);
__ sw(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
@@ -5305,8 +5511,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
__ DropAndRet(4);
- // External string. Short external strings have already been ruled out.
- // a0: scratch
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (6) Not a long external string? If yes, go to (8).
+ __ bind(&not_seq_nor_cons);
+ // Go to (8).
+ __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
+
+ // (7) External string. Make it, offset-wise, look like a sequential string.
__ bind(&external_string);
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
@@ -5322,15 +5537,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Subu(subject,
subject,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- __ jmp(&seq_string);
+ __ jmp(&seq_string); // Go to (5).
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
+ __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+ // Load offset into t0 and replace subject string with parent.
+ __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ sra(t0, t0, kSmiTagSize);
+ __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ jmp(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
@@ -5422,12 +5646,13 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a1 : the function to call
// a2 : cache cell for call target
+ ASSERT(!FLAG_optimize_constructed_arrays);
Label done;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
@@ -5464,6 +5689,78 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
}
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a global property cell. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // a1 : the function to call
+ // a2 : cache cell for call target
+ ASSERT(FLAG_optimize_constructed_arrays);
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->undefined_value());
+ ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->the_hole_value());
+
+ // Load the cache state into a3.
+ __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ Branch(&done, eq, a3, Operand(a1));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&done, eq, a3, Operand(at));
+
+ // Special handling of the Array() function, which caches not only the
+ // monomorphic Array function but the initial ElementsKind with special
+ // sentinels
+ Handle<Object> terminal_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+ LAST_FAST_ELEMENTS_KIND);
+ __ Branch(&miss, ne, a3, Operand(terminal_kind_sentinel));
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(a3);
+ __ Branch(&megamorphic, ne, a1, Operand(a3));
+ __ jmp(&done);
+
+ __ bind(&miss);
+
+ // A monomorphic miss (i.e, here the cache is not uninitialized) goes
+ // megamorphic.
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&initialize, eq, a3, Operand(at));
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ bind(&megamorphic);
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+
+ // An uninitialized cache is patched with the function or sentinel to
+ // indicate the ElementsKind if function is the Array constructor.
+ __ bind(&initialize);
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(a3);
+ __ Branch(&not_array_function, ne, a1, Operand(a3));
+
+ // The target function is the Array constructor, install a sentinel value in
+ // the constructor's type info cell that will track the initial ElementsKind
+ // that should be used for the array when its constructed.
+ Handle<Object> initial_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+ GetInitialFastElementsKind());
+ __ li(a3, Operand(initial_kind_sentinel));
+ __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ Branch(&done);
+
+ __ bind(&not_array_function);
+ __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ // No need for a write barrier here - cells are rescanned.
+
+ __ bind(&done);
+}
+
+
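The cache-cell updates above form a small state machine over the type-feedback
cell: the hole means uninitialized, a JSFunction means monomorphic, an
ElementsKind sentinel tracks Array() feedback, and undefined means megamorphic.
A simplified model of the transitions (it folds the per-ElementsKind sentinels
into one state and is a sketch, not the V8 implementation):

    enum class CellState { kUninitialized, kMonomorphic, kArraySentinel, kMegamorphic };

    // One call-site observation; mirrors the branch structure of
    // GenerateRecordCallTarget above.
    CellState RecordCall(CellState state,
                         bool same_function_as_cached,
                         bool callee_is_array_constructor) {
      switch (state) {
        case CellState::kUninitialized:
          // The &initialize block: install the Array sentinel or the function.
          return callee_is_array_constructor ? CellState::kArraySentinel
                                             : CellState::kMonomorphic;
        case CellState::kMonomorphic:
          return same_function_as_cached ? state : CellState::kMegamorphic;
        case CellState::kArraySentinel:
          return callee_is_array_constructor ? state : CellState::kMegamorphic;
        case CellState::kMegamorphic:
          return CellState::kMegamorphic;
      }
      return CellState::kMegamorphic;  // unreachable; silences -Wreturn-type
    }
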
void CallFunctionStub::Generate(MacroAssembler* masm) {
// a1 : the function to call
// a2 : cache cell for call target
@@ -5496,7 +5793,11 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Fast-case: Invoke the function now.
@@ -5534,8 +5835,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Check for function proxy.
__ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
__ push(a1); // Put proxy as additional argument.
- __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
- __ li(a2, Operand(0, RelocInfo::NONE));
+ __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
+ __ li(a2, Operand(0, RelocInfo::NONE32));
__ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
__ SetCallKind(t1, CALL_AS_METHOD);
{
@@ -5570,13 +5871,19 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Jump to the function-specific construct stub.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
- __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Register jmp_reg = FLAG_optimize_constructed_arrays ? a3 : a2;
+ __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(jmp_reg, FieldMemOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
// a0: number of arguments
@@ -5592,52 +5899,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
  // Set expected number of arguments to zero (not changing a0).
- __ li(a2, Operand(0, RelocInfo::NONE));
+ __ li(a2, Operand(0, RelocInfo::NONE32));
__ SetCallKind(t1, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
- (lhs_.is(a1) && rhs_.is(a0)));
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == eq || cc_ == ne;
- stream->Add("CompareStub_%s", cc_name);
- stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
- stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the two parameters in a unique 16 bit value.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
- ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
- (lhs_.is(a1) && rhs_.is(a0)));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(lhs_.is(a0))
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
@@ -5748,11 +6016,11 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
__ And(t0,
code_,
Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
__ Branch(&slow_case_, ne, t0, Operand(zero_reg));
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
@@ -5785,23 +6053,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
@@ -5947,7 +6198,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
}
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -5960,7 +6211,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register scratch = scratch3;
  // Make sure that both characters are not digits as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
+ // different hash algorithm. Don't try to look for these in the string table.
Label not_array_index;
__ Subu(scratch, c1, Operand(static_cast<int>('0')));
__ Branch(&not_array_index,
@@ -5995,43 +6246,43 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string.
- // Load symbol table.
- // Load address of first element of the symbol table.
- Register symbol_table = c2;
- __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+ // Load string table.
+ // Load address of first element of the string table.
+ Register string_table = c2;
+ __ LoadRoot(string_table, Heap::kStringTableRootIndex);
Register undefined = scratch4;
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
- // Calculate capacity mask from the symbol table capacity.
+ // Calculate capacity mask from the string table capacity.
Register mask = scratch2;
- __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ lw(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
__ sra(mask, mask, 1);
__ Addu(mask, mask, -1);
- // Calculate untagged address of the first element of the symbol table.
- Register first_symbol_table_element = symbol_table;
- __ Addu(first_symbol_table_element, symbol_table,
- Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+ // Calculate untagged address of the first element of the string table.
+ Register first_string_table_element = string_table;
+ __ Addu(first_string_table_element, string_table,
+ Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
// Registers.
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string
// mask: capacity mask
- // first_symbol_table_element: address of the first element of
- // the symbol table
+ // first_string_table_element: address of the first element of
+ // the string table
// undefined: the undefined object
// scratch: -
- // Perform a number of probes in the symbol table.
+ // Perform a number of probes in the string table.
const int kProbes = 4;
- Label found_in_symbol_table;
+ Label found_in_string_table;
Label next_probe[kProbes];
Register candidate = scratch5; // Scratch register contains candidate.
for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
+ // Calculate entry in string table.
if (i > 0) {
- __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+ __ Addu(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
} else {
__ mov(candidate, hash);
}
@@ -6039,9 +6290,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ And(candidate, candidate, Operand(mask));
  // Load the entry from the symbol table.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ STATIC_ASSERT(StringTable::kEntrySize == 1);
__ sll(scratch, candidate, kPointerSizeLog2);
- __ Addu(scratch, scratch, first_symbol_table_element);
+ __ Addu(scratch, scratch, first_string_table_element);
__ lw(candidate, MemOperand(scratch));
// If entry is undefined no string with this hash can be found.
@@ -6053,7 +6304,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Must be the hole (deleted entry).
if (FLAG_debug_code) {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, "oddball in symbol table is not undefined or the hole",
+ __ Assert(eq, "oddball in string table is not undefined or the hole",
scratch, Operand(candidate));
}
__ jmp(&next_probe[i]);
@@ -6071,8 +6322,8 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Check if the two characters match.
// Assumes that word load is little endian.
- __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
- __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
+ __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
+ __ Branch(&found_in_string_table, eq, chars, Operand(scratch));
__ bind(&next_probe[i]);
}
@@ -6081,7 +6332,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Scratch register contains result when we fall through to here.
Register result = candidate;
- __ bind(&found_in_symbol_table);
+ __ bind(&found_in_string_table);
__ mov(v0, result);
}
@@ -6182,6 +6433,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Branch(&runtime, ne, t0, Operand(zero_reg));
+ Label single_char;
+ __ Branch(&single_char, eq, a2, Operand(1));
+
// Short-cut for the case of trivial substring.
Label return_v0;
// v0: original string
@@ -6211,7 +6465,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Branch(&sliced_string, ne, t0, Operand(zero_reg));
// Cons string. Check whether it is flat, then fetch first part.
__ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
- __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(t0, Heap::kempty_stringRootIndex);
__ Branch(&runtime, ne, t1, Operand(t0));
__ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
// Update instance type.
@@ -6250,7 +6504,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyways due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ And(t0, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
@@ -6288,12 +6542,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sequential_string);
// Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
- __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&allocate_result);
  // Sequential ASCII string. Allocate the result.
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ And(t0, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
@@ -6304,13 +6558,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Addu(t1, t1, a3);
// Locate first character of result.
- __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// v0: result string
// a1: first character of result string
// a2: result string length
// t1: first character of substring to copy
- STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_v0);
@@ -6342,6 +6596,18 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+ __ bind(&single_char);
+ // v0: original string
+ // a1: instance type
+ // a2: length
+ // a3: from index (untagged)
+ __ SmiTag(a3, a3);
+ StringCharAtGenerator generator(
+ v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ DropAndRet(3);
+ generator.SkipSlow(masm, &runtime);
}
@@ -6442,7 +6708,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiUntag(length);
__ Addu(scratch1, length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ Addu(left, left, Operand(scratch1));
__ Addu(right, right, Operand(scratch1));
__ Subu(length, zero_reg, length);
@@ -6582,8 +6848,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Adding two lengths can't overflow.
STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
__ Addu(t2, a2, Operand(a3));
- // Use the symbol table when adding two one character strings, as it
- // helps later optimizations to return a symbol here.
+ // Use the string table when adding two one character strings, as it
+ // helps later optimizations to return a string here.
__ Branch(&longer_than_two, ne, t2, Operand(2));
// Check that both strings are non-external ASCII strings.
@@ -6597,13 +6863,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&call_runtime);
// Get the two characters forming the sub string.
- __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
- __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
+ __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize));
+ __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize));
- // Try to lookup two character string in symbol table. If it is not found
+ // Try to lookup two character string in string table. If it is not found
// just allocate a new one.
Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ StringHelper::GenerateTwoCharacterStringTableProbe(
masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
__ DropAndRet(2);
@@ -6616,7 +6882,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// in a little endian mode).
__ li(t2, Operand(2));
__ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+ __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
__ DropAndRet(2);
@@ -6663,11 +6929,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ And(at, t0, Operand(kAsciiDataHintMask));
__ and_(at, at, t1);
__ Branch(&ascii_data, ne, at, Operand(zero_reg));
-
- __ xor_(t0, t0, t1);
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ Xor(t0, t0, Operand(t1));
+ STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
+ __ And(t0, t0, Operand(kOneByteStringTag | kAsciiDataHintTag));
+ __ Branch(&ascii_data, eq, t0,
+ Operand(kOneByteStringTag | kAsciiDataHintTag));
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
@@ -6700,11 +6966,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t4, t0, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
Label skip_first_add;
__ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &first_prepared);
- __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ bind(&skip_first_add);
// External string: rule out short external string and load string resource.
STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -6715,11 +6981,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t4, t1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
Label skip_second_add;
__ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &second_prepared);
- __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ bind(&skip_second_add);
// External string: rule out short external string and load string resource.
STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -6740,7 +7006,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
__ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// v0: result string.
// t3: first character of first string.
// a1: first character of second string
@@ -6828,7 +7094,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ Or(a2, a1, a0);
__ JumpIfNotSmi(a2, &miss);
@@ -6849,30 +7115,53 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- __ And(a2, a1, Operand(a0));
- __ JumpIfSmi(a2, &generic_stub);
- __ GetObjectType(a0, a2, a2);
- __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE));
- __ GetObjectType(a1, a2, a2);
- __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(a1, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(a0, &miss);
+ }
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or FPU is unsupported.
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
// Load left and right operand.
- __ Subu(a2, a1, Operand(kHeapObjectTag));
- __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(a0, &right_smi);
+ __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
__ Subu(a2, a0, Operand(kHeapObjectTag));
__ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Branch(&left);
+ __ bind(&right_smi);
+ __ SmiUntag(a2, a0); // Can't clobber a0 yet.
+ FPURegister single_scratch = f6;
+ __ mtc1(a2, single_scratch);
+ __ cvt_d_w(f2, single_scratch);
+
+ __ bind(&left);
+ __ JumpIfSmi(a1, &left_smi);
+ __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ Subu(a2, a1, Operand(kHeapObjectTag));
+ __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Branch(&done);
+ __ bind(&left_smi);
+ __ SmiUntag(a2, a1); // Can't clobber a1 yet.
+ single_scratch = f8;
+ __ mtc1(a2, single_scratch);
+ __ cvt_d_w(f0, single_scratch);
+
+ __ bind(&done);
// Return a result of -1, 0, or 1, or use CompareStub for NaNs.
Label fpu_eq, fpu_lt;
@@ -6896,15 +7185,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
__ bind(&unordered);
-
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
__ bind(&generic_stub);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&miss, ne, a0, Operand(at));
+ __ JumpIfSmi(a1, &unordered);
__ GetObjectType(a1, a2, a2);
__ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
__ jmp(&unordered);
@@ -6921,8 +7211,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
Label miss;
// Registers containing left and right operands respectively.
@@ -6934,14 +7224,14 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
// Check that both operands are heap objects.
__ JumpIfEitherSmi(left, right, &miss);
- // Check that both operands are symbols.
+ // Check that both operands are internalized strings.
__ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
__ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
__ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ And(tmp1, tmp1, Operand(tmp2));
- __ And(tmp1, tmp1, kIsSymbolMask);
+ __ And(tmp1, tmp1, kIsInternalizedMask);
__ Branch(&miss, eq, tmp1, Operand(zero_reg));
// Make sure a0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
@@ -6949,7 +7239,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ mov(v0, right);
- // Symbols are compared by identity.
+ // Internalized strings are compared by identity.
__ Ret(ne, left, Operand(right));
__ li(v0, Operand(Smi::FromInt(EQUAL)));
__ Ret();
@@ -6959,8 +7249,62 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
}
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ ASSERT(GetCondition() == eq);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+ Register tmp1 = a2;
+ Register tmp2 = a3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ STATIC_ASSERT(kInternalizedTag != 0);
+ __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+
+ Label succeed1;
+ __ And(at, tmp1, Operand(kIsInternalizedMask));
+ __ Branch(&succeed1, ne, at, Operand(zero_reg));
+ __ Branch(&miss, ne, tmp1, Operand(SYMBOL_TYPE));
+ __ bind(&succeed1);
+
+ Label succeed2;
+ __ And(at, tmp2, Operand(kIsInternalizedMask));
+ __ Branch(&succeed2, ne, at, Operand(zero_reg));
+ __ Branch(&miss, ne, tmp2, Operand(SYMBOL_TYPE));
+ __ bind(&succeed2);
+
+ // Use a0 as result
+ __ mov(v0, a0);
+
+ // Unique names are compared by identity.
+ Label done;
+ __ Branch(&done, ne, left, Operand(right));
+ // Make sure a0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(a0));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ bind(&done);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
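The two succeed labels above implement one predicate: a value counts as a
unique name if its instance type has the internalized bit set, or if it is a
Symbol. A sketch of that test (the constant values below are placeholders for
illustration; the real ones live in v8's objects.h):

    #include <cstdint>

    constexpr uint8_t kIsInternalizedMaskSketch = 0x10;  // placeholder value
    constexpr uint8_t kSymbolTypeSketch = 0x80;          // placeholder value

    inline bool IsUniqueName(uint8_t instance_type) {
      // Pass if internalized; otherwise pass only for SYMBOL_TYPE; otherwise
      // the stub above falls through to GenerateMiss.
      return (instance_type & kIsInternalizedMaskSketch) != 0 ||
             instance_type == kSymbolTypeSketch;
    }
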
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -6999,13 +7343,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle not identical strings.
- // Check that both strings are symbols. If they are, we're done
+ // Check that both strings are internalized strings. If they are, we're done
// because we already know they are not identical.
if (equality) {
ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ And(tmp3, tmp1, Operand(tmp2));
- __ And(tmp5, tmp3, Operand(kIsSymbolMask));
+ __ And(tmp5, tmp3, Operand(kIsInternalizedMask));
Label is_symbol;
__ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
// Make sure a0 is non-zero. At this point input operands are
@@ -7045,7 +7389,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &miss);
@@ -7149,10 +7493,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
// Push return address (accessible to GC through exit frame pc).
// This spot for ra was reserved in EnterExitFrame.
masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
- masm->li(ra,
- Operand(reinterpret_cast<intptr_t>(GetCode().location()),
- RelocInfo::CODE_TARGET),
- CONSTANT_SIZE);
+ intptr_t loc =
+ reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ masm->li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
// Call the function.
masm->Jump(t9);
// Make sure the stored 'ra' points to this position.
@@ -7160,13 +7503,14 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
}
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<String> name,
- Register scratch0) {
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0) {
+ ASSERT(name->IsUniqueName());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
@@ -7180,10 +7524,10 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ lw(index, FieldMemOperand(properties, kCapacityOffset));
__ Subu(index, index, Operand(1));
__ And(index, index, Operand(
- Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+ Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
__ sll(at, index, 1);
__ Addu(index, index, at);
@@ -7204,19 +7548,20 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
// Stop if found the property.
- __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
+ __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
- Label the_hole;
- __ Branch(&the_hole, eq, entity_name, Operand(tmp));
+ Label good;
+ __ Branch(&good, eq, entity_name, Operand(tmp));
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not a unique name.
__ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ lbu(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ And(scratch0, entity_name, Operand(kIsSymbolMask));
- __ Branch(miss, eq, scratch0, Operand(zero_reg));
+ __ And(scratch0, entity_name, Operand(kIsInternalizedMask));
+ __ Branch(&good, ne, scratch0, Operand(zero_reg));
+ __ Branch(miss, ne, entity_name, Operand(SYMBOL_TYPE));
- __ bind(&the_hole);
+ __ bind(&good);
// Restore the properties.
__ lw(properties,
@@ -7230,8 +7575,8 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ MultiPush(spill_mask);
__ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ li(a1, Operand(Handle<String>(name)));
- StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ __ li(a1, Operand(Handle<Name>(name)));
+ NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
__ CallStub(&stub);
__ mov(at, v0);
__ MultiPop(spill_mask);
@@ -7241,23 +7586,23 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
}
-// Probe the string dictionary in the |elements| register. Jump to the
+// Probe the name dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
+void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
ASSERT(!elements.is(scratch1));
ASSERT(!elements.is(scratch2));
ASSERT(!name.is(scratch1));
ASSERT(!name.is(scratch2));
- __ AssertString(name);
+ __ AssertName(name);
// Compute the capacity mask.
__ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
@@ -7269,21 +7614,21 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
// cover ~93% of loads from dictionaries.
for (int i = 0; i < kInlinedProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+ __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
__ Addu(scratch2, scratch2, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
}
- __ srl(scratch2, scratch2, String::kHashShift);
+ __ srl(scratch2, scratch2, Name::kHashShift);
__ And(scratch2, scratch1, scratch2);
// Scale the index by multiplying by the element size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ sll(at, scratch2, 1);
@@ -7310,7 +7655,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ Move(a0, elements);
__ Move(a1, name);
}
- StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
__ CallStub(&stub);
__ mov(scratch2, a2);
__ mov(at, v0);
@@ -7321,15 +7666,15 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
}
-void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
// Registers:
- // result: StringDictionary to probe
+ // result: NameDictionary to probe
// a1: key
- // : StringDictionary to probe.
- // index_: will hold an index of entry if lookup is successful.
- // might alias with result_.
+ // dictionary: NameDictionary to probe.
+ // index: will hold an index of entry if lookup is successful.
+ // might alias with result_.
// Returns:
// result_ is zero if lookup failed, non zero otherwise.
@@ -7348,7 +7693,7 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ sra(mask, mask, kSmiTagSize);
__ Subu(mask, mask, Operand(1));
- __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
+ __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
@@ -7359,18 +7704,18 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
__ Addu(index, hash, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
} else {
__ mov(index, hash);
}
- __ srl(index, index, String::kHashShift);
+ __ srl(index, index, Name::kHashShift);
__ And(index, mask, index);
// Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
+ ASSERT(NameDictionary::kEntrySize == 3);
// index *= 3.
__ mov(at, index);
__ sll(index, index, 1);
@@ -7389,12 +7734,15 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ Branch(&in_dictionary, eq, entry_key, Operand(key));
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not a unique name.
+ Label cont;
__ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ lbu(entry_key,
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ And(result, entry_key, Operand(kIsSymbolMask));
- __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
+ __ And(result, entry_key, Operand(kIsInternalizedMask));
+ __ Branch(&cont, ne, result, Operand(zero_reg));
+ __ Branch(&maybe_in_dictionary, ne, entry_key, Operand(SYMBOL_TYPE));
+ __ bind(&cont);
}
}
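Illustrative sketch, not part of the patch: the probe sequence that GeneratePositiveLookup and Generate encode in assembly above, written out in plain C++. Here hash means the name's hash after the kHashShift right shift (the stubs fold that shift and the probe-offset addition into a single srl), the comments describe GetProbeOffset(i) as i + i * i, and dictionary capacities are powers of two.

uint32_t ProbeIndex(uint32_t hash, uint32_t capacity, int i) {
  uint32_t mask = capacity - 1;                       // capacity is a power of two
  uint32_t entry = (hash + NameDictionary::GetProbeOffset(i)) & mask;
  return entry * NameDictionary::kEntrySize;          // "index *= 3" in the stubs
}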
@@ -7427,7 +7775,6 @@ struct AheadOfTimeWriteBarrierStubList {
static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
{ REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
- { REG(s2), REG(a2), REG(t3), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
@@ -7483,13 +7830,14 @@ bool StoreBufferOverflowStub::IsPregenerated() {
}
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
}
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -7498,7 +7846,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
entry->address,
entry->action,
kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
}
}
@@ -7600,12 +7948,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
ASSERT(!address.is(a0));
__ Move(address, regs_.address());
__ Move(a0, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- __ Move(a1, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ lw(a1, MemOperand(address, 0));
- }
+ __ Move(a1, address);
__ li(a2, Operand(ExternalReference::isolate_address()));
AllowExternalCallThatCantCauseGC scope(masm);
@@ -7767,7 +8110,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3, a1,
+ __ StoreNumberToDoubleElements(a0, a3,
// Overwrites all regs after this.
t1, t2, t3, t5, a2,
&slow_elements);
@@ -7776,6 +8119,21 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
}
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ ASSERT(!Serializer::enabled());
+ bool save_fp_regs = CpuFeatures::IsSupported(FPU);
+ CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ lw(a1, MemOperand(fp, parameter_count_offset));
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ sll(a1, a1, kPointerSizeLog2);
+ __ Addu(sp, sp, a1);
+ __ Ret();
+}
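The epilogue of the new trampoline removes the caller's stack parameters: the count loaded from the frame is scaled by kPointerSizeLog2 and added to sp before the Ret. A minimal sketch of that adjustment (illustrative only; kPointerSizeLog2 is 2 on 32-bit MIPS, so each slot is 4 bytes):

uint32_t PopCallerStackParameters(uint32_t sp, uint32_t parameter_count) {
  return sp + (parameter_count << kPointerSizeLog2);  // sll a1, ...; Addu sp, sp, a1
}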
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (entry_hook_ != NULL) {
ProfileEntryHookStub stub;
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index e0954d837e..225accc518 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -37,7 +37,7 @@ namespace internal {
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
@@ -59,7 +59,7 @@ class TranscendentalCacheStub: public CodeStub {
};
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
@@ -67,7 +67,7 @@ class StoreBufferOverflowStub: public CodeStub {
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
@@ -78,7 +78,7 @@ class StoreBufferOverflowStub: public CodeStub {
};
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
@@ -120,9 +120,9 @@ class UnaryOpStub: public CodeStub {
void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateHeapNumberStubSub(MacroAssembler* masm);
- void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateNumberStub(MacroAssembler* masm);
+ void GenerateNumberStubSub(MacroAssembler* masm);
+ void GenerateNumberStubBitNot(MacroAssembler* masm);
void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
@@ -143,108 +143,6 @@ class UnaryOpStub: public CodeStub {
};
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- use_fpu_ = CpuFeatures::IsSupported(FPU);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_fpu_(FPUBits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_fpu_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class FPUBits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FPUBits::encode(use_fpu_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiSmiOperation(MacroAssembler* masm);
- void GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -275,14 +173,14 @@ class StringHelper : public AllStatic {
int flags);
- // Probe the symbol table for a two character string. If the string is
+ // Probe the string table for a two character string. If the string is
// not found by probing, a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
+ // does not guarantee that the string is not in the string table. If the
// string is found the code falls through with the string in register r0.
// Contents of both c1 and c2 registers are modified. At the exit c1 is
// guaranteed to contain halfword with low and high bytes equal to
// initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -322,7 +220,7 @@ enum StringAddFlags {
};
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -345,7 +243,7 @@ class StringAddStub: public CodeStub {
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -357,7 +255,7 @@ class SubStringStub: public CodeStub {
};
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
@@ -398,7 +296,7 @@ class StringCompareStub: public CodeStub {
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
+class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
@@ -415,7 +313,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
}
bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
Register the_int_;
@@ -442,7 +340,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
};
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
public:
NumberToStringStub() { }
@@ -468,7 +366,7 @@ class NumberToStringStub: public CodeStub {
};
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
@@ -492,7 +390,7 @@ class RecordWriteStub: public CodeStub {
};
virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
@@ -586,7 +484,7 @@ class RecordWriteStub: public CodeStub {
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
masm->MultiPushFPU(kCallerSavedFPU);
}
}
@@ -594,7 +492,7 @@ class RecordWriteStub: public CodeStub {
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
masm->MultiPopFPU(kCallerSavedFPU);
}
masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
@@ -614,7 +512,7 @@ class RecordWriteStub: public CodeStub {
Register GetRegThatIsNotOneOf(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
@@ -673,7 +571,7 @@ class RecordWriteStub: public CodeStub {
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM and MIPS.
-class RegExpCEntryStub: public CodeStub {
+class RegExpCEntryStub: public PlatformCodeStub {
public:
RegExpCEntryStub() {}
virtual ~RegExpCEntryStub() {}
@@ -691,7 +589,7 @@ class RegExpCEntryStub: public CodeStub {
// keep the code which called into native pinned in the memory. Currently the
// simplest approach is to generate such stub early enough so it can never be
// moved by GC
-class DirectCEntryStub: public CodeStub {
+class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
@@ -724,20 +622,6 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2);
- // Loads objects from a0 and a1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
- // is floating point registers FPU must be supported. If core registers are
- // requested when FPU is supported f12 and f14 will still be scratched. If
- // either a0 or a1 is not a number (not smi and not heap number object) the
- // not_number label is jumped to with a0 and a1 intact.
- static void LoadOperands(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-
// Convert the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
@@ -773,6 +657,7 @@ class FloatingPointHelper : public AllStatic {
Register object,
Destination destination,
FPURegister double_dst,
+ FPURegister double_scratch,
Register dst1,
Register dst2,
Register heap_number_map,
@@ -794,7 +679,8 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2,
Register scratch3,
- FPURegister double_scratch,
+ FPURegister double_scratch0,
+ FPURegister double_scratch1,
Label* not_int32);
// Generate non FPU code to check if a double can be exactly represented by a
@@ -834,7 +720,12 @@ class FloatingPointHelper : public AllStatic {
Register heap_number_result,
Register scratch);
- private:
+ // Loads the objects from |object| into floating point registers.
+ // Depending on |destination| the value ends up either in |dst| or
+ // in |dst1|/|dst2|. If |destination| is kFPURegisters, then FPU
+ // must be supported. If kCoreRegisters are requested and FPU is
+ // supported, |dst| will be scratched. If |object| is neither smi nor
+ // heap number, |not_number| is jumped to with |object| still intact.
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register object,
@@ -848,11 +739,11 @@ class FloatingPointHelper : public AllStatic {
};
-class StringDictionaryLookupStub: public CodeStub {
+class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+ explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
void Generate(MacroAssembler* masm);
@@ -861,7 +752,7 @@ class StringDictionaryLookupStub: public CodeStub {
Label* done,
Register receiver,
Register properties,
- Handle<String> name,
+ Handle<Name> name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
@@ -879,14 +770,14 @@ class StringDictionaryLookupStub: public CodeStub {
static const int kTotalProbes = 20;
static const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryLookup; }
+ Major MajorKey() { return NameDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 44e0359e44..f5cb5e4892 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -31,11 +31,11 @@
#include "codegen.h"
#include "macro-assembler.h"
+#include "simulator-mips.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
@@ -49,6 +49,75 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
+#define __ masm.
+
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_mips_machine_code = NULL;
+double fast_exp_simulator(double x) {
+ return Simulator::current(Isolate::Current())->CallFP(
+ fast_exp_mips_machine_code, x, 0);
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!CpuFeatures::IsSupported(FPU)) return &exp;
+ if (!FLAG_fast_math) return &exp;
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &exp;
+ ExternalReference::InitializeMathExpData();
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ {
+ CpuFeatureScope use_fpu(&masm, FPU);
+ DoubleRegister input = f12;
+ DoubleRegister result = f0;
+ DoubleRegister double_scratch1 = f4;
+ DoubleRegister double_scratch2 = f6;
+ Register temp1 = t0;
+ Register temp2 = t1;
+ Register temp3 = t2;
+
+ if (!IsMipsSoftFloatABI) {
+ // Input value is in f12 anyway, nothing to do.
+ } else {
+ __ Move(input, a0, a1);
+ }
+ __ Push(temp3, temp2, temp1);
+ MathExpGenerator::EmitMathExp(
+ &masm, input, result, double_scratch1, double_scratch2,
+ temp1, temp2, temp3);
+ __ Pop(temp3, temp2, temp1);
+ if (!IsMipsSoftFloatABI) {
+ // Result is already in f0, nothing to do.
+ } else {
+ __ Move(a0, a1, result);
+ }
+ __ Ret();
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+ fast_exp_mips_machine_code = buffer;
+ return &fast_exp_simulator;
+#endif
+}
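CreateExpFunction returns a plain function pointer: libc's exp when FPU support, --fast-math, or the buffer allocation is unavailable, otherwise the freshly assembled code (routed through fast_exp_simulator under USE_SIMULATOR). A hypothetical caller, assuming only that UnaryMathFunction is the double (*)(double) typedef implied by the signatures above:

// Install the generated function once, then call it like the exp() it replaces.
static UnaryMathFunction fast_exp_impl = CreateExpFunction();

double FastExp(double x) {
  return fast_exp_impl(x);  // generated MIPS code, simulator thunk, or libc exp
}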
+
+
+#undef __
+
+
UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
@@ -72,8 +141,11 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
+#define __ ACCESS_MASM(masm)
+
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm) {
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_site_info_found) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -82,6 +154,12 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- a3 : target map, scratch for subsequent call
// -- t0 : scratch (elements)
// -----------------------------------
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_site_info_found != NULL);
+ masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq,
+ allocation_site_info_found);
+ }
+
// Set transitioned map.
__ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
__ RecordWriteField(a2,
@@ -96,7 +174,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -110,6 +188,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Register scratch = t6;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
@@ -176,7 +258,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
HeapObject::kMapOffset,
a3,
t5,
- kRAHasBeenSaved,
+ kRAHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -196,7 +278,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Normal smi, convert to double and store.
if (fpu_supported) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
__ mtc1(t5, f0);
__ cvt_d_w(f0, f0);
__ sdc1(f0, MemOperand(t3));
@@ -239,7 +321,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -250,6 +332,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// -----------------------------------
Label entry, loop, convert_hole, gc_required, only_change_map;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
@@ -389,7 +475,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// the string.
__ bind(&cons_string);
__ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ LoadRoot(at, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(at, Heap::kempty_stringRootIndex);
__ Branch(call_runtime, ne, result, Operand(at));
// Get the first of the two strings and load its instance type.
__ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
@@ -408,7 +494,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(&external_string, ne, at, Operand(zero_reg));
// Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Addu(string,
string,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
@@ -446,6 +532,196 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
+
+void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
+ String::Encoding encoding,
+ Register string,
+ Register index,
+ Register value) {
+ if (FLAG_debug_code) {
+ __ And(at, index, Operand(kSmiTagMask));
+ __ Check(eq, "Non-smi index", at, Operand(zero_reg));
+ __ And(at, value, Operand(kSmiTagMask));
+ __ Check(eq, "Non-smi value", at, Operand(zero_reg));
+
+ __ lw(at, FieldMemOperand(string, String::kLengthOffset));
+ __ Check(lt, "Index is too large", index, Operand(at));
+
+ __ Check(ge, "Index is negative", index, Operand(zero_reg));
+
+ __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+
+ __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, "Unexpected string type", at, Operand(zero_reg));
+ }
+
+ __ Addu(at,
+ string,
+ Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ __ SmiUntag(value);
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ SmiUntag(index);
+ __ Addu(at, at, index);
+ __ sb(value, MemOperand(at));
+ } else {
+ // No need to untag a smi for two-byte addressing.
+ __ Addu(at, at, index);
+ __ sh(value, MemOperand(at));
+ }
+}
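The split between the one-byte and two-byte stores above leans on the smi encoding asserted by the STATIC_ASSERT: with kSmiTag == 0 and a one-bit tag, a smi holding index is stored as index << 1, which is already the byte offset of element index in a two-byte string, so only the one-byte path needs SmiUntag. A small sketch of the arithmetic (illustrative only):

int32_t OneByteOffset(int32_t smi_index) { return smi_index >> 1; }  // SmiUntag: 1 byte per char
int32_t TwoByteOffset(int32_t smi_index) { return smi_index; }       // tagged value == index * 2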
+
+
+static MemOperand ExpConstant(int index, Register base) {
+ return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3) {
+ ASSERT(!input.is(result));
+ ASSERT(!input.is(double_scratch1));
+ ASSERT(!input.is(double_scratch2));
+ ASSERT(!result.is(double_scratch1));
+ ASSERT(!result.is(double_scratch2));
+ ASSERT(!double_scratch1.is(double_scratch2));
+ ASSERT(!temp1.is(temp2));
+ ASSERT(!temp1.is(temp3));
+ ASSERT(!temp2.is(temp3));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label done;
+
+ __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
+
+ __ ldc1(double_scratch1, ExpConstant(0, temp3));
+ __ Move(result, kDoubleRegZero);
+ __ BranchF(&done, NULL, ge, double_scratch1, input);
+ __ ldc1(double_scratch2, ExpConstant(1, temp3));
+ __ ldc1(result, ExpConstant(2, temp3));
+ __ BranchF(&done, NULL, ge, input, double_scratch2);
+ __ ldc1(double_scratch1, ExpConstant(3, temp3));
+ __ ldc1(result, ExpConstant(4, temp3));
+ __ mul_d(double_scratch1, double_scratch1, input);
+ __ add_d(double_scratch1, double_scratch1, result);
+ __ Move(temp2, temp1, double_scratch1);
+ __ sub_d(double_scratch1, double_scratch1, result);
+ __ ldc1(result, ExpConstant(6, temp3));
+ __ ldc1(double_scratch2, ExpConstant(5, temp3));
+ __ mul_d(double_scratch1, double_scratch1, double_scratch2);
+ __ sub_d(double_scratch1, double_scratch1, input);
+ __ sub_d(result, result, double_scratch1);
+ __ mul_d(input, double_scratch1, double_scratch1);
+ __ mul_d(result, result, input);
+ __ srl(temp1, temp2, 11);
+ __ ldc1(double_scratch2, ExpConstant(7, temp3));
+ __ mul_d(result, result, double_scratch2);
+ __ sub_d(result, result, double_scratch1);
+ __ ldc1(double_scratch2, ExpConstant(8, temp3));
+ __ add_d(result, result, double_scratch2);
+ __ li(at, 0x7ff);
+ __ And(temp2, temp2, at);
+ __ Addu(temp1, temp1, Operand(0x3ff));
+ __ sll(temp1, temp1, 20);
+
+ // Must not call ExpConstant() after overwriting temp3!
+ __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
+ __ sll(at, temp2, 3);
+ __ addu(at, at, temp3);
+ __ lw(at, MemOperand(at));
+ __ Addu(temp3, temp3, Operand(kPointerSize));
+ __ sll(temp2, temp2, 3);
+ __ addu(temp2, temp2, temp3);
+ __ lw(temp2, MemOperand(temp2));
+ __ Or(temp1, temp1, temp2);
+ __ Move(input, at, temp1);
+ __ mul_d(result, result, input);
+ __ bind(&done);
+}
+
+
+// nop(CODE_AGE_MARKER_NOP)
+static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
+
+static byte* GetNoCodeAgeSequence(uint32_t* length) {
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found in FUNCTIONS
+ static bool initialized = false;
+ static uint32_t sequence[kNoCodeAgeSequenceLength];
+ byte* byte_sequence = reinterpret_cast<byte*>(sequence);
+ *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
+ if (!initialized) {
+ CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
+ patcher.masm()->Push(ra, fp, cp, a1);
+ patcher.masm()->LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize));
+ initialized = true;
+ }
+ return byte_sequence;
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ bool result = !memcmp(sequence, young_sequence, young_length);
+ ASSERT(result ||
+ Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ Address target_address = Memory::Address_at(
+ sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (age == kNoAge) {
+ memcpy(sequence, young_sequence, young_length);
+ CPU::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(age, parity);
+ CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ // Mark this code sequence for FindPlatformCodeAgeSequence()
+ patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
+ // Save the function's original return address
+ // (it will be clobbered by Call(t9))
+ patcher.masm()->mov(at, ra);
+ // Load the stub address to t9 and call it
+ patcher.masm()->li(t9,
+ Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
+ patcher.masm()->Call(t9);
+ // Record the stub address in the empty space for GetCodeAgeAndParity()
+ patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ }
+}
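PatchPlatformCodeAge ends the aged prologue with patcher.masm()->dd(stub->instruction_start()), and GetCodeAgeAndParity reads that word back from the last slot of the kNoCodeAgeSequenceLength-instruction sequence. A sketch of that readback, using only names that appear in the hunk (illustrative, not a new API):

static Address CodeAgeStubAddress(byte* sequence) {
  // The stub address recorded by dd(...) sits in the final instruction slot.
  return Memory::Address_at(
      sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
}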
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index e704c4f56c..d429443a88 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -46,6 +46,10 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
+ CodeGenerator() {
+ InitializeAstVisitor();
+ }
+
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
@@ -70,6 +74,8 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
@@ -90,6 +96,22 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
+
+class MathExpGenerator : public AllStatic {
+ public:
+ static void EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index 7d654f6d62..ddfa891326 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -302,6 +302,8 @@ Instruction::Type Instruction::InstructionType() const {
return kRegisterType;
};
break;
+ case COP1X:
+ return kRegisterType;
// 16 bits Immediate type instructions. e.g.: addi dest, src, imm16.
case REGIMM:
case BEQ:
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 3d585717cb..139e7db033 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -99,7 +99,7 @@ const int kInvalidFPURegister = -1;
// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
const int kFCSRRegister = 31;
const int kInvalidFPUControlRegister = -1;
-const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
// FCSR constants.
const uint32_t kFCSRInexactFlagBit = 2;
@@ -216,6 +216,8 @@ const int kImm28Bits = 28;
// and are therefore shifted by 2.
const int kImmFieldShift = 2;
+const int kFrBits = 5;
+const int kFrShift = 21;
const int kFsShift = 11;
const int kFsBits = 5;
const int kFtShift = 16;
@@ -295,7 +297,9 @@ enum Opcode {
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
SWC1 = ((7 << 3) + 1) << kOpcodeShift,
- SDC1 = ((7 << 3) + 5) << kOpcodeShift
+ SDC1 = ((7 << 3) + 5) << kOpcodeShift,
+
+ COP1X = ((1 << 4) + 3) << kOpcodeShift
};
enum SecondaryField {
@@ -416,6 +420,8 @@ enum SecondaryField {
CVT_S_L = ((4 << 3) + 0),
CVT_D_L = ((4 << 3) + 1),
// COP1 Encoding of Function Field When rs=PS.
+ // COP1X Encoding of Function Field.
+ MADD_D = ((4 << 3) + 1),
NULLSF = 0
};
@@ -423,7 +429,9 @@ enum SecondaryField {
// ----- Emulated conditions.
// On MIPS we use this enum to abstract from conditional branch instructions.
-// the 'U' prefix is used to specify unsigned comparisons.
+// The 'U' prefix is used to specify unsigned comparisons.
+// Opposite conditions must be paired as odd/even numbers
+// because 'NegateCondition' function flips LSB to negate condition.
enum Condition {
// Any value < 0 is considered no_condition.
kNoCondition = -1,
@@ -444,8 +452,10 @@ enum Condition {
greater_equal = 13,
less_equal = 14,
greater = 15,
+ ueq = 16, // Unordered or Equal.
+ nue = 17, // Not (Unordered or Equal).
- cc_always = 16,
+ cc_always = 18,
// Aliases.
carry = Uless,
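The odd/even pairing called out in the new comment is what lets condition negation be a single bit flip. A sketch of such a NegateCondition helper, consistent with the pairs visible above (less_equal 14 / greater 15, ueq 16 / nue 17); the actual helper lives elsewhere in the tree and is not part of this hunk:

inline Condition NegateCondition(Condition cc) {
  ASSERT(cc != cc_always);                 // cc_always (18) has no paired opposite
  return static_cast<Condition>(cc ^ 1);   // e.g. less_equal(14) <-> greater(15)
}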
@@ -677,6 +687,10 @@ class Instruction {
return Bits(kFtShift + kFtBits - 1, kFtShift);
}
+ inline int FrValue() const {
+ return Bits(kFrShift + kFrBits - 1, kFrShift);
+ }
+
// Float Compare condition code instruction bits.
inline int FCccValue() const {
return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
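The new fr field (bits 25..21, via kFrShift/kFrBits) exists to decode COP1X instructions such as madd.d, which this patch also adds (MADD_D above, plus the disassembler and simulator COP1X cases below). A hypothetical recognizer, assuming the usual Instruction::OpcodeFieldRaw() accessor alongside the FunctionFieldRaw() and FrValue() calls shown in this diff; on MIPS, madd.d fd, fr, fs, ft computes fd = fs * ft + fr:

bool IsMaddD(Instruction* instr) {
  return instr->OpcodeFieldRaw() == COP1X &&
         instr->FunctionFieldRaw() == MADD_D;  // operands via FrValue()/FsValue()/FtValue()/FdValue()
}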
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 9fd815bb42..7158e4f551 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -42,11 +42,14 @@ int Deoptimizer::patch_size() {
}
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- HandleScope scope;
+void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
+ JSFunction* function) {
+ Isolate* isolate = function->GetIsolate();
+ HandleScope scope(isolate);
AssertNoAllocation no_allocation;
- if (!function->IsOptimized()) return;
+ ASSERT(function->IsOptimized());
+ ASSERT(function->FunctionsInFunctionListShareSameCode());
// The optimized code is going to be patched, so we cannot use it
// any more. Play safe and reset the whole cache.
@@ -70,14 +73,14 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
- Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
- RelocInfo::NONE);
+ RelocInfo::NONE32);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
CodePatcher patcher(call_address, call_size_in_words);
- patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
+ patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
@@ -87,8 +90,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
#endif
}
- Isolate* isolate = code->GetIsolate();
-
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
DeoptimizerData* data = isolate->deoptimizer_data();
@@ -120,7 +121,7 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
- // This structure comes from FullCodeGenerator::EmitStackCheck.
+ // This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
// The call of the stack guard check has the following form:
// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
// beq at, zero_reg, ok
@@ -170,11 +171,7 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- if (FLAG_count_based_interrupts) {
- patcher.masm()->slt(at, a3, zero_reg);
- } else {
- patcher.masm()->sltu(at, sp, t0);
- }
+ patcher.masm()->slt(at, a3, zero_reg);
// Replace the on-stack replacement address in the load-immediate (lui/ori
// pair) with the entry address of the normal stack-check code.
@@ -209,7 +206,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
@@ -243,7 +240,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -335,7 +332,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
+ compiled_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
@@ -352,342 +349,6 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
}
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index) {
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
- // Arguments adaptor can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // A marker value is used in place of the context.
- output_offset -= kPointerSize;
- intptr_t context = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- output_frame->SetFrameSlot(output_offset, context);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
- top_address + output_offset, output_offset, context);
- }
-
- // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- ASSERT(0 == output_offset);
-
- Builtins* builtins = isolate_->builtins();
- Code* adaptor_trampoline =
- builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
- uint32_t pc = reinterpret_cast<uint32_t>(
- adaptor_trampoline->instruction_start() +
- isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = 8 * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
- // Construct stub can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The output frame reflects a JSConstructStubGeneric frame.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(construct_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- // Constructor function being invoked by the stub.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
- top_address + output_offset, output_offset, value);
- }
-
- ASSERT(0 == output_offset);
-
- uint32_t pc = reinterpret_cast<uint32_t>(
- construct_stub->instruction_start() +
- isolate_->heap()->construct_stub_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
- bool is_setter_stub_frame) {
- JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
- // The receiver (and the implicit return value, if any) are expected in
- // registers by the LoadIC/StoreIC, so they don't belong to the output stack
- // frame. This means that we have to use a height of 0.
- unsigned height = 0;
- unsigned height_in_bytes = height * kPointerSize;
- const char* kind = is_setter_stub_frame ? "setter" : "getter";
- if (FLAG_trace_deopt) {
- PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
- }
-
- // We need 5 stack entries from StackFrame::INTERNAL (ra, fp, cp, frame type,
- // code object, see MacroAssembler::EnterFrame). For a setter stub frame we
- // need one additional entry for the implicit return value, see
- // StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0);
- unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, accessor);
- output_frame->SetFrameType(StackFrame::INTERNAL);
-
- // A frame for an accessor stub can not be the topmost or bottommost one.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous frame's top and
- // this frame's size.
- uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- unsigned output_offset = output_frame_size;
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; function (%s sentinel)\n",
- top_address + output_offset, output_offset, value, kind);
- }
-
- // Get Code object from accessor stub.
- output_offset -= kPointerSize;
- Builtins::Name name = is_setter_stub_frame ?
- Builtins::kStoreIC_Setter_ForDeopt :
- Builtins::kLoadIC_Getter_ForDeopt;
- Code* accessor_stub = isolate_->builtins()->builtin(name);
- value = reinterpret_cast<intptr_t>(accessor_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Skip receiver.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
-
- if (is_setter_stub_frame) {
- // The implicit return value was part of the artificial setter stub
- // environment.
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- ASSERT(0 == output_offset);
-
- Smi* offset = is_setter_stub_frame ?
- isolate_->heap()->setter_stub_deopt_pc_offset() :
- isolate_->heap()->getter_stub_deopt_pc_offset();
- intptr_t pc = reinterpret_cast<intptr_t>(
- accessor_stub->instruction_start() + offset->value());
- output_frame->SetPc(pc);
-}
-
-
// This code is very similar to ia32/arm code, but relies on register names
// (fp, sp) and how the frame is laid out.
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
@@ -705,7 +366,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" translating ");
function->PrintName();
PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
@@ -769,7 +430,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, value);
}
@@ -792,7 +453,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
if (is_topmost) {
output_frame->SetRegister(fp.code(), fp_value);
}
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
@@ -810,7 +471,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
output_frame->SetFrameSlot(output_offset, value);
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(cp.code(), value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
}
@@ -823,7 +484,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
top_address + output_offset, output_offset, value);
}
@@ -871,7 +532,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -882,6 +543,29 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ ApiFunction function(descriptor->deoptimization_handler_);
+ ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+ intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+ int params = descriptor->register_param_count_;
+ if (descriptor->stack_parameter_count_ != NULL) {
+ params++;
+ }
+ output_frame->SetRegister(s0.code(), params);
+ output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
+ output_frame->SetRegister(s2.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+}
+
+
#define __ masm()->
@@ -892,7 +576,6 @@ void Deoptimizer::EntryGenerator::Generate() {
Isolate* isolate = masm()->isolate();
- CpuFeatures::Scope scope(FPU);
// Unlike on ARM we don't save all the registers, just the useful ones.
// For the rest, there are gaps on the stack, so the offsets remain the same.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -901,14 +584,19 @@ void Deoptimizer::EntryGenerator::Generate() {
RegList saved_regs = restored_regs | sp.bit() | ra.bit();
const int kDoubleRegsSize =
- kDoubleSize * FPURegister::kNumAllocatableRegisters;
-
- // Save all FPU registers before messing with them.
- __ Subu(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
- FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ sdc1(fpu_reg, MemOperand(sp, offset));
+ kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatureScope scope(masm(), FPU);
+ // Save all FPU registers before messing with them.
+ __ Subu(sp, sp, Operand(kDoubleRegsSize));
+ for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+ FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ sdc1(fpu_reg, MemOperand(sp, offset));
+ }
+ } else {
+ __ Subu(sp, sp, Operand(kDoubleRegsSize));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
@@ -980,14 +668,17 @@ void Deoptimizer::EntryGenerator::Generate() {
}
}
- // Copy FPU registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ ldc1(f0, MemOperand(sp, src_offset));
- __ sdc1(f0, MemOperand(a1, dst_offset));
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatureScope scope(masm(), FPU);
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ ldc1(f0, MemOperand(sp, src_offset));
+ __ sdc1(f0, MemOperand(a1, dst_offset));
+ }
}
// Remove the bailout id, eventually return address, and the saved registers
@@ -1008,11 +699,14 @@ void Deoptimizer::EntryGenerator::Generate() {
// frame description.
__ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
+ Label pop_loop_header;
+ __ Branch(&pop_loop_header);
__ bind(&pop_loop);
__ pop(t0);
__ sw(t0, MemOperand(a3, 0));
- __ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
- __ addiu(a3, a3, sizeof(uint32_t)); // In delay slot.
+ __ addiu(a3, a3, sizeof(uint32_t));
+ __ bind(&pop_loop_header);
+ __ Branch(&pop_loop, ne, a2, Operand(sp));
// Compute the output frame in the deoptimizer.
__ push(a0); // Preserve deoptimizer object across call.
@@ -1027,27 +721,42 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop;
- // Outer loop state: a0 = current "FrameDescription** output_",
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
+ // Outer loop state: t0 = current "FrameDescription** output_",
// a1 = one past the last FrameDescription**.
__ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
- __ lw(a0, MemOperand(a0, Deoptimizer::output_offset())); // a0 is output_.
+ __ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_.
__ sll(a1, a1, kPointerSizeLog2); // Count to offset.
- __ addu(a1, a0, a1); // a1 = one past the last FrameDescription**.
+ __ addu(a1, t0, a1); // a1 = one past the last FrameDescription**.
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
- __ lw(a2, MemOperand(a0, 0)); // output_[ix]
+ __ lw(a2, MemOperand(t0, 0)); // output_[ix]
__ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ Subu(a3, a3, Operand(sizeof(uint32_t)));
__ Addu(t2, a2, Operand(a3));
__ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
__ push(t3);
+ __ bind(&inner_loop_header);
__ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
- __ Addu(a0, a0, Operand(kPointerSize));
- __ Branch(&outer_push_loop, lt, a0, Operand(a1));
+ __ Addu(t0, t0, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ Branch(&outer_push_loop, lt, t0, Operand(a1));
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatureScope scope(masm(), FPU);
+
+ __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+ const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ ldc1(fpu_reg, MemOperand(a1, src_offset));
+ }
+ }
// Push state, pc, and continuation from the last output frame.
if (type() != OSR) {
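The deoptimizer hunks above fall into two groups: plain C++ that fills in stub-failure registers and mirrors double registers between frame descriptions, and generated assembly that now only touches FPU registers when the FPU is actually available. The following standalone C++ sketch models the two new C++ helpers; FrameDescription, the register count and kPointerSize are simplified stand-ins, not the real V8 declarations.

#include <array>
#include <cstdint>

namespace sketch {

constexpr int kPointerSize = 4;             // MIPS32 word size
constexpr int kMaxNumDoubleRegisters = 32;  // stands in for FPURegister::kMaxNumRegisters

struct FrameDescription {
  std::array<double, kMaxNumDoubleRegisters> double_registers_{};
  double GetDoubleRegister(int n) const { return double_registers_[n]; }
  void SetDoubleRegister(int n, double value) { double_registers_[n] = value; }
};

// Same loop as the new Deoptimizer::CopyDoubleRegisters: every double register
// recorded in the input frame is mirrored into the output frame.
void CopyDoubleRegisters(const FrameDescription& input, FrameDescription* output) {
  for (int i = 0; i < kMaxNumDoubleRegisters; ++i) {
    output->SetDoubleRegister(i, input.GetDoubleRegister(i));
  }
}

// Values that SetPlatformCompiledStubRegisters stores into s0/s1/s2 for a
// stub-failure frame: the parameter count (plus one when a stack parameter
// count is present), the byte offset (params - 1) * kPointerSize, and the
// address of the deoptimization handler.
struct StubFailureRegisters {
  intptr_t s0, s1, s2;
};

StubFailureRegisters ComputeStubFailureRegisters(int register_param_count,
                                                 bool has_stack_parameter_count,
                                                 intptr_t handler_address) {
  int params = register_param_count;
  if (has_stack_parameter_count) params++;
  return StubFailureRegisters{params, (params - 1) * kPointerSize, handler_address};
}

}  // namespace sketch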
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 1d40c2c820..0eca71f2b8 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -350,6 +350,10 @@ int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
int reg = instr->FdValue();
PrintFPURegister(reg);
return 2;
+ } else if (format[1] == 'r') { // 'fr: fr register.
+ int reg = instr->FrValue();
+ PrintFPURegister(reg);
+ return 2;
}
UNREACHABLE();
return -1;
@@ -618,6 +622,15 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
UNREACHABLE();
}
break;
+ case COP1X:
+ switch (instr->FunctionFieldRaw()) {
+ case MADD_D:
+ Format(instr, "madd.d 'fd, 'fr, 'fs, 'ft");
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
case JR:
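A minimal sketch of the field the disassembler's new 'fr' format specifier extracts for COP1X instructions such as madd.d. It assumes the standard MIPS32 field layout (opcode in bits 26..31, fr in bits 21..25) and assembles an instruction word by hand; the constants and the main function are illustrative only, not code from the patch.

#include <cstdint>
#include <cstdio>

constexpr uint32_t kOpcodeShift = 26;
constexpr uint32_t kFrShift = 21;   // fr field, the value returned by FrValue()
constexpr uint32_t kCop1x = 0x13;   // opcode 010011
constexpr uint32_t kMaddD = 0x21;   // function field of madd.d

int FrValue(uint32_t instr) { return (instr >> kFrShift) & 0x1f; }

int main() {
  // Encode "madd.d f4, f6, f8, f10": fd = f4, fr = f6 (the addend), fs = f8, ft = f10.
  uint32_t instr = (kCop1x << kOpcodeShift) | (6u << kFrShift) |
                   (10u << 16) | (8u << 11) | (4u << 6) | kMaddD;
  std::printf("fr register: f%d\n", FrValue(instr));  // prints "fr register: f6"
  return 0;
}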
diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc
index faaa0e0f48..79505ae9cb 100644
--- a/deps/v8/src/mips/frames-mips.cc
+++ b/deps/v8/src/mips/frames-mips.cc
@@ -30,8 +30,13 @@
#if defined(V8_TARGET_ARCH_MIPS)
+#include "assembler.h"
+#include "assembler-mips.h"
+#include "assembler-mips-inl.h"
#include "frames-inl.h"
#include "mips/assembler-mips-inl.h"
+#include "macro-assembler.h"
+#include "macro-assembler-mips.h"
namespace v8 {
namespace internal {
@@ -42,6 +47,10 @@ Address ExitFrame::ComputeStackPointer(Address fp) {
}
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index 2ed358a913..1568ce66ec 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -193,30 +193,6 @@ class ExitFrameConstants : public AllStatic {
};
-class StandardFrameConstants : public AllStatic {
- public:
- // Fixed part of the frame consists of return address, caller fp,
- // context and function.
- static const int kFixedFrameSize = 4 * kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
-
- // Size of the MIPS 4 32-bit argument slots.
- // This is just an alias with a shorter name. Use it from now on.
- static const int kRArgsSlotsSize = 4 * kPointerSize;
- static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
-
- // JS argument slots size.
- static const int kJSArgsSlotsSize = 0 * kPointerSize;
- // Assembly builtins argument slots size.
- static const int kBArgsSlotsSize = 0 * kPointerSize;
-};
-
-
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
@@ -232,14 +208,30 @@ class JavaScriptFrameConstants : public AllStatic {
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
+ // FP-relative.
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kImplicitReceiverOffset = -6 * kPointerSize;
+ static const int kConstructorOffset = -5 * kPointerSize;
+ static const int kLengthOffset = -4 * kPointerSize;
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+};
+
+
class InternalFrameConstants : public AllStatic {
public:
+ // FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};
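A small consistency sketch for the new ConstructFrameConstants: the construct frame adds four slots below the standard fixed frame, so the deepest slot (the implicit receiver) ends up at -6 * kPointerSize from fp and the total frame grows to eight words on MIPS32. The constants are copied into a standalone translation unit purely to show the arithmetic.

#include <cstdio>

constexpr int kPointerSize = 4;                    // MIPS32
constexpr int kFixedFrameSize = 4 * kPointerSize;  // ra, caller fp, cp, marker/function
constexpr int kExpressionsOffset = -3 * kPointerSize;

// ConstructFrameConstants as introduced above, FP-relative.
constexpr int kImplicitReceiverOffset = -6 * kPointerSize;
constexpr int kConstructorOffset = -5 * kPointerSize;
constexpr int kLengthOffset = -4 * kPointerSize;
constexpr int kCodeOffset = kExpressionsOffset;
constexpr int kFrameSize = kFixedFrameSize + 4 * kPointerSize;

static_assert(kFrameSize == 8 * kPointerSize, "fixed frame plus four construct slots");
static_assert(kImplicitReceiverOffset == kCodeOffset - 3 * kPointerSize,
              "the receiver is the deepest of the four extra slots");

int main() {
  std::printf("construct frame size: %d bytes\n", kFrameSize);  // 32 on MIPS32
  return 0;
}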
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 3e89fb43b4..bacec10f07 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -139,7 +139,7 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -147,7 +147,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
@@ -172,12 +172,13 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
+ info->set_prologue_offset(masm_->pc_offset());
+ // The following three instructions must remain together and unmodified for
+ // code aging to work properly.
__ Push(ra, fp, cp, a1);
- if (locals_count > 0) {
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- }
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
// Adjust fp to point to caller's fp.
__ Addu(fp, sp, Operand(2 * kPointerSize));
@@ -345,45 +346,34 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
// The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
// to make sure it is constant. Branch may emit a skip-or-jump sequence
// instead of the normal Branch. It seems that the "skip" part of that
// sequence is about as long as this Branch would be so it is safe to ignore
// that.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Comment cmnt(masm_, "[ Stack check");
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ slt(at, a3, zero_reg);
- __ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9 first, so it is safe to use the delay slot.
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- __ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ sltu(at, sp, t0);
- __ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9 first, so it is safe to use the delay slot.
- StackCheckStub stub;
- __ CallStub(&stub);
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
}
+ EmitProfilingCounterDecrement(weight);
+ __ slt(at, a3, zero_reg);
+ __ beq(at, zero_reg, &ok);
+ // CallStub will emit a li t9 first, so it is safe to use the delay slot.
+ InterruptStub stub;
+ __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ RecordBackEdge(stmt->OsrEntryId());
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
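For reference, the interrupt weight computed by the new EmitBackEdgeBookkeeping can be modelled as a pure function of the code distance back to the loop header: a longer loop body decrements the profiling counter by a larger weight, so loops of different sizes request an InterruptStub call after roughly comparable amounts of executed code. The two constants below are illustrative placeholders, not the values V8 defines.

#include <algorithm>

constexpr int kMaxBackEdgeWeight = 127;     // assumed cap, for illustration
constexpr int kBackEdgeDistanceUnit = 142;  // assumed bytes of code per unit of weight

// Mirrors: weight = Min(kMaxBackEdgeWeight, Max(1, distance / kBackEdgeDistanceUnit))
int BackEdgeWeight(int bytes_since_back_edge_target) {
  return std::min(kMaxBackEdgeWeight,
                  std::max(1, bytes_since_back_edge_target / kBackEdgeDistanceUnit));
}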
@@ -685,7 +675,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
if (CpuFeatures::IsSupported(FPU)) {
ToBooleanStub stub(result_register());
- __ CallStub(&stub);
+ __ CallStub(&stub, condition->test_id());
__ mov(at, zero_reg);
} else {
// Call the runtime to find the boolean value of the source and then
@@ -929,34 +919,33 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- Handle<JSModule> instance = declaration->module()->interface()->Instance();
- ASSERT(!instance.is_null());
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name(), zone());
- globals_->Add(instance, zone());
- Visit(declaration->module());
- break;
- }
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ li(a1, Operand(instance));
- __ sw(a1, ContextOperand(cp, variable->index()));
- Visit(declaration->module());
- break;
- }
+ // Load instance object.
+ __ LoadContext(a1, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ lw(a1, ContextOperand(a1, variable->interface()->Index()));
+ __ lw(a1, ContextOperand(a1, Context::EXTENSION_INDEX));
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
+ // Assign it.
+ __ sw(a1, ContextOperand(cp, variable->index()));
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ Context::SlotOffset(variable->index()),
+ a1,
+ a3,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
}
@@ -999,6 +988,14 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1049,7 +1046,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1174,7 +1171,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+ Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(a1, cell);
__ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
@@ -1251,7 +1249,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Addu(a0, a0, Operand(Smi::FromInt(1)));
__ push(a0);
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ Branch(&loop);
// Remove the pointers stored on the stack.
@@ -1399,9 +1397,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST ||
- local->mode() == CONST_HARMONY ||
- local->mode() == LET) {
+ if (local->mode() == LET ||
+ local->mode() == CONST ||
+ local->mode() == CONST_HARMONY) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
if (local->mode() == CONST) {
@@ -1555,7 +1553,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
@@ -1589,7 +1587,7 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_properties));
@@ -1600,12 +1598,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
__ li(a0, Operand(Smi::FromInt(flags)));
- __ Push(a3, a2, a1, a0);
int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
+ __ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (flags != ObjectLiteral::kFastElements ||
+ } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
@@ -1639,7 +1638,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
+ if (key->handle()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ mov(a0, result_register());
@@ -1655,8 +1654,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
}
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
// Duplicate receiver on stack.
__ lw(a0, MemOperand(sp));
__ push(a0);
@@ -1670,6 +1667,17 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Drop(3);
}
break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ lw(a0, MemOperand(sp));
+ __ push(a0);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ CallRuntime(Runtime::kSetPrototype, 2);
+ } else {
+ __ Drop(2);
+ }
+ break;
case ObjectLiteral::Property::GETTER:
accessor_table.lookup(key)->second->getter = value;
break;
@@ -1733,7 +1741,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ DONT_TRACK_ALLOCATION_SITE,
+ length);
__ CallStub(&stub);
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
1, a1, a2);
@@ -1744,10 +1754,17 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode = has_fast_elements
- ? FastCloneShallowArrayStub::CLONE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+
+ if (has_fast_elements) {
+ mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
__ CallStub(&stub);
}
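The array-literal hunk above now picks two things at once: the clone mode and whether the stub should track an allocation site for later elements-kind transitions. A compact C++ model of that decision, using stand-in enums and folding in the copy-on-write case from the preceding hunk, is sketched below.

enum CloneMode { COPY_ON_WRITE_ELEMENTS, CLONE_ELEMENTS, CLONE_ANY_ELEMENTS };
enum AllocationSiteMode { DONT_TRACK_ALLOCATION_SITE, TRACK_ALLOCATION_SITE };

struct ShallowArrayStubConfig {
  CloneMode mode;
  AllocationSiteMode site_mode;
};

// is_cow: the constant elements share the copy-on-write fixed array map.
// has_fast_elements: the literal uses fast (non-double) elements.
// track_allocation_sites: the value of FLAG_track_allocation_sites.
ShallowArrayStubConfig ChooseShallowArrayStub(bool is_cow,
                                              bool has_fast_elements,
                                              bool track_allocation_sites) {
  if (is_cow) {
    return {COPY_ON_WRITE_ELEMENTS, DONT_TRACK_ALLOCATION_SITE};
  }
  if (has_fast_elements) {
    // Fast element clones do not need an allocation site either.
    return {CLONE_ELEMENTS, DONT_TRACK_ALLOCATION_SITE};
  }
  return {CLONE_ANY_ELEMENTS,
          track_allocation_sites ? TRACK_ALLOCATION_SITE
                                 : DONT_TRACK_ALLOCATION_SITE};
}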
@@ -1958,7 +1975,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2042,7 +2059,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(a1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(v0);
@@ -2050,7 +2067,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten to have a 'throw
+ // Invalid left-hand sides are rewritten by the parser to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
VisitForEffect(expr);
@@ -2356,7 +2373,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
CallFunctionStub stub(arg_count, flags);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2402,7 +2419,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
@@ -2550,7 +2567,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ li(a2, Operand(cell));
CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(v0);
}
@@ -2703,7 +2720,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ LoadRoot(t0, Heap::kHashTableMapRootIndex);
__ Branch(if_false, eq, a2, Operand(t0));
- // Look for valueOf symbol in the descriptor array, and indicate false if
+ // Look for valueOf name in the descriptor array, and indicate false if
// found. Since we omit an enumeration index check, if it is added via a
// transition that shares its descriptor array, this is a false positive.
Label entry, loop, done;
@@ -2728,10 +2745,10 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ Addu(a2, a2, t1);
// Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- // The use of t2 to store the valueOf symbol asumes that it is not otherwise
+ // string "valueOf" the result is false.
+ // The use of t2 to store the valueOf string assumes that it is not otherwise
// used in the loop below.
- __ LoadRoot(t2, Heap::kvalue_of_symbolRootIndex);
+ __ li(t2, Operand(FACTORY->value_of_string()));
__ jmp(&entry);
__ bind(&loop);
__ lw(a3, MemOperand(t0, 0));
@@ -2763,6 +2780,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
}
+void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a2);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a2, Operand(SYMBOL_TYPE), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@@ -2962,12 +3001,12 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ LoadRoot(v0, Heap::kfunction_class_symbolRootIndex);
+ __ LoadRoot(v0, Heap::kfunction_class_stringRootIndex);
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ LoadRoot(v0, Heap::kObject_symbolRootIndex);
+ __ LoadRoot(v0, Heap::kObject_stringRootIndex);
__ jmp(&done);
// Non-JS objects have class null.
@@ -3031,7 +3070,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
__ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm(), FPU);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
__ li(a1, Operand(0x41300000));
// Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
@@ -3149,6 +3188,38 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(a2);
+ __ pop(a1);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(a2);
+ __ pop(a1);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2);
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3305,7 +3376,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
__ jmp(&done);
__ bind(&need_conversion);
@@ -3620,7 +3691,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ lw(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
__ SmiUntag(array_length);
__ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
- __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(v0, Heap::kempty_stringRootIndex);
__ Branch(&done);
__ bind(&non_trivial_array);
@@ -3656,7 +3727,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ lw(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
__ BranchOnOverflow(&bailout, scratch3);
__ Branch(&loop, lt, element, Operand(elements_end));
@@ -3683,7 +3754,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// smi but the other values are, so the result is a smi.
- __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ Subu(string_length, string_length, Operand(scratch1));
__ Mult(array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
@@ -3723,10 +3794,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
array_length = no_reg;
__ Addu(result_pos,
result,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ li(at, Operand(Smi::FromInt(1)));
__ Branch(&one_char_separator, eq, scratch1, Operand(at));
__ Branch(&long_separator, gt, scratch1, Operand(at));
@@ -3743,7 +3814,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
@@ -3753,7 +3824,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case.
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
- __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+ __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator.
__ jmp(&one_char_separator_loop_entry);
@@ -3775,7 +3846,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
@@ -3796,7 +3867,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiUntag(string_length);
__ Addu(string,
separator,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ bind(&long_separator);
@@ -3804,7 +3875,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&long_separator_loop, lt, element, Operand(elements_end));
@@ -4004,7 +4075,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
__ mov(a0, result_register());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->UnaryOperationFeedbackId());
context()->Plug(v0);
}
@@ -4100,9 +4171,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
JumpPatchSite patch_site(masm_);
int count_value = expr->op() == Token::INC ? 1 : -1;
- __ li(a1, Operand(Smi::FromInt(count_value)));
-
if (ShouldInlineSmiCase(expr->op())) {
+ __ li(a1, Operand(Smi::FromInt(count_value)));
__ AdduAndCheckForOverflow(v0, a0, a1, t0);
__ BranchOnOverflow(&stub_call, t0); // Do stub on overflow.
@@ -4111,12 +4181,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
patch_site.EmitJumpIfSmi(v0, &done);
__ bind(&stub_call);
}
+ __ mov(a1, a0);
+ __ li(a0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()),
+ RelocInfo::CODE_TARGET,
+ expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4232,12 +4306,12 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_symbol())) {
+ if (check->Equals(isolate()->heap()->number_string())) {
__ JumpIfSmi(v0, if_true);
__ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ } else if (check->Equals(isolate()->heap()->string_string())) {
__ JumpIfSmi(v0, if_false);
// Check for undetectable objects => false.
__ GetObjectType(v0, v0, a1);
@@ -4246,16 +4320,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
Split(eq, a1, Operand(zero_reg),
if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ } else if (check->Equals(isolate()->heap()->boolean_string())) {
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_symbol())) {
+ check->Equals(isolate()->heap()->null_string())) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ } else if (check->Equals(isolate()->heap()->undefined_string())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
__ JumpIfSmi(v0, if_false);
@@ -4264,19 +4338,23 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ } else if (check->Equals(isolate()->heap()->function_string())) {
__ JumpIfSmi(v0, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ GetObjectType(v0, v0, a1);
__ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ } else if (check->Equals(isolate()->heap()->object_string())) {
__ JumpIfSmi(v0, if_false);
if (!FLAG_harmony_typeof) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
}
+ if (FLAG_harmony_symbols) {
+ __ GetObjectType(v0, v0, a1);
+ __ Branch(if_true, eq, a1, Operand(SYMBOL_TYPE));
+ }
// Check for JS objects => true.
__ GetObjectType(v0, v0, a1);
__ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
@@ -4333,29 +4411,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = eq;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cc = eq;
- break;
- case Token::LT:
- cc = lt;
- break;
- case Token::GT:
- cc = gt;
- break;
- case Token::LTE:
- cc = le;
- break;
- case Token::GTE:
- cc = ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cc = CompareIC::ComputeCondition(op);
__ mov(a0, result_register());
__ pop(a1);
@@ -4370,7 +4426,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index cf706815e3..e434fdb774 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -61,12 +61,12 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register elements,
- Register scratch0,
- Register scratch1,
- Label* miss) {
+static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register elements,
+ Register scratch0,
+ Register scratch1,
+ Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// elements: holds the property dictionary on fall through.
@@ -129,19 +129,19 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
// If probing finds an entry check that the value is a normal
// property.
__ bind(&done); // scratch2 == elements + 4 * index.
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
__ And(at,
@@ -182,19 +182,19 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
// If probing finds an entry in the dictionary check that the value
// is a normal property that is not read only.
__ bind(&done); // scratch2 == elements + 4 * index.
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
(PropertyDetails::TypeField::kMask |
@@ -215,53 +215,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
}
-void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadArrayLength(masm, a0, a3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- lr : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadStringLength(masm, a0, a1, a3, &miss,
- support_wrappers);
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- lr : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, a0, a1, a3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
@@ -352,30 +305,35 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
}
-// Checks whether a key is an array index string or a symbol string.
-// Falls through if a key is a symbol.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_symbol) {
+// Checks whether a key is an array index string or a unique name.
+// Falls through if a key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_unique) {
// The key is not a smi.
- // Is it a string?
+ Label unique;
+ // Is it a name?
__ GetObjectType(key, map, hash);
- __ Branch(not_symbol, ge, hash, Operand(FIRST_NONSTRING_TYPE));
+ __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
// Is the string an array index, with cached numeric value?
- __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
- __ And(at, hash, Operand(String::kContainsCachedArrayIndexMask));
+ __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
__ Branch(index_string, eq, at, Operand(zero_reg));
- // Is the string a symbol?
+ // Is the string internalized?
// map: key map
__ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
- __ And(at, hash, Operand(kIsSymbolMask));
- __ Branch(not_symbol, eq, at, Operand(zero_reg));
+ STATIC_ASSERT(kInternalizedTag != 0);
+ __ And(at, hash, Operand(kIsInternalizedMask));
+ __ Branch(not_unique, eq, at, Operand(zero_reg));
+
+ __ bind(&unique);
}
@@ -473,7 +431,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack into a1.
__ lw(a1, MemOperand(sp, argc * kPointerSize));
- GenerateStringDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
// a0: elements
// Search the dictionary - put result in register a1.
@@ -576,11 +534,11 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ lw(a1, MemOperand(sp, argc * kPointerSize));
Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
+ Label check_number_dictionary, check_name, lookup_monomorphic_cache;
+ Label index_smi, index_name;
// Check that the key is a smi.
- __ JumpIfNotSmi(a2, &check_string);
+ __ JumpIfNotSmi(a2, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@@ -627,10 +585,10 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ mov(a1, v0);
__ jmp(&do_call);
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, a2, a0, a3, &index_string, &slow_call);
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, a2, a0, a3, &index_name, &slow_call);
- // The key is known to be a symbol.
+ // The key is known to be a unique name.
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
@@ -657,14 +615,14 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&slow_call);
// This branch is taken if:
// - the receiver requires boxing or access check,
- // - the key is neither smi nor symbol,
+ // - the key is neither smi nor a unique name,
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub,
// that will get fetched next time.
__ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3);
GenerateMiss(masm, argc);
- __ bind(&index_string);
+ __ bind(&index_name);
__ IndexFromHash(a3, a2);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
@@ -677,10 +635,10 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -- ra : return address
// -----------------------------------
- // Check if the name is a string.
+ // Check if the name is really a name.
Label miss;
__ JumpIfSmi(a2, &miss);
- __ IsObjectJSStringType(a2, a0, &miss);
+ __ IsObjectNameType(a2, a0, &miss);
CallICBase::GenerateNormal(masm, argc);
__ bind(&miss);
@@ -700,7 +658,9 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::NORMAL, Code::LOAD_IC);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, a0, a2, a3, t0, t1, t2);
@@ -718,7 +678,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -----------------------------------
Label miss;
- GenerateStringDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
// a1: elements
GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
@@ -858,7 +818,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -893,7 +853,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -926,7 +886,7 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
Object* KeyedLoadIC_Miss(Arguments args);
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
@@ -939,7 +899,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
__ Push(a1, a0);
// Perform tail call to the entry.
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
: ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
@@ -966,7 +926,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
+ Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
Register key = a0;
@@ -975,7 +935,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
// Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_string);
+ __ JumpIfNotSmi(key, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@@ -1014,8 +974,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
a3);
GenerateRuntimeGetProperty(masm);
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, key, a2, a3, &index_string, &slow);
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
@@ -1029,16 +989,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ Branch(&probe_dictionary, eq, t0, Operand(at));
// Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
+ // based on 32 bits of the map pointer and the name hash.
__ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
__ sra(a3, a2, KeyedLookupCache::kMapHashShift);
- __ lw(t0, FieldMemOperand(a0, String::kHashFieldOffset));
- __ sra(at, t0, String::kHashShift);
+ __ lw(t0, FieldMemOperand(a0, Name::kHashFieldOffset));
+ __ sra(at, t0, Name::kHashShift);
__ xor_(a3, a3, at);
int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
__ And(a3, a3, Operand(mask));
- // Load the key (consisting of map and symbol) from the cache and
+ // Load the key (consisting of map and unique name) from the cache and
// check for match.
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
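The keyed lookup cache probe above xors a shifted map pointer with the shifted name hash and masks the result down to a bucket index. The standalone sketch below reproduces that arithmetic; the shift and mask constants are placeholders for KeyedLookupCache's real values, chosen only to make the example self-contained.

#include <cstdint>

constexpr uint32_t kMapHashShift = 5;     // placeholder for KeyedLookupCache::kMapHashShift
constexpr uint32_t kNameHashShift = 2;    // placeholder for Name::kHashShift
constexpr uint32_t kBucketIndexMask = 0x7f;  // placeholder for kCapacityMask & kHashMask

// a3 = (map >> kMapHashShift) ^ (hash_field >> kNameHashShift); a3 &= mask;
uint32_t LookupCacheIndex(uint32_t map_word, uint32_t name_hash_field) {
  uint32_t hash = (map_word >> kMapHashShift) ^ (name_hash_field >> kNameHashShift);
  return hash & kBucketIndexMask;
}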
@@ -1131,7 +1091,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
a3);
__ Ret();
- __ bind(&index_string);
+ __ bind(&index_name);
__ IndexFromHash(a3, key);
// Now jump to the place where smi keys are handled.
__ Branch(&index_smi);
@@ -1166,7 +1126,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -1268,7 +1228,6 @@ static void KeyedStoreGenerateGenericHelper(
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
- receiver,
elements, // Overwritten.
a3, // Scratch regs...
t0,
@@ -1296,7 +1255,9 @@ static void KeyedStoreGenerateGenericHelper(
t0,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -1308,7 +1269,9 @@ static void KeyedStoreGenerateGenericHelper(
t0,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1322,7 +1285,8 @@ static void KeyedStoreGenerateGenericHelper(
t0,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -1453,11 +1417,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -1468,7 +1432,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// Push receiver, key and value for runtime call.
__ Push(a2, a1, a0);
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
masm->isolate())
: ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@@ -1506,7 +1470,9 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in v0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
__ bind(&fail);
@@ -1527,7 +1493,9 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
// Must return the modified receiver in v0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
__ bind(&fail);
@@ -1574,62 +1542,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = a1;
- Register value = a0;
- Register scratch = a3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ GetObjectType(receiver, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ GetObjectType(scratch, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&miss, eq, scratch, Operand(at));
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength),
- masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- GenerateMiss(masm);
-}
-
-
void StoreIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : value
@@ -1639,7 +1551,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
// -----------------------------------
Label miss;
- GenerateStringDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);
GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
Counters* counters = masm->isolate()->counters();
@@ -1695,36 +1607,16 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
- State state = TargetState(previous_state, false, x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address andi_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
+ // If the instruction following the call is not a andi at, rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(andi_instruction_address);
+ return Assembler::IsAndImmediate(instr) &&
+ Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}
@@ -1736,7 +1628,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// was inlined.
Instr instr = Assembler::instr_at(andi_instruction_address);
if (!(Assembler::IsAndImmediate(instr) &&
- Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
+ Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
return;
}
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 21fd2ce481..cd489346a6 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -65,9 +65,7 @@ bool LCodeGen::GenerateCode() {
HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
- CpuFeatures::Scope scope(FPU);
-
- CodeStub::GenerateFPStubs();
+ CpuFeatureScope scope(masm(), FPU);
// Open a frame scope to indicate that there is a frame on the stack. The
// NONE indicates that the scope shouldn't actually generate code to set up
@@ -77,6 +75,7 @@ bool LCodeGen::GenerateCode() {
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
+ GenerateDeoptJumpTable() &&
GenerateSafepointTable();
}
@@ -85,7 +84,14 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (FLAG_weak_embedded_maps_in_optimized_code) {
+ RegisterDependentCodeForEmbeddedMaps(code);
+ }
PopulateDeoptimizationData(code);
+  for (int i = 0; i < prototype_maps_.length(); i++) {
+ prototype_maps_.at(i)->AddDependentCode(
+ DependentCode::kPrototypeCheckGroup, code);
+ }
}
@@ -116,55 +122,93 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop_at");
+ }
#endif
- // a1: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ Branch(&ok, eq, t1, Operand(zero_reg));
-
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ sw(a2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
+ // a1: Callee's JS function.
+ // cp: Callee's context.
+ // fp: Caller's frame pointer.
+ // lr: Caller's pc.
+
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). r5 is zero for method calls and non-zero for
+ // function calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ Branch(&ok, eq, t1, Operand(zero_reg));
+
+ int receiver_offset = scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ sw(a2, MemOperand(sp, receiver_offset));
+ __ bind(&ok);
+ }
}
- __ Push(ra, fp, cp, a1);
- __ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP.
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ if (info()->IsStub()) {
+ __ Push(ra, fp, cp);
+ __ Push(Smi::FromInt(StackFrame::STUB));
+ // Adjust FP to point to saved FP.
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+ } else {
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ __ Push(ra, fp, cp, a1);
+ // Add unused load of ip to ensure prologue sequence is identical for
+ // full-codegen and lithium-codegen.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ // Adj. FP to point to saved FP.
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+ }
+ frame_is_built_ = true;
+ }
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
- __ li(a0, Operand(slots));
- __ li(a2, Operand(kSlotsZapValue));
+ __ Subu(sp, sp, Operand(slots * kPointerSize));
+ __ push(a0);
+ __ push(a1);
+ __ Addu(a0, sp, Operand(slots * kPointerSize));
+ __ li(a1, Operand(kSlotsZapValue));
Label loop;
__ bind(&loop);
- __ push(a2);
- __ Subu(a0, a0, 1);
- __ Branch(&loop, ne, a0, Operand(zero_reg));
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ sw(a1, MemOperand(a0, 2 * kPointerSize));
+ __ Branch(&loop, ne, a0, Operand(sp));
+ __ pop(a1);
+ __ pop(a0);
} else {
__ Subu(sp, sp, Operand(slots * kPointerSize));
}
}
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
+ CpuFeatureScope scope(masm(), FPU);
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
+
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is in a1.
@@ -200,7 +244,7 @@ bool LCodeGen::GeneratePrologue() {
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
EnsureSpaceForLazyDeopt();
@@ -221,7 +265,30 @@ bool LCodeGen::GenerateBody() {
}
if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ if (FLAG_code_comments) {
+ HValue* hydrogen = instr->hydrogen_value();
+ if (hydrogen != NULL) {
+ if (hydrogen->IsChange()) {
+ HValue* changed_value = HChange::cast(hydrogen)->value();
+ int use_id = 0;
+ const char* use_mnemo = "dead";
+ if (hydrogen->UseCount() >= 1) {
+ HValue* use_value = hydrogen->uses().value();
+ use_id = use_value->id();
+ use_mnemo = use_value->Mnemonic();
+ }
+ Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
+ current_instruction_, instr->Mnemonic(),
+ changed_value->id(), changed_value->Mnemonic(),
+ use_id, use_mnemo);
+ } else {
+ Comment(";;; @%d: %s. <#%d>", current_instruction_,
+ instr->Mnemonic(), hydrogen->id());
+ }
+ } else {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ }
+ }
instr->CompileToNative(this);
}
}
@@ -235,10 +302,31 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred build frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+ }
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred destroy frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(frame_is_built_);
+ __ pop(at);
+ __ MultiPop(cp.bit() | fp.bit() | ra.bit());
+ frame_is_built_ = false;
+ }
__ jmp(code->exit());
}
}
@@ -250,10 +338,81 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateDeoptJumpTable() {
- // TODO(plind): not clear that this will have advantage for MIPS.
- // Skipping it for now. Raised issue #100 for this.
- Abort("Unimplemented: GenerateDeoptJumpTable");
- return false;
+ // Check that the jump table is accessible from everywhere in the function
+ // code, i.e. that offsets to the table can be encoded in the 16bit signed
+ // immediate of a branch instruction.
+ // To simplify we consider the code size from the first instruction to the
+ // end of the jump table.
+ if (!is_int16((masm()->pc_offset() / Assembler::kInstrSize) +
+ deopt_jump_table_.length() * 12)) {
+ Abort("Generated code is too large");
+ }
+
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ __ RecordComment("[ Deoptimization jump table");
+ Label table_start;
+ __ bind(&table_start);
+ Label needs_frame_not_call;
+ Label needs_frame_is_call;
+ for (int i = 0; i < deopt_jump_table_.length(); i++) {
+ __ bind(&deopt_jump_table_[i].label);
+ Address entry = deopt_jump_table_[i].address;
+ bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt;
+ Deoptimizer::BailoutType type =
+ is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
+ if (deopt_jump_table_[i].needs_frame) {
+ if (is_lazy_deopt) {
+ if (needs_frame_is_call.is_bound()) {
+ __ Branch(&needs_frame_is_call);
+ } else {
+ __ bind(&needs_frame_is_call);
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Call(t9);
+ }
+ } else {
+ if (needs_frame_not_call.is_bound()) {
+ __ Branch(&needs_frame_not_call);
+ } else {
+ __ bind(&needs_frame_not_call);
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Jump(t9);
+ }
+ }
+ } else {
+ if (is_lazy_deopt) {
+ __ Call(t9);
+ } else {
+ __ Jump(t9);
+ }
+ }
+ }
+ __ RecordComment("]");
+
+ // The deoptimization jump table is the last part of the instruction
+ // sequence. Mark the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
}
@@ -362,8 +521,6 @@ bool LCodeGen::IsInteger32(LConstantOperand* op) const {
int LCodeGen::ToInteger32(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(constant->HasInteger32Value());
return constant->Integer32Value();
}
@@ -404,37 +561,20 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return MemOperand(fp, -(index + 3) * kPointerSize);
- } else {
- // Incoming parameter. Skip the return address.
- return MemOperand(fp, -(index - 1) * kPointerSize);
- }
+ return MemOperand(fp, StackSlotOffset(op->index()));
}
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
ASSERT(op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, context,
- // and the first word of the double in the fixed part of the frame.
- return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
- } else {
- // Incoming parameter. Skip the return address and the first word of
- // the double.
- return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
- }
+ return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
}
void LCodeGen::WriteTranslation(LEnvironment* environment,
Translation* translation,
- int* arguments_index,
- int* arguments_count) {
+ int* pushed_arguments_index,
+ int* pushed_arguments_count) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
@@ -446,14 +586,16 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// arguments index points to the first element of a sequence of tagged
// values on the stack that represent the arguments. This needs to be
// kept in sync with the LArgumentsElements implementation.
- *arguments_index = -environment->parameter_count();
- *arguments_count = environment->parameter_count();
+ *pushed_arguments_index = -environment->parameter_count();
+ *pushed_arguments_count = environment->parameter_count();
WriteTranslation(environment->outer(),
translation,
- arguments_index,
- arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ pushed_arguments_index,
+ pushed_arguments_count);
+ bool has_closure_id = !info()->closure().is_null() &&
+ *info()->closure() != *environment->closure();
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
@@ -474,19 +616,29 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
ASSERT(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
}
// Inlined frames which push their arguments cause the index to be
- // bumped and a new stack area to be used for materialization.
- if (environment->entry() != NULL &&
- environment->entry()->arguments_pushed()) {
- *arguments_index = *arguments_index < 0
- ? GetStackSlotCount()
- : *arguments_index + *arguments_count;
- *arguments_count = environment->entry()->arguments_count() + 1;
+ // bumped and another stack area to be used for materialization,
+ // otherwise actual argument values are unknown for inlined frames.
+ bool arguments_known = true;
+ int arguments_index = *pushed_arguments_index;
+ int arguments_count = *pushed_arguments_count;
+ if (environment->entry() != NULL) {
+ arguments_known = environment->entry()->arguments_pushed();
+ arguments_index = arguments_index < 0
+ ? GetStackSlotCount() : arguments_index + arguments_count;
+ arguments_count = environment->entry()->arguments_count() + 1;
+ if (environment->entry()->arguments_pushed()) {
+ *pushed_arguments_index = arguments_index;
+ *pushed_arguments_count = arguments_count;
+ }
}
for (int i = 0; i < translation_size; ++i) {
@@ -501,8 +653,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
environment->spilled_registers()[value->index()],
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@@ -512,8 +665,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
environment->spilled_double_registers()[value->index()],
false,
false,
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
}
}
@@ -521,8 +675,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
}
}
@@ -531,13 +686,15 @@ void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
+ bool arguments_known,
int arguments_index,
int arguments_count) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
// this is only used for the arguments object.
- translation->StoreArgumentsObject(arguments_index, arguments_count);
+ translation->StoreArgumentsObject(
+ arguments_known, arguments_index, arguments_count);
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@@ -660,16 +817,19 @@ void LCodeGen::DeoptimizeIf(Condition cc,
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
}
ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
-
- if (FLAG_deopt_every_n_times == 1 &&
- info_->shared_info()->opt_count() == id) {
+ if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
return;
}
@@ -683,9 +843,51 @@ void LCodeGen::DeoptimizeIf(Condition cc,
__ bind(&skip);
}
- // TODO(plind): The Arm port is a little different here, due to their
- // DeOpt jump table, which is not used for Mips yet.
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+ ASSERT(info()->IsStub() || frame_is_built_);
+ bool needs_lazy_deopt = info()->IsStub();
+ if (cc == al && frame_is_built_) {
+ if (needs_lazy_deopt) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+ } else {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+ }
+ } else {
+ // We often have several deopts to the same entry, reuse the last
+ // jump entry if this is the case.
+ if (deopt_jump_table_.is_empty() ||
+ (deopt_jump_table_.last().address != entry) ||
+ (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
+ (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+ JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+ deopt_jump_table_.Add(table_entry, zone());
+ }
+ __ Branch(&deopt_jump_table_.last().label, cc, src1, src2);
+ }
+}
+
+
+void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
+ ZoneList<Handle<Map> > maps(1, zone());
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
+ if (map->CanTransition()) {
+ maps.Add(map, zone());
+ }
+ }
+ }
+#ifdef VERIFY_HEAP
+ // This disables verification of weak embedded maps after full GC.
+ // AddDependentCode can cause a GC, which would observe the state where
+ // this code is not yet in the depended code lists of the embedded maps.
+ NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+#endif
+ for (int i = 0; i < maps.length(); i++) {
+ maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
+ }
}
@@ -695,7 +897,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
Handle<DeoptimizationInputData> data =
factory()->NewDeoptimizationInputData(length, TENURED);
- Handle<ByteArray> translations = translations_.CreateByteArray();
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
@@ -863,39 +1066,39 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::RegExpExec: {
RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::NumberToString: {
NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringAdd: {
StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
__ lw(a0, MemOperand(sp, 0));
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -947,6 +1150,14 @@ void LCodeGen::DoModI(LModI* instr) {
DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
}
+ // Check for (kMinInt % -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ Label left_not_min_int;
+ __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
+ __ bind(&left_not_min_int);
+ }
+
__ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
__ mfhi(result);
@@ -980,7 +1191,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(&left_not_zero);
}
- // Check for (-kMinInt / -1).
+ // Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
@@ -994,6 +1205,18 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+ DoubleRegister addend = ToDoubleRegister(instr->addend());
+ DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+ // This is computed in-place.
+ ASSERT(addend.is(ToDoubleRegister(instr->result())));
+
+ __ madd_d(addend, addend, multiplier, multiplicand);
+}
+
+
void LCodeGen::DoMulI(LMulI* instr) {
Register scratch = scratch0();
Register result = ToRegister(instr->result());
@@ -1046,12 +1269,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ sll(result, left, shift);
} else if (IsPowerOf2(constant_abs - 1)) {
int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ sll(result, left, shift);
- __ Addu(result, result, left);
+ __ sll(scratch, left, shift);
+ __ Addu(result, scratch, left);
} else if (IsPowerOf2(constant_abs + 1)) {
int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ sll(result, left, shift);
- __ Subu(result, result, left);
+ __ sll(scratch, left, shift);
+ __ Subu(result, scratch, left);
}
      // Correct the sign of the result if the constant is negative.
@@ -1140,6 +1363,9 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
// No need to mask the right operand on MIPS, it is built into the variable
// shift instructions.
switch (instr->op()) {
+ case Token::ROR:
+ __ Ror(result, left, Operand(ToRegister(right_op)));
+ break;
case Token::SAR:
__ srav(result, left, ToRegister(right_op));
break;
@@ -1161,6 +1387,13 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count != 0) {
+ __ Ror(result, left, Operand(shift_count));
+ } else {
+ __ Move(result, left);
+ }
+ break;
case Token::SAR:
if (shift_count != 0) {
__ sra(result, left, shift_count);
@@ -1242,6 +1475,7 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
DoubleRegister result = ToDoubleRegister(instr->result());
+ CpuFeatureScope scope(masm(), FPU);
double v = instr->value();
__ Move(result, v);
}
@@ -1350,6 +1584,15 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ SeqStringSetCharGenerator::Generate(masm(),
+ instr->encoding(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->value()));
+}
+
+
void LCodeGen::DoBitNotI(LBitNotI* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
@@ -1430,6 +1673,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ bind(&done);
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
+ CpuFeatureScope scope(masm(), FPU);
FPURegister left_reg = ToDoubleRegister(left);
FPURegister right_reg = ToDoubleRegister(right);
FPURegister result_reg = ToDoubleRegister(instr->result());
@@ -1470,6 +1714,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ CpuFeatureScope scope(masm(), FPU);
DoubleRegister left = ToDoubleRegister(instr->left());
DoubleRegister right = ToDoubleRegister(instr->right());
DoubleRegister result = ToDoubleRegister(instr->result());
@@ -1516,7 +1761,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  // Other architectures use a nop here, to signal that there is no inlined
// patchable code. Mips does not need the nop, since our marker
// instruction (andi zero_reg) will never be used in normal code.
@@ -1579,9 +1824,10 @@ void LCodeGen::DoBranch(LBranch* instr) {
Register reg = ToRegister(instr->value());
EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
} else if (r.IsDouble()) {
+ CpuFeatureScope scope(masm(), FPU);
DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
- EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
+ EmitBranchF(true_block, false_block, nue, reg, kDoubleRegZero);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->value());
@@ -1656,6 +1902,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ CpuFeatureScope scope(masm(), FPU);
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
Label not_heap_number;
@@ -1735,6 +1982,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
+ CpuFeatureScope scope(masm(), FPU);
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
FPURegister left_reg = ToDoubleRegister(left);
@@ -1951,7 +2199,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2040,7 +2288,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
- if (class_name->IsEqualTo(CStrVector("Function"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2069,7 +2317,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Objects with a non-function constructor have class 'Object'.
__ GetObjectType(temp, temp2, temp2);
- if (class_name->IsEqualTo(CStrVector("Object"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
__ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
} else {
__ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
@@ -2080,12 +2328,12 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
__ lw(temp, FieldMemOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is a symbol because it's a literal.
- // The name in the constructor is a symbol because of the way the context is
- // booted. This routine isn't expected to work for random API-created
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
// classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are symbols it is sufficient to use an identity
- // comparison.
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
// End with the address of this class_name instance in temp register.
  // On MIPS, the caller must do the comparison with Handle<String> class_name.
@@ -2129,7 +2377,7 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
ASSERT(result.is(v0));
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ Branch(&true_label, eq, result, Operand(zero_reg));
__ li(result, Operand(factory()->false_value()));
@@ -2249,7 +2497,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
__ StoreToSafepointRegisterSlot(temp, temp);
}
- CallCodeGeneric(stub.GetCode(),
+ CallCodeGeneric(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -2261,10 +2509,18 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
+void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
+ Register object = ToRegister(instr->object());
+ Register result = ToRegister(instr->result());
+ __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ lbu(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
+}
+
+
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// On MIPS there is no need for a "no inlined smi code" marker (nop).
@@ -2272,26 +2528,53 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
// A minor optimization that relies on LoadRoot always emitting one
// instruction.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
- Label done;
+ Label done, check;
__ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
+ __ bind(&check);
__ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+ ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check));
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
__ bind(&done);
}
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in v0.
__ push(v0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
- __ mov(sp, fp);
- __ Pop(ra, fp);
- __ Addu(sp, sp, Operand(sp_delta));
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
+ CpuFeatureScope scope(masm(), FPU);
+ ASSERT(NeedsEagerFrame());
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
+ if (NeedsEagerFrame()) {
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+ if (sp_delta != 0) {
+ __ Addu(sp, sp, Operand(sp_delta));
+ }
+ } else {
+ Register reg = ToRegister(instr->parameter_count());
+ __ Addu(reg, reg, Operand(1));
+ __ sll(at, reg, kPointerSizeLog2);
+ __ Addu(sp, sp, at);
+ }
+ }
__ Jump(ra);
}
@@ -2622,7 +2905,183 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
}
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ FPURegister result = ToDoubleRegister(instr->result());
+ if (key_is_constant) {
+ __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
+ } else {
+ __ sll(scratch0(), key, shift_size);
+ __ Addu(scratch0(), scratch0(), external_pointer);
+ }
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatureScope scope(masm(), FPU);
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ lwc1(result, MemOperand(scratch0(), additional_offset));
+ __ cvt_d_s(result, result);
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ ldc1(result, MemOperand(scratch0(), additional_offset));
+ }
+ } else {
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ Register value = external_pointer;
+ __ lw(value, MemOperand(scratch0(), additional_offset));
+ __ And(sfpd_lo, value, Operand(kBinary32MantissaMask));
+
+ __ srl(scratch0(), value, kBinary32MantissaBits);
+ __ And(scratch0(), scratch0(),
+ Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+ Label exponent_rebiased;
+ __ Xor(at, scratch0(), Operand(0x00));
+ __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg));
+
+ __ Xor(at, scratch0(), Operand(0xff));
+ Label skip;
+ __ Branch(&skip, ne, at, Operand(zero_reg));
+ __ li(scratch0(), Operand(0x7ff));
+ __ bind(&skip);
+ __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg));
+
+ // Rebias exponent.
+ __ Addu(scratch0(),
+ scratch0(),
+ Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+ __ bind(&exponent_rebiased);
+ __ And(sfpd_hi, value, Operand(kBinary32SignMask));
+ __ sll(at, scratch0(), HeapNumber::kMantissaBitsInTopWord);
+ __ Or(sfpd_hi, sfpd_hi, at);
+
+ // Shift mantissa.
+ static const int kMantissaShiftForHiWord =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaShiftForLoWord =
+ kBitsPerInt - kMantissaShiftForHiWord;
+
+ __ srl(at, sfpd_lo, kMantissaShiftForHiWord);
+ __ Or(sfpd_hi, sfpd_hi, at);
+ __ sll(sfpd_lo, sfpd_lo, kMantissaShiftForLoWord);
+
+ } else {
+ __ lw(sfpd_lo, MemOperand(scratch0(), additional_offset));
+ __ lw(sfpd_hi, MemOperand(scratch0(),
+ additional_offset + kPointerSize));
+ }
+ }
+ } else {
+ Register result = ToRegister(instr->result());
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ __ lb(result, mem_operand);
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ lbu(result, mem_operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ __ lh(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ lhu(result, mem_operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ __ lw(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ lw(result, mem_operand);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ DeoptimizeIf(Ugreater_equal, instr->environment(),
+ result, Operand(0x80000000));
+ }
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+
+ int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
+ ((constant_key + instr->additional_index()) << element_size_shift);
+ if (!key_is_constant) {
+ __ sll(scratch, key, shift_size);
+ __ Addu(elements, elements, scratch);
+ }
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatureScope scope(masm(), FPU);
+ __ Addu(elements, elements, Operand(base_offset));
+ __ ldc1(result, MemOperand(elements));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+ }
+ } else {
+ __ lw(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
+ __ lw(sfpd_lo, MemOperand(elements, base_offset));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ ASSERT(kPointerSize == sizeof(kHoleNanLower32));
+ DeoptimizeIf(eq, instr->environment(), sfpd_hi, Operand(kHoleNanUpper32));
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
Register elements = ToRegister(instr->elements());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -2635,8 +3094,8 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
instr->additional_index());
store_base = elements;
} else {
- Register key = EmitLoadRegister(instr->key(), scratch);
- // Even though the HLoadKeyedFastElement instruction forces the input
+ Register key = EmitLoadRegister(instr->key(), scratch0());
+ // Even though the HLoadKeyed instruction forces the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
@@ -2664,46 +3123,14 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
}
-void LCodeGen::DoLoadKeyedFastDoubleElement(
- LLoadKeyedFastDoubleElement* instr) {
- Register elements = ToRegister(instr->elements());
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- DoubleRegister result = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
-
- if (key_is_constant) {
- __ Addu(elements, elements,
- Operand(((constant_key + instr->additional_index()) <<
- element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_external()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
} else {
- __ sll(scratch, key, shift_size);
- __ Addu(elements, elements, Operand(scratch));
- __ Addu(elements, elements,
- Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << element_size_shift)));
- }
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+ DoLoadKeyedFixedArray(instr);
}
-
- __ ldc1(result, MemOperand(elements));
}
@@ -2751,89 +3178,6 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
}
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
- LLoadKeyedSpecializedArrayElement* instr) {
- Register external_pointer = ToRegister(instr->external_pointer());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- FPURegister result = ToDoubleRegister(instr->result());
- if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
- } else {
- __ sll(scratch0(), key, shift_size);
- __ Addu(scratch0(), scratch0(), external_pointer);
- }
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), additional_offset));
- __ cvt_d_s(result, result);
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ ldc1(result, MemOperand(scratch0(), additional_offset));
- }
- } else {
- Register result = ToRegister(instr->result());
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ lb(result, mem_operand);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ lbu(result, mem_operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ lh(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ lhu(result, mem_operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ lw(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ lw(result, mem_operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr->environment(),
- result, Operand(0x80000000));
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(a1));
ASSERT(ToRegister(instr->key()).is(a0));
@@ -3006,8 +3350,14 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
Register result = ToRegister(instr->result());
- __ mov(result, cp);
+ for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->IsReturn()) {
+ __ mov(result, cp);
+ return;
+ }
+ }
}
@@ -3167,8 +3517,6 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
Label done;
__ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
__ mov(result, input);
- ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
- __ subu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
__ bind(&done);
@@ -3176,6 +3524,7 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+ CpuFeatureScope scope(masm(), FPU);
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
public:
@@ -3212,24 +3561,22 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+ CpuFeatureScope scope(masm(), FPU);
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
- FPURegister single_scratch = double_scratch0().low();
Register scratch1 = scratch0();
Register except_flag = ToRegister(instr->temp());
__ EmitFPUTruncate(kRoundToMinusInf,
- single_scratch,
+ result,
input,
scratch1,
+ double_scratch0(),
except_flag);
// Deopt if the operation did not succeed.
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
- // Load the result.
- __ mfc1(result, single_scratch);
-
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
Label done;
@@ -3243,8 +3590,10 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+ CpuFeatureScope scope(masm(), FPU);
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
Register scratch = scratch0();
Label done, check_sign_on_zero;
@@ -3296,17 +3645,15 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
}
Register except_flag = scratch;
-
__ EmitFPUTruncate(kRoundToMinusInf,
- double_scratch0().low(),
- double_scratch0(),
result,
+ double_scratch0(),
+ at,
+ double_scratch1,
except_flag);
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
- __ mfc1(result, double_scratch0().low());
-
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
__ Branch(&done, ne, result, Operand(zero_reg));
@@ -3320,6 +3667,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+ CpuFeatureScope scope(masm(), FPU);
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
__ sqrt_d(result, input);
@@ -3327,6 +3675,7 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+ CpuFeatureScope scope(masm(), FPU);
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
DoubleRegister temp = ToDoubleRegister(instr->temp());
@@ -3351,6 +3700,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
void LCodeGen::DoPower(LPower* instr) {
+ CpuFeatureScope scope(masm(), FPU);
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
@@ -3381,6 +3731,7 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
+ CpuFeatureScope scope(masm(), FPU);
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@@ -3456,11 +3807,26 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
}
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ CpuFeatureScope scope(masm(), FPU);
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+ DoubleRegister double_scratch2 = double_scratch0();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ MathExpGenerator::EmitMathExp(
+ masm(), input, result, double_scratch1, double_scratch2,
+ temp1, temp2, scratch0());
+}
+
+
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3468,7 +3834,7 @@ void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3476,7 +3842,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3484,7 +3850,7 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3576,7 +3942,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3608,9 +3974,29 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->constructor()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
+ __ li(a0, Operand(instr->arity()));
+ if (FLAG_optimize_constructed_arrays) {
+ // No cell in a2 for construct type feedback in optimized code
+ Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
+ isolate());
+ __ li(a2, Operand(undefined_value));
+ }
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->constructor()).is(a1));
+ ASSERT(ToRegister(instr->result()).is(v0));
+ ASSERT(FLAG_optimize_constructed_arrays);
+
__ li(a0, Operand(instr->arity()));
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ li(a2, Operand(instr->hydrogen()->property_cell()));
+ Handle<Code> array_construct_code =
+ isolate()->builtins()->ArrayConstructCode();
+
+ CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3619,6 +4005,13 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ __ Addu(result, base, Operand(instr->offset()));
+}
+
+
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
Register value = ToRegister(instr->value());
@@ -3693,29 +4086,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand) {
- if (value->representation().IsTagged() && !value->type().IsSmi()) {
- if (operand->IsRegister()) {
- __ And(at, ToRegister(operand), Operand(kSmiTagMask));
- DeoptimizeIf(ne, environment, at, Operand(zero_reg));
- } else {
- __ li(at, ToOperand(operand));
- __ And(at, at, Operand(kSmiTagMask));
- DeoptimizeIf(ne, environment, at, Operand(zero_reg));
- }
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->length(),
- instr->length());
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->index(),
- instr->index());
+ if (instr->hydrogen()->skip_check()) return;
+
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
@@ -3737,108 +4110,9 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->object());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- // Even though the HLoadKeyedFastElement instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(scratch, elements, scratch);
- } else {
- __ sll(scratch, key, kPointerSizeLog2);
- __ addu(scratch, elements, scratch);
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ sw(value, FieldMemOperand(store_base, offset));
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
- __ RecordWrite(elements,
- key,
- value,
- kRAHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFastDoubleElement(
- LStoreKeyedFastDoubleElement* instr) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch = scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- Label not_nan;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- if (key_is_constant) {
- __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- } else {
- __ sll(scratch, key, shift_size);
- __ Addu(scratch, elements, Operand(scratch));
- __ Addu(scratch, scratch,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- }
-
- if (instr->NeedsCanonicalization()) {
- Label is_nan;
- // Check for NaN. All NaNs must be canonicalized.
- __ BranchF(NULL, &is_nan, eq, value, value);
- __ Branch(&not_nan);
-
- // Only load canonical NaN if the comparison above set the overflow.
- __ bind(&is_nan);
- __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- }
-
- __ bind(&not_nan);
- __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
- element_size_shift));
-}
-
-
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
- LStoreKeyedSpecializedArrayElement* instr) {
-
- Register external_pointer = ToRegister(instr->external_pointer());
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ CpuFeatureScope scope(masm(), FPU);
+ Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
@@ -3909,6 +4183,118 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
}
}
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ CpuFeatureScope scope(masm(), FPU);
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch = scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ Label not_nan;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ if (key_is_constant) {
+ __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ sll(scratch, key, shift_size);
+ __ Addu(scratch, elements, Operand(scratch));
+ __ Addu(scratch, scratch,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ }
+
+ if (instr->NeedsCanonicalization()) {
+ Label is_nan;
+ // Check for NaN. All NaNs must be canonicalized.
+ __ BranchF(NULL, &is_nan, eq, value, value);
+ __ Branch(&not_nan);
+
+ // Only load canonical NaN if the comparison above set the overflow.
+ __ bind(&is_nan);
+ __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ }
+
+ __ bind(&not_nan);
+ __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
+ element_size_shift));
+}
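
As background for the canonicalization step above: FixedDoubleArray reserves one specific NaN bit pattern to mark holes, so any other NaN produced by user code is rewritten to a single canonical NaN before being stored. A minimal host-side sketch of that policy, with an assumed canonical payload (V8's exact constant lives elsewhere):

#include <cmath>
#include <cstdint>
#include <cstring>

// Sketch only: rewrite every NaN to one canonical quiet NaN so a reserved NaN
// payload (the array-hole marker) can never be stored by accident.
static double CanonicalizeForDoubleArray(double value) {
  if (std::isnan(value)) {
    const uint64_t kAssumedCanonicalNaN = 0x7FF8000000000000ULL;  // assumption
    double canonical;
    std::memcpy(&canonical, &kAssumedCanonicalNaN, sizeof(canonical));
    return canonical;
  }
  return value;
}
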
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
+ : no_reg;
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ // Even though the HLoadKeyed instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(scratch, elements, scratch);
+ } else {
+ __ sll(scratch, key, kPointerSizeLog2);
+ __ addu(scratch, elements, scratch);
+ }
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ sw(value, FieldMemOperand(store_base, offset));
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
+ __ RecordWrite(elements,
+ key,
+ value,
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
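
For the two shift cases above, a rough sketch of the byte-offset arithmetic, assuming 32-bit pointers and a one-bit smi tag (the constants below are illustrative stand-ins for V8's configuration):

#include <cstdint>

const int kPointerSizeLog2 = 2;  // assumed: 4-byte pointers
const int kSmiTagSize = 1;       // assumed: smi == (value << 1) on 32-bit

// A tagged smi key already carries a factor of two, so it needs one fewer
// doubling than an untagged integer key to become a byte offset.
uint32_t KeyToByteOffset(uint32_t key, bool key_is_tagged_smi) {
  int shift = key_is_tagged_smi ? (kPointerSizeLog2 - kSmiTagSize)
                                : kPointerSizeLog2;
  return key << shift;
}
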
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+ // Dispatch by case: external array, fixed double array, or fixed array.
+ if (instr->is_external()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
+ }
+}
+
+
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(a2));
ASSERT(ToRegister(instr->key()).is(a1));
@@ -3923,31 +4309,39 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_temp());
Register scratch = scratch0();
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = from_map->elements_kind();
- ElementsKind to_kind = to_map->elements_kind();
-
- __ mov(ToRegister(instr->result()), object_reg);
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
Label not_applicable;
__ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
__ Branch(&not_applicable, ne, scratch, Operand(from_map));
- __ li(new_map_reg, Operand(to_map));
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ li(new_map_reg, Operand(to_map));
__ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kRAHasBeenSaved, kDontSaveFPRegs);
+ } else if (FLAG_compiled_transitions) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ mov(a0, object_reg);
+ __ li(a1, Operand(to_map));
+ TransitionElementsKindStub stub(from_kind, to_kind);
+ __ CallStub(&stub);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(a2));
+ Register new_map_reg = ToRegister(instr->new_map_temp());
ASSERT(new_map_reg.is(a3));
+ __ li(new_map_reg, Operand(to_map));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
@@ -3955,7 +4349,9 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
IsFastObjectElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(a2));
+ Register new_map_reg = ToRegister(instr->new_map_temp());
ASSERT(new_map_reg.is(a3));
+ __ li(new_map_reg, Operand(to_map));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
RelocInfo::CODE_TARGET, instr);
@@ -3966,11 +4362,21 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
}
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ Label fail;
+ __ TestJSArrayForAllocationSiteInfo(object, temp, ne, &fail);
+ DeoptimizeIf(al, instr->environment());
+ __ bind(&fail);
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -4047,7 +4453,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
ASSERT(!char_code.is(result));
__ Branch(deferred->entry(), hi,
- char_code, Operand(String::kMaxAsciiCharCode));
+ char_code, Operand(String::kMaxOneByteCharCode));
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
__ sll(scratch, char_code, kPointerSizeLog2);
__ Addu(result, result, scratch);
@@ -4083,6 +4489,7 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ CpuFeatureScope scope(masm(), FPU);
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
@@ -4100,6 +4507,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ CpuFeatureScope scope(masm(), FPU);
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4161,13 +4569,51 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
+// Convert an unsigned integer with a specified number of leading zeroes in its
+// binary representation to an IEEE 754 double.
+// The integer to convert is passed in register hiword.
+// The resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
+static void GenerateUInt2Double(MacroAssembler* masm,
+ Register hiword,
+ Register loword,
+ Register scratch,
+ int leading_zeroes) {
+ const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+ const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+ const int mantissa_shift_for_hi_word =
+ meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+ const int mantissa_shift_for_lo_word =
+ kBitsPerInt - mantissa_shift_for_hi_word;
+ masm->li(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
+ if (mantissa_shift_for_hi_word > 0) {
+ masm->sll(loword, hiword, mantissa_shift_for_lo_word);
+ masm->srl(hiword, hiword, mantissa_shift_for_hi_word);
+ masm->Or(hiword, scratch, hiword);
+ } else {
+ masm->mov(loword, zero_reg);
+ masm->sll(hiword, hiword, mantissa_shift_for_hi_word);
+ masm->Or(hiword, scratch, hiword);
+ }
+
+ // If the least significant bit of the biased exponent was not 1, it was
+ // corrupted by the most significant bit of the mantissa, so fix that.
+ if (!(biased_exponent & 1)) {
+ masm->li(scratch, 1 << HeapNumber::kExponentShift);
+ masm->nor(scratch, scratch, scratch);
+ masm->and_(hiword, hiword, scratch);
+ }
+}
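
To make the bit construction above concrete, here is a host-side sketch of the same algorithm for the branch where mantissa_shift_for_hi_word is positive (the only branch the callers below exercise, with 0 or 1 leading zeroes); constants follow the standard IEEE 754 layout that HeapNumber assumes:

#include <cassert>
#include <cstdint>
#include <cstring>

static double UInt2DoubleModel(uint32_t value, int leading_zeroes) {
  const int kBitsPerInt = 32;
  const int kExponentBias = 1023;
  const int kExponentShift = 20;          // exponent position in the high word
  const int kMantissaBitsInTopWord = 20;

  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
  const int biased_exponent = kExponentBias + meaningful_bits;
  const int shift_hi = meaningful_bits - kMantissaBitsInTopWord;
  assert(shift_hi > 0);  // models only the first branch above

  uint32_t hi = value >> shift_hi;                  // top mantissa bits
  uint32_t lo = value << (kBitsPerInt - shift_hi);  // remaining mantissa bits
  hi |= static_cast<uint32_t>(biased_exponent) << kExponentShift;
  // The implicit leading 1 of the value lands in the exponent's lowest bit;
  // clear it again whenever that exponent bit is supposed to be 0.
  if (!(biased_exponent & 1)) hi &= ~(1u << kExponentShift);

  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;  // e.g. UInt2DoubleModel(0x80000000u, 0) == 2147483648.0
}
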
+
+
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness) {
Label slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
- FPURegister dbl_scratch = double_scratch0();
+ DoubleRegister dbl_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
@@ -4181,16 +4627,40 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ SmiUntag(src, dst);
__ Xor(src, src, Operand(0x80000000));
}
- __ mtc1(src, dbl_scratch);
- __ cvt_d_w(dbl_scratch, dbl_scratch);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatureScope scope(masm(), FPU);
+ __ mtc1(src, dbl_scratch);
+ __ cvt_d_w(dbl_scratch, dbl_scratch);
+ } else {
+ FloatingPointHelper::Destination dest =
+ FloatingPointHelper::kCoreRegisters;
+ FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, f0,
+ sfpd_lo, sfpd_hi,
+ scratch0(), f2);
+ }
} else {
- __ mtc1(src, dbl_scratch);
- __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatureScope scope(masm(), FPU);
+ __ mtc1(src, dbl_scratch);
+ __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
+ } else {
+ Label no_leading_zero, done;
+ __ And(at, src, Operand(0x80000000));
+ __ Branch(&no_leading_zero, ne, at, Operand(zero_reg));
+
+ // Integer has one leading zero.
+ GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 1);
+ __ Branch(&done);
+
+ __ bind(&no_leading_zero);
+ GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 0);
+ __ Branch(&done);
+ }
}
if (FLAG_inline_new) {
- __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
+ __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
__ Move(dst, t1);
__ Branch(&done);
}
@@ -4204,11 +4674,19 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ StoreToSafepointRegisterSlot(zero_reg, dst);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ Move(dst, v0);
+ __ Subu(dst, dst, kHeapObjectTag);
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatureScope scope(masm(), FPU);
+ __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
+ } else {
+ __ sw(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
+ __ sw(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
+ }
+ __ Addu(dst, dst, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -4230,15 +4708,72 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register temp1 = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
+ bool convert_hole = false;
+ HValue* change_input = instr->hydrogen()->value();
+ if (change_input->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(change_input);
+ convert_hole = load->UsesMustHandleHole();
+ }
+
+ Label no_special_nan_handling;
+ Label done;
+ if (convert_hole) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatureScope scope(masm(), FPU);
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ __ BranchF(&no_special_nan_handling, NULL, eq, input_reg, input_reg);
+ __ Move(reg, scratch0(), input_reg);
+ Label canonicalize;
+ __ Branch(&canonicalize, ne, scratch0(), Operand(kHoleNanUpper32));
+ __ li(reg, factory()->the_hole_value());
+ __ Branch(&done);
+ __ bind(&canonicalize);
+ __ Move(input_reg,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ } else {
+ Label not_hole;
+ __ Branch(&not_hole, ne, sfpd_hi, Operand(kHoleNanUpper32));
+ __ li(reg, factory()->the_hole_value());
+ __ Branch(&done);
+ __ bind(&not_hole);
+ __ And(scratch, sfpd_hi, Operand(0x7ff00000));
+ __ Branch(&no_special_nan_handling, ne, scratch, Operand(0x7ff00000));
+ Label special_nan_handling;
+ __ And(at, sfpd_hi, Operand(0x000FFFFF));
+ __ Branch(&special_nan_handling, ne, at, Operand(zero_reg));
+ __ Branch(&no_special_nan_handling, eq, sfpd_lo, Operand(zero_reg));
+ __ bind(&special_nan_handling);
+ double canonical_nan =
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double();
+ uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
+ __ li(sfpd_lo,
+ Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
+ __ li(sfpd_hi,
+ Operand(static_cast<uint32_t>(casted_nan >> 32)));
+ }
+ }
+
+ __ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+ // We want the untagged address first for performance.
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
+ DONT_TAG_RESULT);
} else {
__ Branch(deferred->entry());
}
__ bind(deferred->exit());
- __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatureScope scope(masm(), FPU);
+ __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
+ } else {
+ __ sw(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
+ __ sw(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
+ }
+ // Now that we have finished with the object's real address, tag it.
+ __ Addu(reg, reg, kHeapObjectTag);
+ __ bind(&done);
}
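
The non-FPU branch above classifies the double purely from its high and low words; a host-side sketch of that classification, with kHoleNanUpper32 treated as an opaque constant supplied by the caller:

#include <cstdint>

enum DoubleClass { kOrdinaryDouble, kTheHole, kOtherNaN };

// Sketch of the word-level tests used when no FPU is available.
static DoubleClass ClassifyHighLow(uint32_t hi, uint32_t lo,
                                   uint32_t hole_nan_upper32) {
  if (hi == hole_nan_upper32) return kTheHole;                    // hole marker
  if ((hi & 0x7FF00000u) != 0x7FF00000u) return kOrdinaryDouble;  // finite
  if ((hi & 0x000FFFFFu) == 0 && lo == 0) return kOrdinaryDouble; // infinity
  return kOtherNaN;  // must be replaced by the canonical NaN before use
}
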
@@ -4251,6 +4786,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ __ Subu(v0, v0, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(v0, reg);
}
@@ -4281,42 +4817,57 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DoubleRegister result_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
- LEnvironment* env) {
+ LEnvironment* env,
+ NumberUntagDMode mode) {
Register scratch = scratch0();
+ CpuFeatureScope scope(masm(), FPU);
Label load_smi, heap_number, done;
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
- // Heap number map check.
- __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- if (deoptimize_on_undefined) {
- DeoptimizeIf(ne, env, scratch, Operand(at));
- } else {
- Label heap_number;
- __ Branch(&heap_number, eq, scratch, Operand(at));
+ // Heap number map check.
+ __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ if (deoptimize_on_undefined) {
+ DeoptimizeIf(ne, env, scratch, Operand(at));
+ } else {
+ Label heap_number;
+ __ Branch(&heap_number, eq, scratch, Operand(at));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, env, input_reg, Operand(at));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(ne, env, input_reg, Operand(at));
- // Convert undefined to NaN.
- __ LoadRoot(at, Heap::kNanValueRootIndex);
- __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
- __ Branch(&done);
+ // Convert undefined to NaN.
+ __ LoadRoot(at, Heap::kNanValueRootIndex);
+ __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+ __ Branch(&done);
- __ bind(&heap_number);
- }
- // Heap number to double register conversion.
- __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- __ mfc1(at, result_reg.low());
- __ Branch(&done, ne, at, Operand(zero_reg));
- __ mfc1(scratch, result_reg.high());
- DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+ __ bind(&heap_number);
+ }
+ // Heap number to double register conversion.
+ __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ __ mfc1(at, result_reg.low());
+ __ Branch(&done, ne, at, Operand(zero_reg));
+ __ mfc1(scratch, result_reg.high());
+ DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+ }
+ __ Branch(&done);
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+ __ SmiUntag(scratch, input_reg);
+ DeoptimizeIf(Ugreater_equal, env, scratch, Operand(zero_reg));
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+ __ Move(result_reg,
+ FixedDoubleArray::hole_nan_as_double());
+ __ Branch(&done);
+ } else {
+ __ SmiUntag(scratch, input_reg);
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
- __ Branch(&done);
// Smi to double register conversion
__ bind(&load_smi);
@@ -4332,7 +4883,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_scratch = double_scratch0();
- FPURegister single_scratch = double_scratch.low();
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
@@ -4347,8 +4898,9 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// of the if.
if (instr->truncating()) {
+ CpuFeatureScope scope(masm(), FPU);
Register scratch3 = ToRegister(instr->temp2());
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
+ FPURegister single_scratch = double_scratch.low();
ASSERT(!scratch3.is(input_reg) &&
!scratch3.is(scratch1) &&
!scratch3.is(scratch2));
@@ -4383,18 +4935,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- single_scratch,
+ input_reg,
double_scratch,
scratch1,
+ double_scratch2,
except_flag,
kCheckForInexactConversion);
// Deopt if the operation did not succeed.
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
- // Load the result.
- __ mfc1(input_reg, single_scratch);
-
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, input_reg, Operand(zero_reg));
@@ -4444,10 +4994,28 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
DoubleRegister result_reg = ToDoubleRegister(result);
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
+ HValue* value = instr->hydrogen()->value();
+ if (value->type().IsSmi()) {
+ if (value->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(value);
+ if (load->UsesMustHandleHole()) {
+ if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+ mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
+ }
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
+ }
+ }
+ }
+
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment());
+ instr->environment(),
+ mode);
}
@@ -4456,10 +5024,10 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_input = ToDoubleRegister(instr->value());
- FPURegister single_scratch = double_scratch0().low();
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
+ FPURegister single_scratch = double_scratch0().low();
__ EmitECMATruncate(result_reg,
double_input,
single_scratch,
@@ -4470,17 +5038,15 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToMinusInf,
- single_scratch,
+ result_reg,
double_input,
scratch1,
+ double_scratch0(),
except_flag,
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
-
- // Load the result.
- __ mfc1(result_reg, single_scratch);
}
}
@@ -4556,37 +5122,38 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
}
-void LCodeGen::DoCheckMapCommon(Register reg,
- Register scratch,
+void LCodeGen::DoCheckMapCommon(Register map_reg,
Handle<Map> map,
CompareMapMode mode,
LEnvironment* env) {
Label success;
- __ CompareMapAndBranch(reg, scratch, map, &success, eq, &success, mode);
+ __ CompareMapAndBranch(map_reg, map, &success, eq, &success, mode);
DeoptimizeIf(al, env);
__ bind(&success);
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- Register scratch = scratch0();
+ Register map_reg = scratch0();
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
Label success;
SmallMapList* map_set = instr->hydrogen()->map_set();
+ __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
__ CompareMapAndBranch(
- reg, scratch, map, &success, eq, &success, REQUIRE_EXACT_MAP);
+ map_reg, map, &success, eq, &success, REQUIRE_EXACT_MAP);
}
Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
+ DoCheckMapCommon(map_reg, map, REQUIRE_EXACT_MAP, instr->environment());
__ bind(&success);
}
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ CpuFeatureScope vfp_scope(masm(), FPU);
DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
@@ -4595,6 +5162,7 @@ void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ CpuFeatureScope vfp_scope(masm(), FPU);
Register unclamped_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampUint8(result_reg, unclamped_reg);
@@ -4602,6 +5170,7 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ CpuFeatureScope vfp_scope(masm(), FPU);
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
@@ -4637,30 +5206,31 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
+ ASSERT(instr->temp()->Equals(instr->result()));
+ Register prototype_reg = ToRegister(instr->temp());
+ Register map_reg = ToRegister(instr->temp2());
- Handle<JSObject> holder = instr->holder();
- Handle<JSObject> current_prototype = instr->prototype();
+ ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
+ ZoneList<Handle<Map> >* maps = instr->maps();
- // Load prototype object.
- __ LoadHeapObject(temp1, current_prototype);
+ ASSERT(prototypes->length() == maps->length());
- // Check prototype maps up to the holder.
- while (!current_prototype.is_identical_to(holder)) {
- DoCheckMapCommon(temp1, temp2,
- Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
- current_prototype =
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
- // Load next prototype object.
- __ LoadHeapObject(temp1, current_prototype);
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) {
+ for (int i = 0; i < maps->length(); i++) {
+ prototype_maps_.Add(maps->at(i), info()->zone());
+ }
+ __ LoadHeapObject(prototype_reg,
+ prototypes->at(prototypes->length() - 1));
+ } else {
+ for (int i = 0; i < prototypes->length(); i++) {
+ __ LoadHeapObject(prototype_reg, prototypes->at(i));
+ __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
+ DoCheckMapCommon(map_reg,
+ maps->at(i),
+ ALLOW_ELEMENT_TRANSITION_MAPS,
+ instr->environment());
+ }
}
-
- // Check the holder map.
- DoCheckMapCommon(temp1, temp2,
- Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}
@@ -4692,12 +5262,8 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
// the constructor's prototype changes, but instance size and property
// counts remain unchanged (if slack tracking finished).
ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- TAG_OBJECT);
+ __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(),
+ TAG_OBJECT);
__ bind(deferred->exit());
if (FLAG_debug_code) {
@@ -4747,10 +5313,72 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
}
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate: public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ }
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ AllocateInNewSpace(size,
+ result,
+ scratch,
+ scratch2,
+ deferred->entry(),
+ flags);
+ }
+
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ Register size = ToRegister(instr->size());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, zero_reg);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ SmiTag(size, size);
+ __ push(size);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
+ AllocationSiteMode allocation_site_mode =
+ instr->hydrogen()->allocation_site_mode();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
@@ -4784,8 +5412,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(instr->hydrogen()->depth() == 1);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
@@ -4793,10 +5421,10 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
} else {
FastCloneShallowArrayStub::Mode mode =
boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4804,10 +5432,14 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset) {
+ int* offset,
+ AllocationSiteMode mode) {
ASSERT(!source.is(a2));
ASSERT(!result.is(a2));
+ bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
+ object->map()->CanTrackAllocationSite();
+
// Only elements backing stores for non-COW arrays need to be copied.
Handle<FixedArrayBase> elements(object->elements());
bool has_elements = elements->length() > 0 &&
@@ -4817,8 +5449,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
// this object and its backing store.
int object_offset = *offset;
int object_size = object->map()->instance_size();
- int elements_offset = *offset + object_size;
int elements_size = has_elements ? elements->Size() : 0;
+ int elements_offset = *offset + object_size;
+ if (create_allocation_site_info) {
+ elements_offset += AllocationSiteInfo::kSize;
+ *offset += AllocationSiteInfo::kSize;
+ }
+
*offset += object_size + elements_size;
// Copy object header.
@@ -4837,13 +5474,15 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
// Copy in-object properties.
for (int i = 0; i < inobject_properties; i++) {
int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+ Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
+ isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ Addu(a2, result, Operand(*offset));
__ sw(a2, FieldMemOperand(result, total_offset));
__ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
+ EmitDeepCopy(value_object, result, source, offset,
+ DONT_TRACK_ALLOCATION_SITE);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
__ sw(a2, FieldMemOperand(result, total_offset));
@@ -4853,6 +5492,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
}
}
+ // Build an AllocationSiteInfo if desired.
+ if (create_allocation_site_info) {
+ __ li(a2, Operand(Handle<Map>(isolate()->heap()->
+ allocation_site_info_map())));
+ __ sw(a2, FieldMemOperand(result, object_size));
+ __ sw(source, FieldMemOperand(result, object_size + kPointerSize));
+ }
if (has_elements) {
// Copy elements backing store header.
@@ -4883,13 +5529,14 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i));
+ Handle<Object> value(fast_elements->get(i), isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ Addu(a2, result, Operand(*offset));
__ sw(a2, FieldMemOperand(result, total_offset));
__ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
+ EmitDeepCopy(value_object, result, source, offset,
+ DONT_TRACK_ALLOCATION_SITE);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
__ sw(a2, FieldMemOperand(result, total_offset));
@@ -4929,7 +5576,7 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
// Allocate all objects that are part of the literal in one big
// allocation. This avoids multiple limit checks.
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
@@ -4940,7 +5587,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
__ bind(&allocated);
int offset = 0;
__ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
+ EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset,
+ instr->hydrogen()->allocation_site_mode());
ASSERT_EQ(size, offset);
}
@@ -4952,25 +5600,26 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
instr->hydrogen()->constant_properties();
// Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(t0, literals);
- __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(a2, Operand(constant_properties));
+ __ LoadHeapObject(a3, literals);
+ __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ li(a1, Operand(constant_properties));
int flags = instr->hydrogen()->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
- __ li(a1, Operand(Smi::FromInt(flags)));
- __ Push(t0, a3, a2, a1);
+ __ li(a0, Operand(Smi::FromInt(flags)));
// Pick the right runtime function or stub to call.
int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
+ __ Push(a3, a2, a1, a0);
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else if (flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ Push(a3, a2, a1, a0);
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
} else {
FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
}
@@ -5010,7 +5659,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
@@ -5044,7 +5693,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
FastNewClosureStub stub(shared_info->language_mode());
__ li(a1, Operand(shared_info));
__ push(a1);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ li(a2, Operand(shared_info));
__ li(a1, Operand(pretenure
@@ -5101,7 +5750,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
// register.
Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
- if (type_name->Equals(heap()->number_symbol())) {
+ if (type_name->Equals(heap()->number_string())) {
__ JumpIfSmi(input, true_label);
__ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
@@ -5109,7 +5758,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(at);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->string_symbol())) {
+ } else if (type_name->Equals(heap()->string_string())) {
__ JumpIfSmi(input, false_label);
__ GetObjectType(input, input, scratch);
__ Branch(USE_DELAY_SLOT, false_label,
@@ -5122,7 +5771,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(zero_reg);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->boolean_symbol())) {
+ } else if (type_name->Equals(heap()->boolean_string())) {
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
@@ -5130,13 +5779,13 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(input);
final_branch_condition = eq;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
cmp1 = at;
cmp2 = Operand(input);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->undefined_symbol())) {
+ } else if (type_name->Equals(heap()->undefined_string())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
// The first instruction of JumpIfSmi is an And - it is safe in the delay
@@ -5150,7 +5799,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(zero_reg);
final_branch_condition = ne;
- } else if (type_name->Equals(heap()->function_symbol())) {
+ } else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ GetObjectType(input, scratch, input);
@@ -5159,16 +5808,26 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->object_symbol())) {
+ } else if (type_name->Equals(heap()->object_string())) {
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
}
- // input is an object, it is safe to use GetObjectType in the delay slot.
- __ GetObjectType(input, input, scratch);
- __ Branch(USE_DELAY_SLOT, false_label,
- lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ if (FLAG_harmony_symbols) {
+ // input is an object, it is safe to use GetObjectType in the delay slot.
+ __ GetObjectType(input, input, scratch);
+ __ Branch(USE_DELAY_SLOT, true_label, eq, scratch, Operand(SYMBOL_TYPE));
+ // Still an object, so the InstanceType can be loaded.
+ __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
+ __ Branch(USE_DELAY_SLOT, false_label,
+ lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ } else {
+ // input is an object, it is safe to use GetObjectType in the delay slot.
+ __ GetObjectType(input, input, scratch);
+ __ Branch(USE_DELAY_SLOT, false_label,
+ lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ }
// Still an object, so the InstanceType can be loaded.
__ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
__ Branch(USE_DELAY_SLOT, false_label,
@@ -5222,6 +5881,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
void LCodeGen::EnsureSpaceForLazyDeopt() {
+ if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
@@ -5252,6 +5912,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
}
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
Register object = ToRegister(instr->object());
Register key = ToRegister(instr->key());
@@ -5311,7 +5976,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&done, hs, sp, Operand(at));
StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 38c5255a4b..1d2a65912a 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -54,6 +54,7 @@ class LCodeGen BASE_EMBEDDED {
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
+ prototype_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -61,6 +62,7 @@ class LCodeGen BASE_EMBEDDED {
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@@ -76,6 +78,15 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
@@ -123,10 +134,11 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocateObject(LAllocateObject* instr);
+ void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
- void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
+ void DoCheckMapCommon(Register map_reg, Handle<Map> map,
CompareMapMode mode, LEnvironment* env);
// Parallel move support.
@@ -189,7 +201,6 @@ class LCodeGen BASE_EMBEDDED {
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
@@ -262,8 +273,10 @@ class LCodeGen BASE_EMBEDDED {
LOperand* op,
bool is_tagged,
bool is_uint32,
+ bool arguments_known,
int arguments_index,
int arguments_count);
+ void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -316,11 +329,8 @@ class LCodeGen BASE_EMBEDDED {
DoubleRegister result,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
- LEnvironment* env);
-
- void DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand);
+ LEnvironment* env,
+ NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -365,17 +375,28 @@ class LCodeGen BASE_EMBEDDED {
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset);
+ int* offset,
+ AllocationSiteMode mode);
struct JumpTableEntry {
- explicit inline JumpTableEntry(Address entry)
+ inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
: label(),
- address(entry) { }
+ address(entry),
+ needs_frame(frame),
+ is_lazy_deopt(is_lazy) { }
Label label;
Address address;
+ bool needs_frame;
+ bool is_lazy_deopt;
};
void EnsureSpaceForLazyDeopt();
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
Zone* zone_;
LPlatformChunk* const chunk_;
@@ -388,6 +409,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
+ ZoneList<Handle<Map> > prototype_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
@@ -395,6 +417,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -410,6 +433,7 @@ class LCodeGen BASE_EMBEDDED {
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
index 87efae5f4d..b415156730 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -172,8 +172,10 @@ void LGapResolver::BreakCycle(int index) {
} else if (source->IsStackSlot()) {
__ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
+ CpuFeatureScope scope(cgen_->masm(), FPU);
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
+ CpuFeatureScope scope(cgen_->masm(), FPU);
__ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
@@ -193,9 +195,11 @@ void LGapResolver::RestoreValue() {
} else if (saved_destination_->IsStackSlot()) {
__ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
+ CpuFeatureScope scope(cgen_->masm(), FPU);
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) {
+ CpuFeatureScope scope(cgen_->masm(), FPU);
__ sdc1(kLithiumScratchDouble,
cgen_->ToMemOperand(saved_destination_));
} else {
@@ -232,6 +236,7 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsInt16Encodable()) {
+ CpuFeatureScope scope(cgen_->masm(), FPU);
// 'at' is overwritten while saving the value to the destination.
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
@@ -271,6 +276,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
+ CpuFeatureScope scope(cgen_->masm(), FPU);
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(destination), source_register);
@@ -281,6 +287,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleStackSlot()) {
+ CpuFeatureScope scope(cgen_->masm(), FPU);
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 0b6dcaea51..8848032b64 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -42,10 +42,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
register_spills_[i] = NULL;
}
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_spills_[i] = NULL;
}
}
@@ -112,7 +112,11 @@ void LInstruction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
for (int i = 0; i < InputCount(); i++) {
if (i > 0) stream->Add(" ");
- InputAt(i)->PrintTo(stream);
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
}
}
@@ -177,6 +181,7 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
case Token::SHL: return "sll-t";
case Token::SAR: return "sra-t";
case Token::SHR: return "srl-t";
@@ -285,6 +290,13 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + %d", offset());
+}
+
+
void LCallConstantFunction::PrintDataTo(StringStream* stream) {
stream->Add("#%d / ", arity());
}
@@ -296,6 +308,11 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
}
+void LMathExp::PrintDataTo(StringStream* stream) {
+ value()->PrintTo(stream);
+}
+
+
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -345,6 +362,17 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ASSERT(hydrogen()->property_cell()->value()->IsSmi());
+ ElementsKind kind = static_cast<ElementsKind>(
+ Smi::cast(hydrogen()->property_cell()->value())->value());
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
stream->Add(" length ");
@@ -372,20 +400,27 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", additional_index());
+ } else {
+ stream->Add("]");
+ }
}
-void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", additional_index());
+ } else {
+ stream->Add("] <- ");
+ }
value()->PrintTo(stream);
}
@@ -599,6 +634,7 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
#ifdef DEBUG
instr->VerifyCall();
#endif
@@ -639,8 +675,12 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand =
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- operand->set_virtual_register(allocator_->GetVirtualRegister());
- if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort("Out of virtual registers while trying to allocate temp register.");
+ return NULL;
+ }
+ operand->set_virtual_register(vreg);
return operand;
}
@@ -664,6 +704,11 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
}
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -702,15 +747,13 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
right = UseRegisterAtStart(right_value);
}
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
bool does_deopt = false;
-
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- bool may_deopt = (op == Token::SHR && constant_value == 0);
- if (may_deopt) {
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
does_deopt = true;
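
The deopt condition above exists because JavaScript's x >>> 0 produces a uint32, which may not fit in an int32 unless every use truncates it back; a tiny C++ illustration of the problematic value range:

#include <cstdint>
#include <iostream>

int main() {
  // x >>> 0 in JavaScript reinterprets x as uint32. For negative x the result
  // exceeds INT32_MAX, so untruncated uses of a "shift right by 0" force the
  // deoptimization accounted for above.
  int32_t x = -1;
  uint32_t unsigned_result = static_cast<uint32_t>(x) >> 0;
  std::cout << unsigned_result << "\n";  // prints 4294967295
  return 0;
}
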
@@ -896,7 +939,7 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
if (value->EmitAtUses()) {
- HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
+ HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new(zone()) LGoto(successor->block_id());
@@ -950,6 +993,12 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
+LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LInstanceSize(object));
+}
+
+
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@@ -978,6 +1027,15 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* inner_object) {
+ LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
+ LInnerAllocatedObject* result =
+ new(zone()) LInnerAllocatedObject(base_object);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
return instr->HasNoUses()
? NULL
@@ -986,7 +1044,14 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+ // If there is a non-return use, the context must be allocated in a register.
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->IsReturn()) {
+ return DefineAsRegister(new(zone()) LContext);
+ }
+ }
+
+ return NULL;
}
@@ -1034,6 +1099,15 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), f4);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, f4), instr);
+ } else if (op == kMathExp) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseTempRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* double_temp = FixedTemp(f6); // Chosen by fair dice roll.
+ LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
+ return DefineAsRegister(result);
} else if (op == kMathPowHalf) {
// Input cannot be the same as the result.
// See lithium-codegen-mips.cc::DoMathPowHalf.
@@ -1043,7 +1117,9 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DefineFixedDouble(result, f4);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
- LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
+
+ LOperand* temp = (op == kMathRound) ? FixedTemp(f6) :
+ (op == kMathFloor) ? TempRegister() : NULL;
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
switch (op) {
case kMathAbs:
@@ -1096,6 +1172,14 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
}
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* constructor = UseFixed(instr->constructor(), a1);
+ argument_count_ -= instr->argument_count();
+ LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* function = UseFixed(instr->function(), a1);
argument_count_ -= instr->argument_count();
@@ -1110,6 +1194,11 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
}
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
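
DoRor above routes HRor through the shared DoShift path with Token::ROR; the operation itself is a plain 32-bit rotate right. A sketch independent of V8:

    #include <cstdint>

    uint32_t RotateRight32(uint32_t value, unsigned amount) {
      amount &= 31u;
      if (amount == 0) return value;  // avoid the undefined 32-bit shift
      return (value >> amount) | (value << (32u - amount));
    }
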
@@ -1244,8 +1333,22 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
+ if (kArchVariant == kMips32r2) {
+ if (instr->UseCount() == 1 && instr->uses().value()->IsAdd()) {
+ HAdd* add = HAdd::cast(instr->uses().value());
+ if (instr == add->left()) {
+ // This mul is the lhs of an add. The add and mul will be folded
+ // into a multiply-add.
+ return NULL;
+ }
+ if (instr == add->right() && !add->left()->IsMul()) {
+ // This mul is the rhs of an add, where the lhs is not another mul.
+ // The add and mul will be folded into a multiply-add.
+ return NULL;
+ }
+ }
+ }
return DoArithmeticD(Token::MUL, instr);
-
} else {
return DoArithmeticT(Token::MUL, instr);
}
@@ -1272,6 +1375,15 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
}
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+ LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+ LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+ LOperand* addend_op = UseRegisterAtStart(addend);
+ return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
+ multiplicand_op));
+}
+
+
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
@@ -1285,6 +1397,15 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsDouble()) {
+ if (kArchVariant == kMips32r2) {
+ if (instr->left()->IsMul())
+ return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+
+ if (instr->right()->IsMul()) {
+ ASSERT(!instr->left()->IsMul());
+ return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+ }
+ }
return DoArithmeticD(Token::ADD, instr);
} else {
ASSERT(instr->representation().IsTagged());
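
On MIPS32r2 the DoMul/DoAdd changes above fold a double multiply whose only use is an add into a single LMultiplyAddD. A minimal sketch of the folding decision, using hypothetical node types rather than the real Hydrogen IR:

    #include <cstddef>

    // Stand-ins for the Hydrogen value nodes; names and fields are
    // illustrative, not the real V8 classes.
    enum Kind { kAdd, kMul, kOther };

    struct Node {
      Kind kind;
      Node* left;
      Node* right;
      std::size_t use_count;
      Node* single_use;  // meaningful only when use_count == 1
    };

    // Mirrors the guards in DoMul/DoAdd above: fold the multiply when its
    // only use is an add, and when it sits on the add's right-hand side
    // only if the left-hand side is not itself a multiply (otherwise both
    // operands would claim the fold).
    inline bool FoldsIntoMultiplyAdd(const Node* mul) {
      if (mul->kind != kMul || mul->use_count != 1) return false;
      const Node* add = mul->single_use;
      if (add == NULL || add->kind != kAdd) return false;
      if (add->left == mul) return true;
      return add->right == mul && add->left->kind != kMul;
    }

    // What LMultiplyAddD ultimately computes (madd.d on MIPS32r2):
    inline double MultiplyAdd(double addend, double multiplier, double multiplicand) {
      return addend + multiplier * multiplicand;
    }
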
@@ -1350,7 +1471,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
+ Representation r = instr->representation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@@ -1504,6 +1625,27 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegister(instr->index());
+ LOperand* value = UseRegister(instr->value());
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
+ HInductionVariableAnnotation* instr) {
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
@@ -1511,6 +1653,13 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+ HBoundsCheckBaseIndexInformation* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -1542,6 +1691,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation to = instr->to();
if (from.IsTagged()) {
if (to.IsDouble()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -1555,8 +1705,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
: NULL;
- LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(f22)
- : NULL;
+ LOperand* temp3 = FixedTemp(f22);
res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
temp1,
temp2,
@@ -1567,6 +1716,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
@@ -1586,6 +1736,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
@@ -1628,10 +1779,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LOperand* temp1 = TempRegister();
+ LUnallocated* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(result);
+ LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+ return AssignEnvironment(Define(result, temp1));
}
@@ -1641,6 +1792,12 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
+LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckFunction(value));
@@ -1674,7 +1831,9 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new(zone()) LReturn(UseFixed(instr->value(), v0));
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new(zone()) LReturn(UseFixed(instr->value(), v0),
+ parameter_count);
}
@@ -1800,53 +1959,49 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
- HLoadKeyedFastElement* instr) {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
- if (instr->RequiresHoleCheck()) AssignEnvironment(result);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
- HLoadKeyedFastDoubleElement* instr) {
- ASSERT(instr->representation().IsDouble());
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
- LOperand* elements = UseTempRegister(instr->elements());
+ ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastDoubleElement* result =
- new(zone()) LLoadKeyedFastDoubleElement(elements, key);
- return AssignEnvironment(DefineAsRegister(result));
-}
+ LLoadKeyed* result = NULL;
+ if (!instr->is_external()) {
+ LOperand* obj = NULL;
+ if (instr->representation().IsDouble()) {
+ obj = UseTempRegister(instr->elements());
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ obj = UseRegisterAtStart(instr->elements());
+ }
+ result = new(zone()) LLoadKeyed(obj, key);
+ } else {
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ // float->double conversion on non-VFP2 requires an extra scratch
+ // register. For convenience, just mark the elements register as "UseTemp"
+ // so that it can be used as a temp during the float->double conversion
+ // after it's no longer needed after the float load.
+ bool needs_temp =
+ !CpuFeatures::IsSupported(FPU) &&
+ (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
+ LOperand* external_pointer = needs_temp
+ ? UseTempRegister(instr->elements())
+ : UseRegister(instr->elements());
+ result = new(zone()) LLoadKeyed(external_pointer, key);
+ }
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
- HLoadKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LLoadKeyedSpecializedArrayElement* result =
- new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
- LInstruction* load_instr = DefineAsRegister(result);
+ DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
- return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
- AssignEnvironment(load_instr) : load_instr;
+ bool can_deoptimize = instr->RequiresHoleCheck() ||
+ (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
+ return can_deoptimize ? AssignEnvironment(result) : result;
}
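
The three keyed-load instructions collapse into one LLoadKeyed that dispatches on is_external() and elements_kind(); a load keeps an environment when it needs a hole check or reads EXTERNAL_UNSIGNED_INT_ELEMENTS. A small illustration of the unsigned-int case, outside of V8:

    #include <cstdint>

    // A uint32 above INT32_MAX cannot live in the untagged int32 result the
    // optimized code expects, so such a load must be able to deoptimize (or
    // box the value as a heap number on the slow path).
    bool FitsInInt32(uint32_t raw_element) {
      return raw_element <= 0x7fffffffu;
    }
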
@@ -1860,66 +2015,49 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
- HStoreKeyedFastElement* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- ASSERT(instr->value()->representation().IsTagged());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* obj = UseTempRegister(instr->object());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedFastElement(obj, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
- HStoreKeyedFastDoubleElement* instr) {
- ASSERT(instr->value()->representation().IsDouble());
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
- return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
-}
+ if (!instr->is_external()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ LOperand* object = NULL;
+ LOperand* val = NULL;
+ LOperand* key = NULL;
+
+ if (instr->value()->representation().IsDouble()) {
+ object = UseRegisterAtStart(instr->elements());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ val = UseTempRegister(instr->value());
+ } else {
+ ASSERT(instr->value()->representation().IsTagged());
+ object = UseTempRegister(instr->elements());
+ val = needs_write_barrier ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ key = needs_write_barrier ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ }
+ return new(zone()) LStoreKeyed(object, key, val);
+ }
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
- HStoreKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* external_pointer = UseRegister(instr->external_pointer());
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->elements()->representation().IsExternal());
bool val_is_temp_register =
elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register
- ? UseTempRegister(instr->value())
+ LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
: UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstant(instr->key());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LOperand* external_pointer = UseRegister(instr->elements());
- return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ return new(zone()) LStoreKeyed(external_pointer, key, val);
}
@@ -1938,14 +2076,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- ElementsKind from_kind = instr->original_map()->elements_kind();
- ElementsKind to_kind = instr->transitioned_map()->elements_kind();
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- LOperand* object = UseRegister(instr->object());
+ LOperand* object = UseRegister(instr->object());
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
return DefineSameAsFirst(result);
+ } else if (FLAG_compiled_transitions) {
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, NULL, NULL);
+ return AssignPointerMap(result);
} else {
LOperand* object = UseFixed(instr->object(), a0);
LOperand* fixed_object_reg = FixedTemp(a2);
@@ -1954,11 +2094,21 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
new(zone()) LTransitionElementsKind(object,
new_map_reg,
fixed_object_reg);
- return MarkAsCall(DefineFixed(result, v0), instr);
+ return MarkAsCall(result, instr);
}
}
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp);
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
bool needs_write_barrier_for_map = !instr->transition().is_null() &&
@@ -2025,12 +2175,23 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+ info()->MarkAsDeferredCalling();
LAllocateObject* result =
new(zone()) LAllocateObject(TempRegister(), TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* size = UseTempRegister(instr->size());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, v0), instr);
}
@@ -2073,8 +2234,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ Register reg = descriptor->register_params_[instr->index()];
+ return DefineFixed(result, reg);
+ }
}
@@ -2142,7 +2312,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
+ for (int i = instr->values()->length() - 1; i >= 0; --i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
@@ -2186,8 +2356,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->call_kind(),
- instr->inlining_kind());
+ instr->inlining_kind(),
+ instr->undefined_receiver());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
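
DoParameter above now handles two kinds of parameters: ordinary stack parameters keep a spill slot, while stub parameters are pinned to the registers named by the stub's interface descriptor. A simplified model, with a stand-in for CodeStubInterfaceDescriptor:

    // Hypothetical descriptor; the real class carries typed register handles.
    struct DescriptorModel {
      int register_params[4];  // register codes, indexed by parameter position
    };

    // Returns a register code for stub parameters, or the stack slot index
    // for ordinary parameters (sketch only; the real code returns LOperands).
    int ParameterLocation(bool is_stack_parameter,
                          int index,
                          int stack_slot,
                          const DescriptorModel& descriptor) {
      return is_stack_parameter ? stack_slot : descriptor.register_params[index];
    }
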
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 3a9aa7accf..80635c3896 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -50,6 +50,7 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(AllocateObject) \
+ V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -67,6 +68,7 @@ class LCodeGen;
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
V(CheckFunction) \
@@ -93,6 +95,7 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(DummyUse) \
V(ElementsKind) \
V(FastLiteral) \
V(FixedArrayBaseLength) \
@@ -106,6 +109,7 @@ class LCodeGen;
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
+ V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Uint32ToDouble) \
@@ -125,17 +129,17 @@ class LCodeGen;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyedFastDoubleElement) \
- V(LoadKeyedFastElement) \
+ V(LoadKeyed) \
V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
+ V(MathExp) \
V(MathMinMax) \
V(ModI) \
V(MulI) \
+ V(MultiplyAddD) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
@@ -149,6 +153,7 @@ class LCodeGen;
V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@@ -156,10 +161,8 @@ class LCodeGen;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyedFastDoubleElement) \
- V(StoreKeyedFastElement) \
+ V(StoreKeyed) \
V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
@@ -173,6 +176,7 @@ class LCodeGen;
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@@ -184,7 +188,9 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(DateField) \
V(WrapReceiver) \
- V(Drop)
+ V(Drop) \
+ V(InnerAllocatedObject)
+
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
virtual Opcode opcode() const { return LInstruction::k##type; } \
@@ -254,6 +260,11 @@ class LInstruction: public ZoneObject {
void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return is_call_; }
+ bool ClobbersRegisters() const { return is_call_; }
+ bool ClobbersDoubleRegisters() const { return is_call_; }
+
+ // Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
virtual bool HasResult() const = 0;
@@ -397,6 +408,15 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
+class LDummyUse: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
@@ -605,6 +625,24 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
+// Instruction for computing multiplier * multiplicand + addend.
+class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = addend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* addend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -620,7 +658,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
@@ -645,6 +683,30 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
};
+class LMathExp: public LTemplateInstruction<1, 1, 3> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* double_temp,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = double_temp;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* double_temp() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -901,6 +963,19 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
+class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInstanceSize(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
+};
+
+
class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1122,6 +1197,30 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
+class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LSeqStringSetChar(String::Encoding encoding,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) : encoding_(encoding) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ inputs_[2] = value;
+ }
+
+ String::Encoding encoding() { return encoding_; }
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
+};
+
+
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@@ -1246,14 +1345,24 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
};
-class LReturn: public LTemplateInstruction<0, 1, 0> {
+class LReturn: public LTemplateInstruction<0, 2, 0> {
public:
- explicit LReturn(LOperand* value) {
+ explicit LReturn(LOperand* value, LOperand* parameter_count) {
inputs_[0] = value;
+ inputs_[1] = parameter_count;
}
LOperand* value() { return inputs_[0]; }
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ ASSERT(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+ LOperand* parameter_count() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
@@ -1337,59 +1446,26 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
- "load-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
+ bool is_external() const {
+ return hydrogen()->is_external();
}
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
+ virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1526,6 +1602,22 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
+class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInnerAllocatedObject(LOperand* base_object) {
+ inputs_[0] = base_object;
+ }
+
+ LOperand* base_object() { return inputs_[0]; }
+ int offset() { return hydrogen()->offset(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
+ DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
+};
+
+
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
@@ -1536,6 +1628,7 @@ class LThisFunction: public LTemplateInstruction<1, 0, 0> {
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
};
@@ -1698,6 +1791,23 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
};
+class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallNewArray(LOperand* constructor) {
+ inputs_[0] = constructor;
+ }
+
+ LOperand* constructor() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
@@ -1769,6 +1879,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -1903,51 +2014,28 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedFastDoubleElement(LOperand* elements,
- LOperand* key,
- LOperand* value) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
+ bool is_external() const { return hydrogen()->is_external(); }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
- "store-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
virtual void PrintDataTo(StringStream* stream);
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1971,37 +2059,15 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* value) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
-
- ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
- LOperand* temp) {
+ LOperand* fixed_object_temp) {
inputs_[0] = object;
temps_[0] = new_map_temp;
- temps_[1] = temp;
+ temps_[1] = fixed_object_temp;
}
LOperand* object() { return inputs_[0]; }
@@ -2016,6 +2082,24 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LTrapAllocationMemento(LOperand* object,
+ LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+ "trap-allocation-memento")
};
@@ -2115,7 +2199,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
+class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
public:
LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
@@ -2128,8 +2212,10 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
- Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
- Handle<JSObject> holder() const { return hydrogen()->holder(); }
+ ZoneList<Handle<JSObject> >* prototypes() const {
+ return hydrogen()->prototypes();
+ }
+ ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
};
@@ -2197,7 +2283,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
+class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
public:
LAllocateObject(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
@@ -2212,6 +2298,23 @@ class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
};
+class LAllocate: public LTemplateInstruction<1, 2, 2> {
+ public:
+ LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+ inputs_[1] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
@@ -2336,8 +2439,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+ LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+ LOperand* double_register_spills_[
+ DoubleRegister::kMaxNumAllocatableRegisters];
};
@@ -2459,6 +2563,8 @@ class LChunkBuilder BASE_EMBEDDED {
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+ LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+
private:
enum Status {
UNUSED,
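
Throughout this header the <R, I, T> parameters of LTemplateInstruction give the result, input and temp counts, which is why, for example, LCheckPrototypeMaps moves from <0, 0, 2> to <1, 0, 2> once it defines a result. A minimal model of that shape (illustrative only; the real class uses a container that also allows zero-length operand arrays):

    // Fixed-size operand storage keyed by the template arguments.
    template <int Results, int Inputs, int Temps>
    struct TemplateInstrModel {
      void* results_[Results];
      void* inputs_[Inputs];
      void* temps_[Temps];
    };

    typedef TemplateInstrModel<1, 1, 3> MathExpShape;  // matches LMathExp above
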
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 052387ab01..603f1be70e 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -118,8 +118,8 @@ void MacroAssembler::PopSafepointRegisters() {
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
- Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
+ Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
FPURegister reg = FPURegister::FromAllocationIndex(i);
sdc1(reg, MemOperand(sp, i * kDoubleSize));
}
@@ -127,11 +127,11 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() {
void MacroAssembler::PopSafepointRegistersAndDoubles() {
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
FPURegister reg = FPURegister::FromAllocationIndex(i);
ldc1(reg, MemOperand(sp, i * kDoubleSize));
}
- Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
+ Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
PopSafepointRegisters();
}
@@ -167,7 +167,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
- int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
+ int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -853,7 +853,7 @@ void MacroAssembler::MultiPopReversed(RegList regs) {
void MacroAssembler::MultiPushFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(this, FPU);
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -868,7 +868,7 @@ void MacroAssembler::MultiPushFPU(RegList regs) {
void MacroAssembler::MultiPushReversedFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(this, FPU);
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -883,7 +883,7 @@ void MacroAssembler::MultiPushReversedFPU(RegList regs) {
void MacroAssembler::MultiPopFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(this, FPU);
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
@@ -897,7 +897,7 @@ void MacroAssembler::MultiPopFPU(RegList regs) {
void MacroAssembler::MultiPopReversedFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(this, FPU);
int16_t stack_offset = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
@@ -1125,23 +1125,19 @@ void MacroAssembler::BranchF(Label* target,
// have been handled by the caller.
// Unsigned conditions are treated as their signed counterpart.
switch (cc) {
- case Uless:
- case less:
+ case lt:
c(OLT, D, cmp1, cmp2);
bc1t(target);
break;
- case Ugreater:
- case greater:
+ case gt:
c(ULE, D, cmp1, cmp2);
bc1f(target);
break;
- case Ugreater_equal:
- case greater_equal:
+ case ge:
c(ULT, D, cmp1, cmp2);
bc1f(target);
break;
- case Uless_equal:
- case less_equal:
+ case le:
c(OLE, D, cmp1, cmp2);
bc1t(target);
break;
@@ -1149,10 +1145,18 @@ void MacroAssembler::BranchF(Label* target,
c(EQ, D, cmp1, cmp2);
bc1t(target);
break;
+ case ueq:
+ c(UEQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
case ne:
c(EQ, D, cmp1, cmp2);
bc1f(target);
break;
+ case nue:
+ c(UEQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
default:
CHECK(0);
};
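
BranchF gains ueq and nue cases: the FPU's UEQ predicate is true when the operands compare equal or are unordered (at least one NaN), and nue branches on its negation. A C-level model of the predicate:

    #include <cmath>

    bool UnorderedOrEqual(double a, double b) {
      // Unordered means at least one operand is NaN; UEQ also accepts equality.
      return std::isnan(a) || std::isnan(b) || a == b;
    }
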
@@ -1165,7 +1169,7 @@ void MacroAssembler::BranchF(Label* target,
void MacroAssembler::Move(FPURegister dst, double imm) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
+ ASSERT(IsEnabled(FPU));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
@@ -1345,7 +1349,7 @@ void MacroAssembler::ConvertToInt32(Register source,
}
bind(&right_exponent);
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(this, FPU);
// MIPS FPU instructions implementing double precision to integer
// conversion using round to zero. Since the FP value was qualified
// above, the resulting integer should be a legal int32.
@@ -1395,49 +1399,68 @@ void MacroAssembler::ConvertToInt32(Register source,
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
- FPURegister result,
+ Register result,
DoubleRegister double_input,
- Register scratch1,
+ Register scratch,
+ DoubleRegister double_scratch,
Register except_flag,
CheckForInexactConversion check_inexact) {
+ ASSERT(!result.is(scratch));
+ ASSERT(!double_input.is(double_scratch));
+ ASSERT(!except_flag.is(scratch));
+
ASSERT(CpuFeatures::IsSupported(FPU));
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(this, FPU);
+ Label done;
+
+ // Clear the except flag (0 = no exception)
+ mov(except_flag, zero_reg);
+
+ // Test for values that can be exactly represented as a signed 32-bit integer.
+ cvt_w_d(double_scratch, double_input);
+ mfc1(result, double_scratch);
+ cvt_d_w(double_scratch, double_scratch);
+ BranchF(&done, NULL, eq, double_input, double_scratch);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
if (check_inexact == kDontCheckForInexactConversion) {
- // Ingore inexact exceptions.
+ // Ignore inexact exceptions.
except_mask &= ~kFCSRInexactFlagMask;
}
// Save FCSR.
- cfc1(scratch1, FCSR);
+ cfc1(scratch, FCSR);
// Disable FPU exceptions.
ctc1(zero_reg, FCSR);
// Do operation based on rounding mode.
switch (rounding_mode) {
case kRoundToNearest:
- Round_w_d(result, double_input);
+ Round_w_d(double_scratch, double_input);
break;
case kRoundToZero:
- Trunc_w_d(result, double_input);
+ Trunc_w_d(double_scratch, double_input);
break;
case kRoundToPlusInf:
- Ceil_w_d(result, double_input);
+ Ceil_w_d(double_scratch, double_input);
break;
case kRoundToMinusInf:
- Floor_w_d(result, double_input);
+ Floor_w_d(double_scratch, double_input);
break;
} // End of switch-statement.
// Retrieve FCSR.
cfc1(except_flag, FCSR);
// Restore FCSR.
- ctc1(scratch1, FCSR);
+ ctc1(scratch, FCSR);
+ // Move the converted value into the result register.
+ mfc1(result, double_scratch);
// Check for fpu exceptions.
And(except_flag, except_flag, Operand(except_mask));
+
+ bind(&done);
}
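
The reworked EmitFPUTruncate first tries a cheap exactness test: convert to int32, convert back, and compare against the input; only when the round trip differs does it fall into the rounding-mode and FCSR-flag path. A C-level sketch of the fast path (the range guard stands in for what cvt.w.d handles in hardware):

    #include <cstdint>

    bool TryExactInt32(double input, int32_t* out) {
      // Guard the cast: converting an out-of-range double to int32 is
      // undefined in C++, whereas the FPU instruction just raises a flag.
      if (!(input >= -2147483648.0 && input < 2147483648.0)) return false;
      int32_t candidate = static_cast<int32_t>(input);
      if (static_cast<double>(candidate) != input) return false;  // inexact: slow path
      *out = candidate;
      return true;
    }
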
@@ -1529,7 +1552,7 @@ void MacroAssembler::EmitECMATruncate(Register result,
Register scratch,
Register scratch2,
Register scratch3) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(this, FPU);
ASSERT(!scratch2.is(result));
ASSERT(!scratch3.is(result));
ASSERT(!scratch3.is(scratch2));
@@ -2736,7 +2759,7 @@ void MacroAssembler::DebugBreak() {
PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(1);
ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
}
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -2889,12 +2912,12 @@ void MacroAssembler::ThrowUncatchable(Register value) {
}
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2922,20 +2945,21 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Check relative positions of allocation top and limit addresses.
// ARM adds additional checks to make sure the ldm instruction can be
// used. On MIPS we don't have ldm so we don't need additional checks either.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+ reinterpret_cast<intptr_t>(allocation_limit.address());
ASSERT((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
Register topaddr = scratch1;
Register obj_size_reg = scratch2;
- li(topaddr, Operand(new_space_allocation_top));
+ li(topaddr, Operand(allocation_top));
li(obj_size_reg, Operand(object_size));
// This code stores a temporary value in t9.
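
AllocateInNewSpace becomes Allocate because the top and limit addresses are now chosen per space through AllocationUtils (new space by default, old-pointer space when pretenuring is requested); the core remains a bump-pointer allocation. A simplified model:

    #include <cstdint>

    struct SpaceModel {
      uintptr_t top;    // next free address
      uintptr_t limit;  // end of the current allocation area
    };

    // Returns the start of the new object, or 0 when the caller must branch
    // to gc_required (sketch; the real code also tags the result when
    // TAG_OBJECT is requested).
    uintptr_t BumpAllocate(SpaceModel* space, uintptr_t object_size) {
      uintptr_t result = space->top;
      uintptr_t new_top = result + object_size;
      if (new_top > space->limit) return 0;
      space->top = new_top;
      return result;
    }
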
@@ -2974,6 +2998,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3109,9 +3134,9 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string
// while observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT(kCharSize == 1);
- addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
+ addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
@@ -3136,12 +3161,8 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
Heap::kConsStringMapRootIndex,
@@ -3155,12 +3176,8 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
Heap::kConsAsciiStringMapRootIndex,
@@ -3174,12 +3191,8 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
@@ -3194,12 +3207,8 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
@@ -3215,19 +3224,20 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* need_gc) {
+ Label* need_gc,
+ TaggingMode tagging_mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- TAG_OBJECT);
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
+ tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
// Store heap number map in the allocated object.
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ if (tagging_mode == TAG_RESULT) {
+ sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ } else {
+ sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ }
}
@@ -3380,13 +3390,13 @@ void MacroAssembler::CheckFastSmiElements(Register map,
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
- Label* fail) {
+ Label* fail,
+ int elements_offset) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@@ -3412,8 +3422,10 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&have_double_value);
sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
Addu(scratch1, scratch1, elements_reg);
- sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ sw(mantissa_reg, FieldMemOperand(
+ scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
+ uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
+ sizeof(kHoleNanLower32);
sw(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
@@ -3433,7 +3445,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&smi_value);
Addu(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
+ elements_offset));
sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
Addu(scratch1, scratch1, scratch2);
// scratch1 is now effective address of the double element
@@ -3456,7 +3469,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
scratch4,
f2);
if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(this, FPU);
sdc1(f0, MemOperand(scratch1, 0));
} else {
sw(mantissa_reg, MemOperand(scratch1, 0));
@@ -3549,7 +3562,7 @@ void MacroAssembler::CheckMap(Register obj,
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(this, FPU);
if (IsMipsSoftFloatABI) {
Move(dst, v0, v1);
} else {
@@ -3559,7 +3572,7 @@ void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) {
Move(f12, dreg);
} else {
@@ -3570,7 +3583,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
DoubleRegister dreg2) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) {
if (dreg2.is(f12)) {
ASSERT(!dreg1.is(f14));
@@ -3589,7 +3602,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
Register reg) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) {
Move(f12, dreg);
Move(a2, reg);
@@ -3834,6 +3847,15 @@ void MacroAssembler::IsObjectJSStringType(Register object,
}
+void MacroAssembler::IsObjectNameType(Register object,
+ Register scratch,
+ Label* fail) {
+ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
+}
+
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -3907,19 +3929,21 @@ void MacroAssembler::GetObjectType(Register object,
// Runtime calls.
void MacroAssembler::CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id,
Condition cond,
Register r1,
const Operand& r2,
BranchDelaySlot bd) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(),
+ Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id,
cond, r1, r2, bd);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
+ ASSERT(allow_stub_calls_ ||
+ stub->CompilingCallsToThisStubIsGCSafe(isolate()));
+ Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -3931,13 +3955,13 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
int stack_space) {
ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
+ ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(),
+ ExternalReference::handle_scope_limit_address(isolate()),
next_address);
const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(),
+ ExternalReference::handle_scope_level_address(isolate()),
next_address);
// Allocate HandleScope in callee-save registers.
@@ -3948,6 +3972,14 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
Addu(s2, s2, Operand(1));
sw(s2, MemOperand(s3, kLevelOffset));
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(0, a0);
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
+ PopSafepointRegisters();
+ }
+
// The O32 ABI requires us to pass a pointer in a0 where the returned struct
// (4 bytes) will be placed. This is also built into the Simulator.
// Set up the pointer to the returned value (a0). It was allocated in
@@ -3960,6 +3992,14 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
DirectCEntryStub stub;
stub.GenerateCall(this, function);
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(0, a0);
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
+ PopSafepointRegisters();
+ }
+
// As mentioned above, on MIPS a pointer is returned - we need to dereference
// it to get the actual return value (which is also a pointer).
lw(v0, MemOperand(v0));
@@ -4020,7 +4060,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+ return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
}
@@ -4205,7 +4245,10 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
PrepareCEntryArgs(function->nargs);
PrepareCEntryFunction(ExternalReference(function, isolate()));
- CEntryStub stub(1, kSaveFPRegs);
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ CEntryStub stub(1, mode);
CallStub(&stub);
}
@@ -4222,7 +4265,7 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
PrepareCEntryFunction(ext);
CEntryStub stub(1);
- CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
+ CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
}
@@ -4251,7 +4294,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd) {
PrepareCEntryFunction(builtin);
CEntryStub stub(1);
- Jump(stub.GetCode(),
+ Jump(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
al,
zero_reg,
@@ -4509,6 +4552,19 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
+void MacroAssembler::LoadArrayFunction(Register function) {
+ // Load the global or builtins object from the current context.
+ lw(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the global context from the global or builtins object.
+ lw(function,
+ FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
+ // Load the array function from the native context.
+ lw(function,
+ MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
@@ -4584,16 +4640,17 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
+ CpuFeatureScope scope(this, FPU);
// The stack must be allign to 0 modulo 8 for stores with sdc1.
ASSERT(kDoubleSize == frame_alignment);
if (frame_alignment > 0) {
ASSERT(IsPowerOf2(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
- int space = FPURegister::kNumRegisters * kDoubleSize;
+ int space = FPURegister::kMaxNumRegisters * kDoubleSize;
Subu(sp, sp, Operand(space));
// Remember: we only need to save every 2nd double FPU value.
- for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+ for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
FPURegister reg = FPURegister::from_code(i);
sdc1(reg, MemOperand(sp, i * kDoubleSize));
}
@@ -4621,9 +4678,10 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
bool do_return) {
// Optionally restore all double registers.
if (save_doubles) {
+ CpuFeatureScope scope(this, FPU);
// Remember: we only need to restore every 2nd double FPU value.
lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
- for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+ for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
FPURegister reg = FPURegister::from_code(i);
ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
}
@@ -4830,6 +4888,20 @@ void MacroAssembler::AssertString(Register object) {
}
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ And(t0, object, Operand(kSmiTagMask));
+ Check(ne, "Operand is a smi and not a name", t0, Operand(zero_reg));
+ push(object);
+ lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+ Check(le, "Operand is not a name", object, Operand(LAST_NAME_TYPE));
+ pop(object);
+ }
+}
+
+
void MacroAssembler::AssertRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message) {
@@ -5272,7 +5344,7 @@ void MacroAssembler::EnsureNotWhite(
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
- ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
lw(t9, FieldMemOperand(value, String::kLengthOffset));
And(t8, instance_type, Operand(kStringEncodingMask));
@@ -5397,6 +5469,29 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
}
+void MacroAssembler::TestJSArrayForAllocationSiteInfo(
+ Register receiver_reg,
+ Register scratch_reg,
+ Condition cond,
+ Label* allocation_info_present) {
+ Label no_info_available;
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ Addu(scratch_reg, receiver_reg,
+ Operand(JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
+ Branch(&no_info_available, lt, scratch_reg, Operand(new_space_start));
+ li(at, Operand(new_space_allocation_top));
+ lw(at, MemOperand(at));
+ Branch(&no_info_available, gt, scratch_reg, Operand(at));
+ lw(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize));
+ Branch(allocation_info_present, cond, scratch_reg,
+ Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
+ bind(&no_info_available);
+}
+
+
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
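TestJSArrayForAllocationSiteInfo only dereferences the candidate AllocationSiteInfo after checking that the address just past the JSArray lies inside the used part of new space. A standalone model of that range check, using illustrative sizes in place of JSArray::kSize and AllocationSiteInfo::kSize:

    #include <cstdint>
    #include <cstdio>

    // Illustrative sizes; the real values come from V8's object layouts.
    const uintptr_t kJSArraySize = 16;
    const uintptr_t kAllocationSiteInfoSize = 8;

    // Models the range check: a candidate AllocationSiteInfo must sit directly
    // behind the JSArray and inside the used part of new space before its map
    // word is compared against allocation_site_info_map.
    bool MayHaveAllocationSiteInfo(uintptr_t receiver_addr,
                                   uintptr_t new_space_start,
                                   uintptr_t new_space_top) {
      uintptr_t info_end = receiver_addr + kJSArraySize + kAllocationSiteInfoSize;
      if (info_end < new_space_start) return false;  // below new space
      if (info_end > new_space_top) return false;    // beyond the allocation top
      return true;  // caller now checks the map word at info_end - size
    }

    int main() {
      std::printf("%d\n", MayHaveAllocationSiteInfo(0x1000, 0x800, 0x4000) ? 1 : 0);
      return 0;
    }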
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index b57e51486c..e4cf3bcb7c 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -65,6 +65,14 @@ enum AllocationFlags {
SIZE_IN_WORDS = 1 << 2
};
+// Flags used for AllocateHeapNumber
+enum TaggingMode {
+ // Tag the result.
+ TAG_RESULT,
+ // Don't tag the result.
+ DONT_TAG_RESULT
+};
+
// Flags used for the ObjectToDoubleFPURegister function.
enum ObjectToDoubleFlags {
// No special flags.
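The TaggingMode flag added above lets AllocateHeapNumber hand back either a tagged heap-object pointer or the raw allocation address. A tiny sketch of the difference, assuming V8's usual low tag bit of 1 for heap objects:

    #include <cstdint>
    #include <cstdio>

    const uintptr_t kHeapObjectTag = 1;  // assumed low tag bit for heap objects

    // Models TAG_RESULT vs DONT_TAG_RESULT: with DONT_TAG_RESULT the caller
    // gets the raw allocation address and can fill in the heap number before
    // tagging it itself.
    uintptr_t AllocationResult(uintptr_t raw_address, bool tag_result) {
      return tag_result ? raw_address + kHeapObjectTag : raw_address;
    }

    int main() {
      unsigned long long tagged = AllocationResult(0x2000, true);
      unsigned long long untagged = AllocationResult(0x2000, false);
      std::printf("tagged: %#llx, untagged: %#llx\n", tagged, untagged);
      return 0;
    }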
@@ -469,19 +477,20 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Allocation support.
- // Allocate an object in new space. The object_size is specified
- // either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the new space is exhausted control continues at the
- // gc_required label. The allocated object is returned in result. If
- // the flag tag_allocated_object is true the result is tagged as as
- // a heap object. All registers are clobbered also when control
- // continues at the gc_required label.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ // Allocate an object in new space or old pointer space. The object_size is
+ // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. If the space is exhausted control continues at the gc_required
+ // label. The allocated object is returned in result. If the flag
+ // tag_allocated_object is true the result is tagged as a heap object.
+ // All registers are clobbered also when control continues at the gc_required
+ // label.
+ void Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
void AllocateInNewSpace(Register object_size,
Register result,
Register scratch1,
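The new Allocate entry point keeps the old contract that object_size is in bytes unless SIZE_IN_WORDS is passed. A minimal sketch of how the two flag settings translate into a byte count on MIPS32 (a kPointerSize of 4 is assumed):

    #include <cstdio>

    const int kPointerSize = 4;  // MIPS32
    enum AllocationFlags { NO_ALLOCATION_FLAGS = 0, SIZE_IN_WORDS = 1 << 2 };

    // Models how Allocate interprets object_size: with SIZE_IN_WORDS the size
    // is scaled by the pointer size before the space's top pointer is bumped.
    int AllocationSizeInBytes(int object_size, int flags) {
      return (flags & SIZE_IN_WORDS) ? object_size * kPointerSize : object_size;
    }

    int main() {
      std::printf("%d bytes vs %d bytes\n",
                  AllocationSizeInBytes(8, SIZE_IN_WORDS),
                  AllocationSizeInBytes(8, NO_ALLOCATION_FLAGS));
      return 0;
    }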
@@ -536,7 +545,8 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* gc_required);
+ Label* gc_required,
+ TaggingMode tagging_mode = TAG_RESULT);
void AllocateHeapNumberWithValue(Register result,
FPURegister value,
Register scratch1,
@@ -620,6 +630,7 @@ class MacroAssembler: public Assembler {
// Push a handle.
void Push(Handle<Object> handle);
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
@@ -752,14 +763,16 @@ class MacroAssembler: public Assembler {
FPURegister double_scratch,
Label *not_int32);
- // Truncates a double using a specific rounding mode.
+ // Truncates a double using a specific rounding mode, and writes the value
+ // to the result register.
// The except_flag will contain any exceptions caused by the instruction.
- // If check_inexact is kDontCheckForInexactConversion, then the inexacat
+ // If check_inexact is kDontCheckForInexactConversion, then the inexact
// exception is masked.
void EmitFPUTruncate(FPURoundingMode rounding_mode,
- FPURegister result,
+ Register result,
DoubleRegister double_input,
- Register scratch1,
+ Register scratch,
+ DoubleRegister double_scratch,
Register except_flag,
CheckForInexactConversion check_inexact
= kDontCheckForInexactConversion);
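EmitFPUTruncate now delivers the truncated value in a general-purpose result register and reports problems through except_flag. A rough standalone model of what it computes, using plain C++ range and exactness checks rather than the FCSR cause bits:

    #include <cmath>
    #include <cstdio>
    #include <limits>

    // Rough model: the integer result goes to a GPR, and except_flag is set
    // when the conversion was inexact or out of the int32 range.
    int TruncateWithFlag(double input, int* except_flag) {
      double truncated = std::trunc(input);
      bool in_range = truncated >= std::numeric_limits<int>::min() &&
                      truncated <= std::numeric_limits<int>::max();
      bool inexact = truncated != input;
      *except_flag = (!in_range || inexact) ? 1 : 0;
      return in_range ? static_cast<int>(truncated) : 0;
    }

    int main() {
      int flag = 0;
      std::printf("%d (flag=%d)\n", TruncateWithFlag(3.75, &flag), flag);
      return 0;
    }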
@@ -823,6 +836,7 @@ class MacroAssembler: public Assembler {
bool can_have_holes);
void LoadGlobalFunction(int index, Register function);
+ void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -887,6 +901,10 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
+ void IsObjectNameType(Register object,
+ Register scratch,
+ Label* fail);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// -------------------------------------------------------------------------
// Debugger Support.
@@ -972,14 +990,14 @@ class MacroAssembler: public Assembler {
// case scratch2, scratch3 and scratch4 are unmodified.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- Register receiver_reg,
// All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
- Label* fail);
+ Label* fail,
+ int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
@@ -1129,6 +1147,7 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = cc_always,
Register r1 = zero_reg,
const Operand& r2 = Operand(zero_reg),
@@ -1343,6 +1362,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
// Abort execution if argument is not the root value with the given index,
// enabled via --debug-code.
void AssertRootValue(Register src,
@@ -1427,6 +1449,17 @@ class MacroAssembler: public Assembler {
// in a0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Register null_value, Label* call_runtime);
+ // AllocationSiteInfo support. Arrays may have an associated
+ // AllocationSiteInfo object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+ // If allocation info is present, jump to allocation_info_present
+ void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
+ Register scratch_reg,
+ Condition cond,
+ Label* allocation_info_present);
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
@@ -1501,9 +1534,9 @@ class MacroAssembler: public Assembler {
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class StandardFrame;
};
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index 672ba0eeee..036cbb13e4 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -262,7 +262,7 @@ void RegExpMacroAssemblerMIPS::CheckCharacters(Vector<const uc16> str,
if (mode_ == ASCII) {
__ lbu(a1, MemOperand(a0, 0));
__ addiu(a0, a0, char_size());
- ASSERT(str[i] <= String::kMaxAsciiCharCode);
+ ASSERT(str[i] <= String::kMaxOneByteCharCode);
BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
} else {
__ lhu(a1, MemOperand(a0, 0));
@@ -341,7 +341,13 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ Or(t0, t0, Operand(0x20)); // Also convert input character.
__ Branch(&fail, ne, t0, Operand(a3));
__ Subu(a3, a3, Operand('a'));
- __ Branch(&fail, hi, a3, Operand('z' - 'a')); // Is a3 a lowercase letter?
+ __ Branch(&loop_check, ls, a3, Operand('z' - 'a'));
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ Subu(a3, a3, Operand(224 - 'a'));
+ // Fail if not a Latin-1 letter.
+ __ Branch(&fail, hi, a3, Operand(254 - 224));
+ // Check for 247.
+ __ Branch(&fail, eq, a3, Operand(247 - 224));
__ bind(&loop_check);
__ Branch(&loop, lt, a0, Operand(a1));
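The replacement code above accepts, besides a..z, the Latin-1 letters 0xE0..0xFE with the exception of 0xF7 (the division sign). The same test written as plain C++, mirroring the unsigned subtract-and-compare trick used by the generated code:

    #include <cstdio>

    // Plain-C++ version of the case-folding letter test in
    // CheckNotBackReferenceIgnoreCase.
    bool IsLatin1CaseFoldable(unsigned char c) {
      unsigned char lower = c | 0x20;  // fold ASCII and Latin-1 case
      unsigned d = lower - 'a';
      if (d <= 'z' - 'a') return true;  // ASCII letter
      d = lower - 224;                  // 0xE0 .. 0xFE range
      if (d > 254 - 224) return false;  // outside the Latin-1 letters
      return lower != 247;              // 0xF7 is not a letter
    }

    int main() {
      std::printf("%d %d %d\n", IsLatin1CaseFoldable('G'),
                  IsLatin1CaseFoldable(0xE9),   // e with acute accent
                  IsLatin1CaseFoldable(0xF7));  // division sign
      return 0;
    }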
@@ -511,7 +517,7 @@ void RegExpMacroAssemblerMIPS::CheckBitInTable(
Handle<ByteArray> table,
Label* on_bit_set) {
__ li(a0, Operand(table));
- if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
+ if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ And(a1, current_character(), Operand(kTableSize - 1));
__ Addu(a0, a0, a1);
} else {
@@ -531,25 +537,20 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
case 's':
// Match space-characters.
if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success;
__ Branch(&success, eq, current_character(), Operand(' '));
// Check range 0x09..0x0d.
__ Subu(a0, current_character(), Operand('\t'));
- BranchOrBacktrack(on_no_match, hi, a0, Operand('\r' - '\t'));
+ __ Branch(&success, ls, a0, Operand('\r' - '\t'));
+ // \u00a0 (NBSP).
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00a0 - '\t'));
__ bind(&success);
return true;
}
return false;
case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- BranchOrBacktrack(on_no_match, eq, current_character(), Operand(' '));
- __ Subu(a0, current_character(), Operand('\t'));
- BranchOrBacktrack(on_no_match, ls, a0, Operand('\r' - '\t'));
- return true;
- }
+ // The emitted code for generic character classes is good enough.
return false;
case 'd':
// Match ASCII digits ('0'..'9').
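For one-byte subjects the 's' class now also has to accept the no-break space. A plain-C++ mirror of the emitted checks (space, 0x09..0x0D, and 0x00A0):

    #include <cstdio>

    // Mirrors the one-byte /\s/ fast path above.
    bool IsOneByteSpace(unsigned char c) {
      if (c == ' ') return true;
      if (c >= '\t' && c <= '\r') return true;  // 0x09 .. 0x0D
      return c == 0xA0;                         // no-break space
    }

    int main() {
      std::printf("%d %d %d\n", IsOneByteSpace(' '), IsOneByteSpace('\n'),
                  IsOneByteSpace(0xA0));
      return 0;
    }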
@@ -1155,7 +1156,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1186,7 +1187,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
// If we changed between an ASCII and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index cf87f93602..be9f369d01 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -1016,6 +1016,13 @@ void Simulator::set_register(int reg, int32_t value) {
}
+void Simulator::set_dw_register(int reg, const int* dbl) {
+ ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+ registers_[reg] = dbl[0];
+ registers_[reg + 1] = dbl[1];
+}
+
+
void Simulator::set_fpu_register(int fpureg, int32_t value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
FPUregisters_[fpureg] = value;
@@ -1045,6 +1052,19 @@ int32_t Simulator::get_register(int reg) const {
}
+double Simulator::get_double_from_register_pair(int reg) {
+ ASSERT((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer registers_[] array
+ // into the double precision floating point value and return it.
+ char buffer[2 * sizeof(registers_[0])];
+ memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ return(dm_val);
+}
+
+
int32_t Simulator::get_fpu_register(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
return FPUregisters_[fpureg];
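set_dw_register and get_double_from_register_pair move a double in and out of two consecutive 32-bit GPRs, as the soft-float O32 ABI requires, using memcpy to reinterpret the bits. A standalone sketch of the same round trip:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Reassemble a double from a GPR pair, as get_double_from_register_pair
    // does; memcpy avoids any aliasing issues.
    double DoubleFromRegisterPair(const int32_t regs[2]) {
      double value;
      static_assert(sizeof(value) == 2 * sizeof(regs[0]),
                    "a register pair must hold a double");
      std::memcpy(&value, regs, sizeof(value));
      return value;
    }

    int main() {
      int32_t pair[2];
      double d = 2.5;
      std::memcpy(pair, &d, sizeof(d));  // what set_dw_register does
      std::printf("%f\n", DoubleFromRegisterPair(pair));
      return 0;
    }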
@@ -1525,7 +1545,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
FUNCTION_ADDR(target), arg1);
}
v8::Handle<v8::Value> result = target(arg1);
- *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+ *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
set_register(v0, arg0);
} else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
// See DirectCEntryStub::GenerateCall for explanation of register usage.
@@ -1536,7 +1556,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
FUNCTION_ADDR(target), arg1, arg2);
}
v8::Handle<v8::Value> result = target(arg1, arg2);
- *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+ *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
set_register(v0, arg0);
} else {
SimulatorRuntimeCall target =
@@ -1740,6 +1760,8 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
UNIMPLEMENTED_MIPS();
};
break;
+ case COP1X:
+ break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
case JR:
@@ -1929,6 +1951,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
const uint32_t rt_u = static_cast<uint32_t>(rt);
const int32_t rd_reg = instr->RdValue();
+ const int32_t fr_reg = instr->FrValue();
const int32_t fs_reg = instr->FsValue();
const int32_t ft_reg = instr->FtValue();
const int32_t fd_reg = instr->FdValue();
@@ -2173,8 +2196,8 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
case CVT_D_L: // Mips32r2 instruction.
// Watch the signs here, we want two 32-bit values
// to make a signed 64-bit value.
- i64 = (uint32_t) get_fpu_register(fs_reg);
- i64 |= ((int64_t) get_fpu_register(fs_reg + 1) << 32);
+ i64 = static_cast<uint32_t>(get_fpu_register(fs_reg));
+ i64 |= static_cast<int64_t>(get_fpu_register(fs_reg + 1)) << 32;
set_fpu_register_double(fd_reg, static_cast<double>(i64));
break;
case CVT_S_L:
@@ -2190,6 +2213,19 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
UNREACHABLE();
};
break;
+ case COP1X:
+ switch (instr->FunctionFieldRaw()) {
+ case MADD_D:
+ double fr, ft, fs;
+ fr = get_fpu_register_double(fr_reg);
+ fs = get_fpu_register_double(fs_reg);
+ ft = get_fpu_register_double(ft_reg);
+ set_fpu_register_double(fd_reg, fs * ft + fr);
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
case JR: {
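The new COP1X/MADD_D case computes fd = fs * ft + fr. A one-line model of the simulator's behaviour (it does not emulate a fused rounding step):

    #include <cstdio>

    // fd = fs * ft + fr, exactly as the simulator case above evaluates it.
    double MaddD(double fr, double fs, double ft) { return fs * ft + fr; }

    int main() {
      std::printf("%f\n", MaddD(1.0, 2.0, 3.0));  // 2*3 + 1 = 7
      return 0;
    }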
@@ -2219,10 +2255,10 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
set_register(HI, static_cast<int32_t>(u64hilo >> 32));
break;
case DIV:
- // Divide by zero was not checked in the configuration step - div and
- // divu do not raise exceptions. On division by 0, the result will
- // be UNPREDICTABLE.
- if (rt != 0) {
+ // Divide by zero and overflow were not checked in the configuration
+ // step - div and divu do not raise exceptions. On division by 0 and
+ // on overflow (INT_MIN/-1), the result will be UNPREDICTABLE.
+ if (rt != 0 && !(rs == INT_MIN && rt == -1)) {
set_register(LO, rs / rt);
set_register(HI, rs % rt);
}
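The widened DIV guard skips the LO/HI update for both division by zero and the INT_MIN / -1 overflow case, matching the architecture's UNPREDICTABLE result. A small sketch of the same guard:

    #include <climits>
    #include <cstdint>
    #include <cstdio>

    // div/divu never trap; on division by zero or INT_MIN / -1 the simulator
    // simply leaves LO and HI untouched, as the result is UNPREDICTABLE.
    void SimulateDiv(int32_t rs, int32_t rt, int32_t* lo, int32_t* hi) {
      if (rt != 0 && !(rs == INT_MIN && rt == -1)) {
        *lo = rs / rt;
        *hi = rs % rt;
      }
    }

    int main() {
      int32_t lo = 0, hi = 0;
      SimulateDiv(7, 2, &lo, &hi);
      std::printf("lo=%d hi=%d\n", lo, hi);
      SimulateDiv(INT_MIN, -1, &lo, &hi);  // left untouched
      std::printf("lo=%d hi=%d\n", lo, hi);
      return 0;
    }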
@@ -2718,34 +2754,7 @@ void Simulator::Execute() {
}
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
- // Set up arguments.
-
- // First four arguments passed in registers.
- ASSERT(argument_count >= 4);
- set_register(a0, va_arg(parameters, int32_t));
- set_register(a1, va_arg(parameters, int32_t));
- set_register(a2, va_arg(parameters, int32_t));
- set_register(a3, va_arg(parameters, int32_t));
-
- // Remaining arguments passed on stack.
- int original_stack = get_register(sp);
- // Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
- - kCArgsSlotsSize);
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- // Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
- set_register(sp, entry_stack);
-
+void Simulator::CallInternal(byte* entry) {
// Prepare to execute the code at entry.
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
@@ -2809,6 +2818,38 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
set_register(gp, gp_val);
set_register(sp, sp_val);
set_register(fp, fp_val);
+}
+
+
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+ // Set up arguments.
+
+ // First four arguments passed in registers.
+ ASSERT(argument_count >= 4);
+ set_register(a0, va_arg(parameters, int32_t));
+ set_register(a1, va_arg(parameters, int32_t));
+ set_register(a2, va_arg(parameters, int32_t));
+ set_register(a3, va_arg(parameters, int32_t));
+
+ // Remaining arguments passed on stack.
+ int original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
+ - kCArgsSlotsSize);
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ for (int i = 4; i < argument_count; i++) {
+ stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t);
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
+ CallInternal(entry);
// Pop stack passed arguments.
CHECK_EQ(entry_stack, get_register(sp));
@@ -2819,6 +2860,27 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
+double Simulator::CallFP(byte* entry, double d0, double d1) {
+ if (!IsMipsSoftFloatABI) {
+ set_fpu_register_double(f12, d0);
+ set_fpu_register_double(f14, d1);
+ } else {
+ int buffer[2];
+ ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
+ memcpy(buffer, &d0, sizeof(d0));
+ set_dw_register(a0, buffer);
+ memcpy(buffer, &d1, sizeof(d1));
+ set_dw_register(a2, buffer);
+ }
+ CallInternal(entry);
+ if (!IsMipsSoftFloatABI) {
+ return get_fpu_register_double(f0);
+ } else {
+ return get_double_from_register_pair(v0);
+ }
+}
+
+
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
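CallFP places its two double arguments according to the floating-point ABI: f12/f14 when hardware floats are used, otherwise the a0/a1 and a2/a3 register pairs. A sketch of that decision against a stand-in register file (indices and layout illustrative only):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Stand-in for the simulator's register state.
    struct FakeSimulator {
      double fpu[32];   // f0..f31
      int32_t gpr[32];  // zero, at, v0, v1, a0..a3, ...
    };

    void PassDoubles(FakeSimulator* sim, bool soft_float_abi,
                     double d0, double d1) {
      if (!soft_float_abi) {
        sim->fpu[12] = d0;  // f12
        sim->fpu[14] = d1;  // f14
      } else {
        std::memcpy(&sim->gpr[4], &d0, sizeof(d0));  // a0/a1 pair
        std::memcpy(&sim->gpr[6], &d1, sizeof(d1));  // a2/a3 pair
      }
    }

    int main() {
      FakeSimulator sim = {};
      PassDoubles(&sim, true, 1.5, 2.5);
      std::printf("a0=%08x a1=%08x\n", static_cast<unsigned>(sim.gpr[4]),
                  static_cast<unsigned>(sim.gpr[5]));
      return 0;
    }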
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 776badc29b..67f595302b 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -184,7 +184,9 @@ class Simulator {
// architecture specification and is off by 8 from the currently executing
// instruction.
void set_register(int reg, int32_t value);
+ void set_dw_register(int dreg, const int* dbl);
int32_t get_register(int reg) const;
+ double get_double_from_register_pair(int reg);
// Same for FPURegisters.
void set_fpu_register(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
@@ -214,6 +216,8 @@ class Simulator {
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
+ // Alternative: call a 2-argument double function.
+ double CallFP(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -353,6 +357,7 @@ class Simulator {
void GetFpArgs(double* x, int32_t* y);
void SetFpResult(const double& result);
+ void CallInternal(byte* entry);
// Architecture state.
// Registers.
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index bd15775d4b..d5cf6de905 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -121,14 +121,14 @@ static void ProbeTable(Isolate* isolate,
// the property. This function may return false negatives, so miss_label
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
+// Name must be unique and receiver must be a heap object.
static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
Label* miss_label,
Register receiver,
- Handle<String> name,
+ Handle<Name> name,
Register scratch0,
Register scratch1) {
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsUniqueName());
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@@ -162,13 +162,13 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
__ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- StringDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ receiver,
+ properties,
+ name,
+ scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
@@ -217,7 +217,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
- __ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset));
+ __ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Addu(scratch, scratch, at);
uint32_t mask = kPrimaryTableSize - 1;
@@ -307,26 +307,19 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
}
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- Handle<JSObject> holder,
- int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ lw(dst, FieldMemOperand(src, offset));
- } else {
+void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index) {
+ int offset = index * kPointerSize;
+ if (!inobject) {
// Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ offset = offset + FixedArray::kHeaderSize;
__ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- __ lw(dst, FieldMemOperand(dst, offset));
+ src = dst;
}
+ __ lw(dst, FieldMemOperand(src, offset));
}
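DoGenerateFastPropertyLoad now takes the in-object/out-of-object decision explicitly: in-object properties are read at a fixed offset inside the object, out-of-object ones need an extra load of the properties backing store. A plain-C++ model of the offset computation, with illustrative constants:

    #include <cstdio>

    const int kPointerSize = 4;           // MIPS32
    const int kFixedArrayHeaderSize = 8;  // illustrative, not V8's real value

    // In-object loads use the offset directly; out-of-object loads add the
    // FixedArray header and go through the properties backing store first.
    int PropertyLoadOffset(bool inobject, int index) {
      int offset = index * kPointerSize;
      if (!inobject) offset += kFixedArrayHeaderSize;
      return offset;
    }

    int main() {
      std::printf("in-object: %d, backing store: %d\n",
                  PropertyLoadOffset(true, 3), PropertyLoadOffset(false, 3));
      return 0;
    }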
@@ -424,12 +417,14 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
int index,
Handle<Map> transition,
- Handle<String> name,
+ Handle<Name> name,
Register receiver_reg,
Register name_reg,
+ Register value_reg,
Register scratch1,
Register scratch2,
- Label* miss_label) {
+ Label* miss_label,
+ Label* miss_restore_name) {
// a0 : value.
Label exit;
@@ -466,17 +461,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
holder = JSObject::cast(holder->GetPrototype());
} while (holder->GetPrototype()->IsJSObject());
}
- // We need an extra register, push
- __ push(name_reg);
- Label miss_pop, done_check;
CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, &miss_pop);
- __ jmp(&done_check);
- __ bind(&miss_pop);
- __ pop(name_reg);
- __ jmp(miss_label);
- __ bind(&done_check);
- __ pop(name_reg);
+ scratch1, scratch2, name, miss_restore_name);
}
// Stub never generated for non-global objects that require access
@@ -522,14 +508,14 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ sw(a0, FieldMemOperand(receiver_reg, offset));
+ __ sw(value_reg, FieldMemOperand(receiver_reg, offset));
// Skip updating write barrier if storing a smi.
- __ JumpIfSmi(a0, &exit, scratch1);
+ __ JumpIfSmi(value_reg, &exit);
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, a0);
+ __ mov(name_reg, value_reg);
__ RecordWriteField(receiver_reg,
offset,
name_reg,
@@ -542,14 +528,14 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Get the properties array.
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ sw(a0, FieldMemOperand(scratch1, offset));
+ __ sw(value_reg, FieldMemOperand(scratch1, offset));
// Skip updating write barrier if storing a smi.
- __ JumpIfSmi(a0, &exit);
+ __ JumpIfSmi(value_reg, &exit);
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, a0);
+ __ mov(name_reg, value_reg);
__ RecordWriteField(scratch1,
offset,
name_reg,
@@ -559,18 +545,20 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
// Return the value (register v0).
+ ASSERT(value_reg.is(a0));
__ bind(&exit);
__ mov(v0, a0);
__ Ret();
}
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Handle<Code> code = (kind == Code::LOAD_IC)
- ? masm->isolate()->builtins()->LoadIC_Miss()
- : masm->isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
+void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ bind(label);
+ __ li(this->name(), Operand(name));
+ }
}
@@ -682,7 +670,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// Pass the additional arguments.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data());
+ Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ li(a0, api_call_info);
__ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
@@ -751,7 +739,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
void Compile(MacroAssembler* masm,
Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name,
+ Handle<Name> name,
LookupResult* lookup,
Register receiver,
Register scratch1,
@@ -782,7 +770,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch3,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
- Handle<String> name,
+ Handle<Name> name,
const CallOptimization& optimization,
Label* miss_label) {
ASSERT(optimization.is_constant_call());
@@ -876,7 +864,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<String> name,
+ Handle<Name> name,
Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
@@ -937,7 +925,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// property.
static void GenerateCheckPropertyCell(MacroAssembler* masm,
Handle<GlobalObject> global,
- Handle<String> name,
+ Handle<Name> name,
Register scratch,
Label* miss) {
Handle<JSGlobalPropertyCell> cell =
@@ -956,7 +944,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
static void GenerateCheckPropertyCells(MacroAssembler* masm,
Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name,
+ Handle<Name> name,
Register scratch,
Label* miss) {
Handle<JSObject> current = object;
@@ -984,7 +972,7 @@ static void StoreIntAsFloat(MacroAssembler* masm,
Register scratch1,
Register scratch2) {
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
__ mtc1(ival, f0);
__ cvt_s_w(f0, f0);
__ sll(scratch1, wordoffset, 2);
@@ -1049,43 +1037,8 @@ static void StoreIntAsFloat(MacroAssembler* masm,
}
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
-
- __ li(scratch, biased_exponent << HeapNumber::kExponentShift);
- if (mantissa_shift_for_hi_word > 0) {
- __ sll(loword, hiword, mantissa_shift_for_lo_word);
- __ srl(hiword, hiword, mantissa_shift_for_hi_word);
- __ or_(hiword, scratch, hiword);
- } else {
- __ mov(loword, zero_reg);
- __ sll(hiword, hiword, mantissa_shift_for_hi_word);
- __ or_(hiword, scratch, hiword);
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- __ li(scratch, 1 << HeapNumber::kExponentShift);
- __ nor(scratch, scratch, scratch);
- __ and_(hiword, hiword, scratch);
- }
+void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1099,9 +1052,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register holder_reg,
Register scratch1,
Register scratch2,
- Handle<String> name,
+ Handle<Name> name,
int save_at_depth,
- Label* miss) {
+ Label* miss,
+ PrototypeCheckType check) {
+ Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -1129,11 +1084,12 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- name = factory()->LookupSymbol(name);
+ if (!name->IsUniqueName()) {
+ ASSERT(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current->property_dictionary()->FindEntry(*name) ==
- StringDictionary::kNotFound);
+ NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
scratch1, scratch2);
@@ -1142,9 +1098,15 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
reg = holder_reg; // From now on the object will be in holder_reg.
__ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
- Handle<Map> current_map(current->map());
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
+ Register map_reg = scratch1;
+ if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ Handle<Map> current_map(current->map());
+ // CheckMap implicitly loads the map of |reg| into |map_reg|.
+ __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK,
+ ALLOW_ELEMENT_TRANSITION_MAPS);
+ } else {
+ __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
@@ -1156,7 +1118,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (heap()->InNewSpace(*prototype)) {
// The prototype is in new space; we cannot store a reference to it
// in the code. Load it from the map.
- __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ __ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
} else {
// The prototype is in old space; load it directly.
__ li(reg, Operand(prototype));
@@ -1174,9 +1136,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Log the check depth.
LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
- // Check the holder map.
- __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
- DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
+ DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ }
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1194,128 +1158,128 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void StubCompiler::GenerateLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
- GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
- __ Ret();
+void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ Branch(success);
+ __ bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ }
}
-void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSFunction> value,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss, scratch1);
-
- // Check that the maps haven't changed.
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
-
- // Return the constant value.
- __ LoadHeapObject(v0, value);
- __ Ret();
-}
+Register BaseLoadStubCompiler::CallbackHandlerFrontend(
+ Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Label* success,
+ Handle<ExecutableAccessorInfo> callback) {
+ Label miss;
+ Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
-void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- ASSERT(!receiver.is(scratch1));
- ASSERT(!receiver.is(scratch2));
- ASSERT(!receiver.is(scratch3));
-
- // Load the properties dictionary.
- Register dictionary = scratch1;
- __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- miss,
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ ASSERT(!reg.is(scratch2()));
+ ASSERT(!reg.is(scratch3()));
+ ASSERT(!reg.is(scratch4()));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch4();
+ __ lw(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &miss,
&probe_done,
dictionary,
- name_reg,
- scratch2,
- scratch3);
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch3 contains the
- // pointer into the dictionary. Check that the value is the callback.
- Register pointer = scratch3;
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lw(scratch2, FieldMemOperand(pointer, kValueOffset));
- __ Branch(miss, ne, scratch2, Operand(callback));
+ this->name(),
+ scratch2(),
+ scratch3());
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3();
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ lw(scratch2(), FieldMemOperand(pointer, kValueOffset));
+ __ Branch(&miss, ne, scratch2(), Operand(callback));
+ }
+
+ HandlerFrontendFooter(success, &miss);
+ return reg;
}
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss, scratch1);
+void BaseLoadStubCompiler::NonexistentHandlerFrontend(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<Name> name,
+ Label* success,
+ Handle<GlobalObject> global) {
+ Label miss;
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
+ Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- GenerateDictionaryLoadCallback(
- reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
+ // If the last object in the prototype chain is a global object,
+ // check that the global property cell is empty.
+ if (!global.is_null()) {
+ GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
+ }
+
+ if (!last->HasFastProperties()) {
+ __ lw(scratch2(), FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ lw(scratch2(), FieldMemOperand(scratch2(), Map::kPrototypeOffset));
+ __ Branch(&miss, ne, scratch2(),
+ Operand(isolate()->factory()->null_value()));
}
+ HandlerFrontendFooter(success, &miss);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex index) {
+ GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
+ __ Ret();
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
+ // Return the constant value.
+ __ LoadHeapObject(v0, value);
+ __ Ret();
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
- __ push(receiver);
- __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
+ __ push(receiver());
+ __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
if (heap()->InNewSpace(callback->data())) {
- __ li(scratch3, callback);
- __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
+ __ li(scratch3(), callback);
+ __ lw(scratch3(), FieldMemOperand(scratch3(),
+ ExecutableAccessorInfo::kDataOffset));
} else {
- __ li(scratch3, Handle<Object>(callback->data()));
+ __ li(scratch3(), Handle<Object>(callback->data(),
+ callback->GetIsolate()));
}
__ Subu(sp, sp, 4 * kPointerSize);
__ sw(reg, MemOperand(sp, 3 * kPointerSize));
- __ sw(scratch3, MemOperand(sp, 2 * kPointerSize));
- __ li(scratch3, Operand(ExternalReference::isolate_address()));
- __ sw(scratch3, MemOperand(sp, 1 * kPointerSize));
- __ sw(name_reg, MemOperand(sp, 0 * kPointerSize));
+ __ sw(scratch3(), MemOperand(sp, 2 * kPointerSize));
+ __ li(scratch3(), Operand(ExternalReference::isolate_address()));
+ __ sw(scratch3(), MemOperand(sp, 1 * kPointerSize));
+ __ sw(name(), MemOperand(sp, 0 * kPointerSize));
- __ mov(a2, scratch2); // Saved in case scratch2 == a1.
- __ mov(a1, sp); // a1 (first argument - see note below) = Handle<String>
+ __ mov(a2, scratch2()); // Saved in case scratch2 == a1.
+ __ mov(a1, sp); // a1 (first argument - see note below) = Handle<Name>
// NOTE: the O32 abi requires a0 to hold a special pointer when returning a
// struct from the function (which is currently the case). This means we pass
@@ -1343,22 +1307,15 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
}
-void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Label* miss) {
+void BaseLoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<Name> name) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
// later.
@@ -1367,8 +1324,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo()) {
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
compile_followup_inline = callback->getter() != NULL &&
callback->IsCompatibleReceiver(*object);
}
@@ -1378,17 +1336,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
// Preserve the receiver register explicitly whenever it is different from
// the holder and it is needed should the interceptor return without any
// result. The CALLBACKS case needs the receiver to be passed into C++ code,
// the FIELD case might cause a miss during the prototype check.
bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+ bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
(lookup->type() == CALLBACKS || must_perfrom_prototype_check);
// Save necessary data before invoking an interceptor.
@@ -1396,86 +1351,40 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
{
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
- __ Push(receiver, holder_reg, name_reg);
+ __ Push(receiver(), holder_reg, this->name());
} else {
- __ Push(holder_reg, name_reg);
+ __ Push(holder_reg, this->name());
}
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method).
CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
+ receiver(),
holder_reg,
- name_reg,
+ this->name(),
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+ __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+ __ Branch(&interceptor_failed, eq, v0, Operand(scratch1()));
frame_scope.GenerateLeaveFrame();
__ Ret();
__ bind(&interceptor_failed);
- __ pop(name_reg);
+ __ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
- __ pop(receiver);
+ __ pop(receiver());
}
// Leave the internal frame.
}
- // Check that the maps from interceptor's holder to lookup's holder
- // haven't changed. And load lookup's holder into |holder| register.
- if (must_perfrom_prototype_check) {
- holder_reg = CheckPrototypes(interceptor_holder,
- holder_reg,
- Handle<JSObject>(lookup->holder()),
- scratch1,
- scratch2,
- scratch3,
- name,
- miss);
- }
-
- if (lookup->IsField()) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Retrieve a field from field's holder.
- GenerateFastPropertyLoad(masm(), v0, holder_reg,
- Handle<JSObject>(lookup->holder()),
- lookup->GetFieldIndex());
- __ Ret();
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- Handle<AccessorInfo> callback(
- AccessorInfo::cast(lookup->GetCallbackObject()));
- ASSERT(callback->getter() != NULL);
-
- // Tail call to runtime.
- // Important invariant in CALLBACKS case: the code above must be
- // structured to never clobber |receiver| register.
- __ li(scratch2, callback);
-
- __ Push(receiver, holder_reg);
- __ lw(scratch3,
- FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
- __ li(scratch1, Operand(ExternalReference::isolate_address()));
- __ Push(scratch3, scratch1, scratch2, name_reg);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- PushInterceptorArguments(masm(), receiver, holder_reg,
- name_reg, interceptor_holder);
+ PushInterceptorArguments(masm(), receiver(), holder_reg,
+ this->name(), interceptor_holder);
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate());
@@ -1484,7 +1393,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
}
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
__ Branch(miss, ne, a2, Operand(name));
}
@@ -1493,7 +1402,7 @@ void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name,
+ Handle<Name> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
@@ -1549,8 +1458,8 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
- Handle<String> name) {
+ PropertyIndex index,
+ Handle<Name> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -1623,7 +1532,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
} else {
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements;
+ Label attempt_to_grow_elements, with_write_barrier, check_double;
Register elements = t2;
Register end_elements = t1;
@@ -1634,7 +1543,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ CheckMap(elements,
v0,
Heap::kFixedArrayMapRootIndex,
- &call_builtin,
+ &check_double,
DONT_DO_SMI_CHECK);
// Get the array's length into v0 and calculate new length.
@@ -1650,7 +1559,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
// Check if value is a smi.
- Label with_write_barrier;
__ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(t0, &with_write_barrier);
@@ -1671,6 +1579,39 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Drop(argc + 1);
__ Ret();
+ __ bind(&check_double);
+
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ a0,
+ Heap::kFixedDoubleArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+
+ // Get the array's length into a0 and calculate new length.
+ __ lw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Addu(a0, a0, Operand(Smi::FromInt(argc)));
+
+ // Get the elements' length.
+ __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ Branch(&call_builtin, gt, a0, Operand(t0));
+
+ __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ StoreNumberToDoubleElements(
+ t0, a0, elements, a3, t1, a2, t5,
+ &call_builtin, argc * kDoubleSize);
+
+ // Save new length.
+ __ sw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Drop the arguments and return.
+ __ Drop(argc + 1);
+ __ Ret();
+
__ bind(&with_write_barrier);
__ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset));
@@ -1682,8 +1623,12 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(a3, t3, &call_builtin);
+
+ __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&call_builtin, eq, t3, Operand(at));
// edx: receiver
- // r3: map
+ // a3: map
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
@@ -1692,7 +1637,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
&try_holey_map);
__ mov(a2, receiver);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
__ jmp(&fast_object);
__ bind(&try_holey_map);
@@ -1703,7 +1650,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
&call_builtin);
__ mov(a2, receiver);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(a3, a3, &call_builtin);
@@ -1928,8 +1877,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
v0,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- v0, holder, a1, a3, t0, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ v0, holder, a1, a3, t0, name, &miss);
Register receiver = a1;
Register index = t1;
@@ -2008,8 +1958,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
v0,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- v0, holder, a1, a3, t0, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ v0, holder, a1, a3, t0, name, &miss);
Register receiver = v0;
Register index = t1;
@@ -2039,7 +1990,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
- __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(v0, Heap::kempty_stringRootIndex);
__ Drop(argc + 1);
__ Ret();
}
@@ -2146,7 +2097,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
return Handle<Code>::null();
}
- CpuFeatures::Scope scope_fpu(FPU);
+ CpuFeatureScope scope_fpu(masm(), FPU);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
@@ -2416,25 +2367,16 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
}
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function,
- Handle<String> name,
- CheckType check) {
+void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Label* success) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
// -----------------------------------
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
@@ -2467,77 +2409,93 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
break;
case STRING_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- // Check that the object is a two-byte string or a symbol.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- a0, holder, a3, a1, t0, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ // Check that the object is a string.
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ a0, holder, a3, a1, t0, name, &miss);
break;
- case NUMBER_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(a1, &fast);
- __ GetObjectType(a1, a0, a0);
- __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- a0, holder, a3, a1, t0, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case SYMBOL_CHECK:
+ // Check that the object is a symbol.
+ __ GetObjectType(a1, a1, a3);
+ __ Branch(&miss, ne, a3, Operand(SYMBOL_TYPE));
break;
- case BOOLEAN_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(t0, Heap::kTrueValueRootIndex);
- __ Branch(&fast, eq, a1, Operand(t0));
- __ LoadRoot(t0, Heap::kFalseValueRootIndex);
- __ Branch(&miss, ne, a1, Operand(t0));
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- a0, holder, a3, a1, t0, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case NUMBER_CHECK: {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ JumpIfSmi(a1, &fast);
+ __ GetObjectType(a1, a0, a0);
+ __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ a0, holder, a3, a1, t0, name, &miss);
+ break;
+ }
+ case BOOLEAN_CHECK: {
+ Label fast;
+ // Check that the object is a boolean.
+ __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+ __ Branch(&fast, eq, a1, Operand(t0));
+ __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+ __ Branch(&miss, ne, a1, Operand(t0));
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ a0, holder, a3, a1, t0, name, &miss);
break;
}
+ }
+
+ __ jmp(success);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ GenerateMissBranch();
+}
+
+
+void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(
function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
+}
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+Handle<Code> CallStubCompiler::CompileCallConstant(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Handle<JSFunction> function) {
+ if (HasCustomCallGenerator(function)) {
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, Handle<String>::cast(name));
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
+ }
+
+ Label success;
+
+ CompileHandlerFrontend(object, holder, name, check, &success);
+ __ bind(&success);
+ CompileHandlerBackend(function);
// Return the generated code.
return GetCode(function);
@@ -2546,7 +2504,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<String> name) {
+ Handle<Name> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -2589,14 +2547,15 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<GlobalObject> holder,
Handle<JSGlobalPropertyCell> cell,
Handle<JSFunction> function,
- Handle<String> name) {
+ Handle<Name> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ Handle<Code> code = CompileCustomCall(
+ object, holder, cell, function, Handle<String>::cast(name));
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
@@ -2643,61 +2602,24 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
}
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Name register might be clobbered.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- a1, a2, a3, t0,
- &miss);
- __ bind(&miss);
- __ li(a2, Operand(Handle<String>(name))); // Restore name.
- Handle<Code> ic = masm()->isolate()->builtins()->Builtins::StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<Name> name,
+ Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
+ Handle<ExecutableAccessorInfo> callback) {
Label miss;
// Check that the maps haven't changed.
- __ JumpIfSmi(a1, &miss, a3);
- CheckPrototypes(receiver, a1, holder, a3, t0, t1, name, &miss);
+ __ JumpIfSmi(receiver(), &miss);
+ CheckPrototypes(object, receiver(), holder,
+ scratch1(), scratch2(), scratch3(), name, &miss);
// Stub never generated for non-global objects that require access
// checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- __ push(a1); // Receiver.
- __ li(a3, Operand(callback)); // Callback info.
- __ Push(a3, a2, a0);
+ __ push(receiver()); // Receiver.
+ __ li(at, Operand(callback)); // Callback info.
+ __ Push(at, this->name(), value());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
@@ -2707,11 +2629,10 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+ return GetICCode(kind(), Code::CALLBACKS, name);
}
@@ -2761,62 +2682,28 @@ void StoreStubCompiler::GenerateStoreViaSetter(
#define __ ACCESS_MASM(masm())
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(a1, &miss);
- CheckPrototypes(receiver, a1, holder, a3, t0, t1, name, &miss);
-
- GenerateStoreViaSetter(masm(), setter);
-
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> receiver,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
+ Handle<JSObject> object,
+ Handle<Name> name) {
Label miss;
// Check that the map of the object hasn't changed.
- __ CheckMap(a1, a3, Handle<Map>(receiver->map()), &miss,
+ __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(a1, a3, &miss);
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
}
// Stub is never generated for non-global objects that require access
// checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- __ Push(a1, a2, a0); // Receiver, name, value.
+ __ Push(receiver(), this->name(), value());
- __ li(a0, Operand(Smi::FromInt(strict_mode_)));
- __ push(a0); // Strict mode.
+ __ li(scratch1(), Operand(Smi::FromInt(strict_mode())));
+ __ push(scratch1()); // strict mode
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
@@ -2826,133 +2713,113 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->Builtins::StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
+ return GetICCode(kind(), Code::INTERCEPTOR, name);
}
Handle<Code> StoreStubCompiler::CompileStoreGlobal(
Handle<GlobalObject> object,
Handle<JSGlobalPropertyCell> cell,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
+ Handle<Name> name) {
Label miss;
// Check that the map of the global has not changed.
- __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+ __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ __ Branch(&miss, ne, scratch1(), Operand(Handle<Map>(object->map())));
// Check that the value in the cell is not the hole. If it is, this
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
- __ li(t0, Operand(cell));
- __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
- __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
- __ Branch(&miss, eq, t1, Operand(t2));
+ __ li(scratch1(), Operand(cell));
+ __ LoadRoot(scratch2(), Heap::kTheHoleValueRootIndex);
+ __ lw(scratch3(),
+ FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset));
+ __ Branch(&miss, eq, scratch3(), Operand(scratch2()));
// Store the value in the cell.
- __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
+ __ sw(value(),
+ FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset));
__ mov(v0, a0); // Stored value must be returned in v0.
// Cells are always rescanned, so no write barrier here.
Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
+ __ IncrementCounter(
+ counters->named_store_global_inline(), 1, scratch1(), scratch2());
__ Ret();
// Handle store cache miss.
__ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1, a1, a3);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ __ IncrementCounter(
+ counters->named_store_global_inline_miss(), 1, scratch1(), scratch2());
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(Code::NORMAL, name);
+ return GetICCode(kind(), Code::NORMAL, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> last) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Check that the receiver is not a smi.
- __ JumpIfSmi(a0, &miss);
-
- // Check the maps of the full prototype chain.
- CheckPrototypes(object, a0, last, a3, a1, t0, name, &miss);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<Name> name,
+ Handle<GlobalObject> global) {
+ Label success;
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (last->IsGlobalObject()) {
- GenerateCheckPropertyCell(
- masm(), Handle<GlobalObject>::cast(last), name, a1, &miss);
- }
+ NonexistentHandlerFrontend(object, last, name, &success, global);
+ __ bind(&success);
// Return undefined if maps of the full prototype chain is still the same.
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
__ Ret();
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::NONEXISTENT, factory()->empty_string());
+ return GetCode(kind(), Code::NONEXISTENT, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- int index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { a0, a2, a3, a1, t0, t1 };
+ return registers;
+}
- __ mov(v0, a0);
- GenerateLoadField(object, holder, v0, a3, a1, t0, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { a1, a0, a2, a3, t0, t1 };
+ return registers;
+}
- // Return the generated code.
- return GetCode(Code::FIELD, name);
+
+Register* StoreStubCompiler::registers() {
+ // receiver, name, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { a1, a2, a0, a3, t0, t1 };
+ return registers;
}
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
- GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, t1, callback, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { a2, a1, a0, a3, t0, t1 };
+ return registers;
+}
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+
+void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
+ Register name_reg,
+ Label* miss) {
+ __ Branch(miss, ne, name_reg, Operand(name));
+}
+
+
+void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
+ Register name_reg,
+ Label* miss) {
+ __ Branch(miss, ne, name_reg, Operand(name));
}
@@ -2993,91 +2860,18 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(a0, &miss);
- CheckPrototypes(receiver, a0, holder, a3, t0, a1, name, &miss);
-
- GenerateLoadViaGetter(masm(), getter);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> value,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadConstant(object, holder, a0, a3, a1, t0, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -- [sp] : receiver
- // -----------------------------------
- Label miss;
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(object, holder, &lookup, a0, a2, a3, a1, t0, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<JSObject> object,
- Handle<GlobalObject> holder,
+ Handle<GlobalObject> global,
Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
+ Handle<Name> name,
bool is_dont_delete) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
+ Label success, miss;
- // Check that the map of the global has not changed.
- __ JumpIfSmi(a0, &miss);
- CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
+ __ CheckMap(
+ receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
+ HandlerFrontendHeader(
+ object, receiver(), Handle<JSObject>::cast(global), name, &miss);
// Get the value from the cell.
__ li(a3, Operand(cell));
@@ -3089,293 +2883,48 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Branch(&miss, eq, t0, Operand(at));
}
- __ mov(v0, t0);
+ HandlerFrontendFooter(&success, &miss);
+ __ bind(&success);
+
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+ __ mov(v0, t0);
__ Ret();
- __ bind(&miss);
- __ IncrementCounter(counters->named_load_global_stub_miss(), 1, a1, a3);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- int index) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::FIELD, name);
+ return GetICCode(kind(), Code::NORMAL, name);
}
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, t1, callback,
- name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver, holder, &lookup, a1, a0, a2, a3, t0, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadArrayLength(masm(), a1, a2, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
-
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
+Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
Label miss;
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
-
- // Check the name hasn't changed.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
-}
-
+ if (check == PROPERTY) {
+ GenerateNameCheck(name, this->name(), &miss);
+ }
-Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_ics) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
- __ JumpIfSmi(a1, &miss);
+ __ JumpIfSmi(receiver(), &miss);
+ Register map_reg = scratch1();
int receiver_count = receiver_maps->length();
- __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET,
- eq, a2, Operand(receiver_maps->at(current)));
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
+ eq, map_reg, Operand(receiver_maps->at(current)));
}
__ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -----------------------------------
-
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);
-
- // Check that the name has not changed.
- __ Branch(&miss, ne, a1, Operand(name));
-
- // a3 is used as scratch register. a1 and a2 keep their values if a jump to
- // the miss label is generated.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- a2, a1, a3, t0,
- &miss);
- __ bind(&miss);
-
- __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
- Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : scratch
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub =
- KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
-
- __ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
+ InlineCacheState state =
+ receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetICCode(kind(), type, name, state);
}
@@ -3383,37 +2932,30 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : scratch
- // -----------------------------------
Label miss;
- __ JumpIfSmi(a2, &miss);
+ __ JumpIfSmi(receiver(), &miss);
int receiver_count = receiver_maps->length();
- __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int i = 0; i < receiver_count; ++i) {
if (transitioned_maps->at(i).is_null()) {
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
- a3, Operand(receiver_maps->at(i)));
+ scratch1(), Operand(receiver_maps->at(i)));
} else {
Label next_map;
- __ Branch(&next_map, ne, a3, Operand(receiver_maps->at(i)));
- __ li(a3, Operand(transitioned_maps->at(i)));
+ __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
+ __ li(transition_map(), Operand(transitioned_maps->at(i)));
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
}
__ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetICCode(
+ kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
}
@@ -3522,7 +3064,8 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
__ bind(&next);
} else {
// Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+ Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
+ masm()->isolate());
__ li(a2, Operand(constant));
__ sw(a2, MemOperand(t5));
__ Addu(t5, t5, kPointerSize);
@@ -3600,9 +3143,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
// Miss case, call the runtime.
__ bind(&miss_force_generic);
@@ -3612,10 +3153,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
}
@@ -3654,9 +3192,10 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register scratch0,
Register scratch1,
FPURegister double_scratch0,
+ FPURegister double_scratch1,
Label* fail) {
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap
// number and check if the conversion is exact and fits into the smi
@@ -3669,15 +3208,15 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
DONT_DO_SMI_CHECK);
__ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
__ EmitFPUTruncate(kRoundToZero,
- double_scratch0,
- double_scratch0,
scratch0,
+ double_scratch0,
+ at,
+ double_scratch1,
scratch1,
kCheckForInexactConversion);
__ Branch(fail, ne, scratch1, Operand(zero_reg));
- __ mfc1(scratch0, double_scratch0);
__ SmiTagCheckOverflow(key, scratch0, scratch1);
__ BranchOnOverflow(fail, scratch1);
__ bind(&key_ok);
@@ -3688,343 +3227,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
}
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow, failed_allocation;
-
- Register key = a0;
- Register receiver = a1;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic);
-
- __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // a3: elements array
-
- // Check that the index is in range.
- __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
- __ sra(t2, key, kSmiTagSize);
- // Unsigned comparison catches both negative and too-large values.
- __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
-
- __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
- // a3: base pointer of external storage
-
- // We are not untagging smi key and instead work with it
- // as if it was premultiplied by 2.
- STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
- Register value = a2;
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ srl(t2, key, 1);
- __ addu(t3, a3, t2);
- __ lb(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t2, key, 1);
- __ addu(t3, a3, t2);
- __ lbu(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ addu(t3, a3, key);
- __ lh(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t3, a3, key);
- __ lhu(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t2, key, 1);
- __ addu(t3, a3, t2);
- __ lw(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- __ sll(t3, t2, 2);
- __ addu(t3, a3, t3);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ lwc1(f0, MemOperand(t3, 0));
- } else {
- __ lw(value, MemOperand(t3, 0));
- }
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ sll(t2, key, 2);
- __ addu(t3, a3, t2);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ ldc1(f0, MemOperand(t3, 0));
- } else {
- // t3: pointer to the beginning of the double we want to load.
- __ lw(a2, MemOperand(t3, 0));
- __ lw(a3, MemOperand(t3, Register::kSizeInBytes));
- }
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // a2: value
- // For float array type:
- // f0: value (if FPU is supported)
- // a2: value (if FPU is not supported)
- // For double array type:
- // f0: value (if FPU is supported)
- // a2/a3: value (if FPU is not supported)
-
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- __ Subu(t3, value, Operand(0xC0000000)); // Non-smi value gives neg result.
- __ Branch(&box_int, lt, t3, Operand(zero_reg));
- // Tag integer as smi and return it.
- __ sll(v0, value, kSmiTagSize);
- __ Ret();
-
- __ bind(&box_int);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion.
- // The arm version uses a temporary here to save r0, but we don't need to
- // (a0 is not modified).
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, a3, t0, t1, &slow);
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ mtc1(value, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- Register dst1 = t2;
- Register dst2 = t3;
- FloatingPointHelper::Destination dest =
- FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::ConvertIntToDouble(masm,
- value,
- dest,
- f0,
- dst1,
- dst2,
- t1,
- f2);
- __ sw(dst1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- Label pl_box_int;
- __ And(t2, value, Operand(0xC0000000));
- __ Branch(&pl_box_int, ne, t2, Operand(zero_reg));
-
- // It can fit in an Smi.
- // Tag integer as smi and return it.
- __ sll(v0, value, kSmiTagSize);
- __ Ret();
-
- __ bind(&pl_box_int);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
- // registers - also when jumping due to exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t2, t3, t6, &slow);
-
- // This is replaced by a macro:
- // __ mtc1(value, f0); // LS 32-bits.
- // __ mtc1(zero_reg, f1); // MS 32-bits are all zero.
- // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit.
-
- __ Cvt_d_uw(f0, value, f22);
-
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
-
- __ Ret();
- } else {
- // Check whether unsigned integer fits into smi.
- Label box_int_0, box_int_1, done;
- __ And(t2, value, Operand(0x80000000));
- __ Branch(&box_int_0, ne, t2, Operand(zero_reg));
- __ And(t2, value, Operand(0x40000000));
- __ Branch(&box_int_1, ne, t2, Operand(zero_reg));
-
- // Tag integer as smi and return it.
- __ sll(v0, value, kSmiTagSize);
- __ Ret();
-
- Register hiword = value; // a2.
- Register loword = a3;
-
- __ bind(&box_int_0);
- // Integer does not have leading zeros.
- GenerateUInt2Double(masm, hiword, loword, t0, 0);
- __ Branch(&done);
-
- __ bind(&box_int_1);
- // Integer has one leading zero.
- GenerateUInt2Double(masm, hiword, loword, t0, 1);
-
-
- __ bind(&done);
- // Integer was converted to double in registers hiword:loword.
- // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber
- // clobbers all registers - also when jumping due to exhausted young
- // space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t2, t3, t5, t6, &slow);
-
- __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
- __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
-
- __ mov(v0, t2);
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Allocate a HeapNumber for the result. Don't use a0 and a1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
- // The float (single) value is already in fpu reg f0 (if we use float).
- __ cvt_d_s(f0, f0);
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use a0 and a1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
- // FPU is not available, do manual single to double conversion.
-
- // a2: floating point value (binary32).
- // v0: heap number for result
-
- // Extract mantissa to t4.
- __ And(t4, value, Operand(kBinary32MantissaMask));
-
- // Extract exponent to t5.
- __ srl(t5, value, kBinary32MantissaBits);
- __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg));
-
- __ li(t0, 0x7ff);
- __ Xor(t1, t5, Operand(0xFF));
- __ Movz(t5, t0, t1); // Set t5 to 0x7ff only if t5 is equal to 0xff.
- __ Branch(&exponent_rebiased, eq, t1, Operand(zero_reg));
-
- // Rebias exponent.
- __ Addu(t5,
- t5,
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ And(a2, value, Operand(kBinary32SignMask));
- value = no_reg;
- __ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord);
- __ or_(a2, a2, t0);
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ srl(t0, t4, kMantissaShiftForHiWord);
- __ or_(a2, a2, t0);
- __ sll(a0, t4, kMantissaShiftForLoWord);
-
- __ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ Ret();
- }
-
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Allocate a HeapNumber for the result. Don't use a0 and a1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
- // The double value is already in f0
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use a0 and a1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
-
- __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ Ret();
- }
-
- } else {
- // Tag integer as smi and return it.
- __ sll(v0, value, kSmiTagSize);
- __ Ret();
- }
-
- // Slow case, key and receiver still in a0 and a1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, a2, a3);
-
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
-
- __ Push(a1, a0);
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
@@ -4047,7 +3249,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key, t0, t1, f2, f4, &miss_force_generic);
__ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -4126,10 +3328,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
FloatingPointHelper::ConvertIntToDouble(
masm, t1, destination,
- f0, t2, t3, // These are: double_dst, dst1, dst2.
+ f0, t2, t3, // These are: double_dst, dst_mantissa, dst_exponent.
t0, f2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
__ sdc1(f0, MemOperand(a3, 0));
} else {
__ sw(t2, MemOperand(a3, 0));
@@ -4167,7 +3369,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// reproducible behavior, convert these to zero.
if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
+ CpuFeatureScope scope(masm, FPU);
__ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
@@ -4405,9 +3607,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
// Miss case, call the runtime.
__ bind(&miss_force_generic);
@@ -4417,119 +3617,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, a0, t0, t1, f2, &miss_force_generic);
-
- // Get the elements array.
- __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ AssertFastElements(a2);
-
- // Check that the key is within bounds.
- __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ Branch(USE_DELAY_SLOT, &miss_force_generic, hs, a0, Operand(a3));
-
- // Load the result and make sure it's not the hole.
- __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, t0, a3);
- __ lw(t0, MemOperand(t0));
- __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
- __ Branch(&miss_force_generic, eq, t0, Operand(t1));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, t0);
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- Register key_reg = a0;
- Register receiver_reg = a1;
- Register elements_reg = a2;
- Register heap_number_reg = a2;
- Register indexed_double_offset = a3;
- Register scratch = t0;
- Register scratch2 = t1;
- Register scratch3 = t2;
- Register heap_number_map = t3;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
-
- // Get the elements array.
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
-
- // Load the upper word of the double in the fixed array and test for NaN.
- __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- __ Addu(indexed_double_offset, elements_reg, Operand(scratch2));
- uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
- __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32));
-
- // Non-NaN. Allocate a new heap number and copy the double value into it.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
- heap_number_map, &slow_allocate_heapnumber);
-
- // Don't need to reload the upper 32 bits of the double, it's already in
- // scratch.
- __ sw(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kExponentOffset));
- __ lw(scratch, FieldMemOperand(indexed_double_offset,
- FixedArray::kHeaderSize));
- __ sw(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kMantissaOffset));
-
- __ mov(v0, heap_number_reg);
- __ Ret();
-
- __ bind(&slow_allocate_heapnumber);
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
}
@@ -4537,7 +3625,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
+ KeyedAccessStoreMode store_mode) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -4561,7 +3649,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
@@ -4576,7 +3664,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis.
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ if (is_js_array && IsGrowStoreMode(store_mode)) {
__ Branch(&grow, hs, key_reg, Operand(scratch));
} else {
__ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
@@ -4620,15 +3708,12 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ Ret();
__ bind(&miss_force_generic);
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
__ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ if (is_js_array && IsGrowStoreMode(store_mode)) {
// Grow the array by a single element if possible.
__ bind(&grow);
@@ -4646,8 +3731,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ Branch(&check_capacity, ne, elements_reg, Operand(at));
int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
- TAG_OBJECT);
+ __ Allocate(size, elements_reg, scratch, scratch2, &slow, TAG_OBJECT);
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
__ sw(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
@@ -4690,8 +3774,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ jmp(&finish_store);
__ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
}
}
@@ -4699,17 +3782,18 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
bool is_js_array,
- KeyedAccessGrowMode grow_mode) {
+ KeyedAccessStoreMode store_mode) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
// -- a2 : receiver
// -- ra : return address
- // -- a3 : scratch
+ // -- a3 : scratch (elements backing store)
// -- t0 : scratch (elements_reg)
// -- t1 : scratch (mantissa_reg)
// -- t2 : scratch (exponent_reg)
// -- t3 : scratch4
+ // -- t4 : scratch
// -----------------------------------
Label miss_force_generic, transition_elements_kind, grow, slow;
Label finish_store, check_capacity;
@@ -4722,13 +3806,14 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Register scratch2 = t1;
Register scratch3 = t2;
Register scratch4 = t3;
+ Register scratch5 = t4;
Register length_reg = t3;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
__ lw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4742,7 +3827,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
}
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+ if (IsGrowStoreMode(store_mode)) {
__ Branch(&grow, hs, key_reg, Operand(scratch1));
} else {
__ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
@@ -4752,7 +3837,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ StoreNumberToDoubleElements(value_reg,
key_reg,
- receiver_reg,
// All registers after this are overwritten.
elements_reg,
scratch1,
@@ -4766,15 +3850,12 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Handle store cache miss, replacing the ic with the generic stub.
__ bind(&miss_force_generic);
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
__ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ if (is_js_array && IsGrowStoreMode(store_mode)) {
// Grow the array by a single element if possible.
__ bind(&grow);
@@ -4800,17 +3881,34 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ Branch(&check_capacity, ne, elements_reg, Operand(at));
int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
- TAG_OBJECT);
+ __ Allocate(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT);
- // Initialize the new FixedDoubleArray. Leave elements unitialized for
- // efficiency, they are guaranteed to be initialized before use.
+ // Initialize the new FixedDoubleArray.
__ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
__ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
__ sw(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+ __ mov(scratch1, elements_reg);
+ __ StoreNumberToDoubleElements(value_reg,
+ key_reg,
+ // All registers after this are overwritten.
+ scratch1,
+ scratch2,
+ scratch3,
+ scratch4,
+ scratch5,
+ &transition_elements_kind);
+
+ __ li(scratch1, Operand(kHoleNanLower32));
+ __ li(scratch2, Operand(kHoleNanUpper32));
+ for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
+ int offset = FixedDoubleArray::OffsetOfElementAt(i);
+ __ sw(scratch1, FieldMemOperand(elements_reg, offset));
+ __ sw(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
+ }
+
// Install the new backing store in the JSArray.
__ sw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4823,7 +3921,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ lw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ jmp(&finish_store);
+ __ Ret();
__ bind(&check_capacity);
// Make sure that the backing store can hold additional elements.
@@ -4837,8 +3935,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ jmp(&finish_store);
__ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+ TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
}
}